KVM: use the new intel iommu APIs
virt/kvm/kvm_main.c
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

#ifdef CONFIG_X86
#include <asm/msidef.h>
#endif

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
#include "coalesced_mmio.h"
#endif

#ifdef KVM_CAP_DEVICE_ASSIGNMENT
#include <linux/pci.h>
#include <linux/interrupt.h>
#include "irq.h"
#endif

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static int msi2intx = 1;
module_param(msi2intx, bool, 0);

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
                           unsigned long arg);

static bool kvm_rebooting;

#ifdef KVM_CAP_DEVICE_ASSIGNMENT

#ifdef CONFIG_X86
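/*
 * Deliver an assigned device's guest-programmed MSI: decode the
 * destination, vector, trigger and delivery mode from the MSI
 * address/data pair and inject the interrupt through the in-kernel
 * ioapic/lapic emulation.
 */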
static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev)
{
        int vcpu_id;
        struct kvm_vcpu *vcpu;
        struct kvm_ioapic *ioapic = ioapic_irqchip(dev->kvm);
        int dest_id = (dev->guest_msi.address_lo & MSI_ADDR_DEST_ID_MASK)
                        >> MSI_ADDR_DEST_ID_SHIFT;
        int vector = (dev->guest_msi.data & MSI_DATA_VECTOR_MASK)
                        >> MSI_DATA_VECTOR_SHIFT;
        int dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
                                (unsigned long *)&dev->guest_msi.address_lo);
        int trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
                                (unsigned long *)&dev->guest_msi.data);
        int delivery_mode = test_bit(MSI_DATA_DELIVERY_MODE_SHIFT,
                                (unsigned long *)&dev->guest_msi.data);
        u32 deliver_bitmask;

        BUG_ON(!ioapic);

        deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic,
                                dest_id, dest_mode);
        /* IOAPIC delivery mode value is the same as MSI here */
        switch (delivery_mode) {
        case IOAPIC_LOWEST_PRIORITY:
                vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
                                deliver_bitmask);
                if (vcpu != NULL)
                        kvm_apic_set_irq(vcpu, vector, trig_mode);
                else
                        printk(KERN_INFO "kvm: null lowest priority vcpu!\n");
                break;
        case IOAPIC_FIXED:
                for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
                        if (!(deliver_bitmask & (1 << vcpu_id)))
                                continue;
                        deliver_bitmask &= ~(1 << vcpu_id);
                        vcpu = ioapic->kvm->vcpus[vcpu_id];
                        if (vcpu)
                                kvm_apic_set_irq(vcpu, vector, trig_mode);
                }
                break;
        default:
                printk(KERN_INFO "kvm: unsupported MSI delivery mode\n");
        }
}
#else
static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev) {}
#endif

static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
                                                      int assigned_dev_id)
{
        struct list_head *ptr;
        struct kvm_assigned_dev_kernel *match;

        list_for_each(ptr, head) {
                match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
                if (match->assigned_dev_id == assigned_dev_id)
                        return match;
        }
        return NULL;
}

static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
{
        struct kvm_assigned_dev_kernel *assigned_dev;

        assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
                                    interrupt_work);

        /* This is taken to safely inject irq inside the guest. When
         * the interrupt injection (or the ioapic code) uses a
         * finer-grained lock, update this
         */
        mutex_lock(&assigned_dev->kvm->lock);
        if (assigned_dev->irq_requested_type & KVM_ASSIGNED_DEV_GUEST_INTX)
                kvm_set_irq(assigned_dev->kvm,
                            assigned_dev->irq_source_id,
                            assigned_dev->guest_irq, 1);
        else if (assigned_dev->irq_requested_type &
                                KVM_ASSIGNED_DEV_GUEST_MSI) {
                assigned_device_msi_dispatch(assigned_dev);
                enable_irq(assigned_dev->host_irq);
                assigned_dev->host_irq_disabled = false;
        }
        mutex_unlock(&assigned_dev->kvm->lock);
        kvm_put_kvm(assigned_dev->kvm);
}

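/*
 * Host-side interrupt handler for an assigned device.  Actual injection
 * into the guest happens from process context via interrupt_work; the
 * host line is masked here and re-enabled once the guest acks (or, for
 * MSI, once the work handler has run).
 */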
static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
{
        struct kvm_assigned_dev_kernel *assigned_dev =
                (struct kvm_assigned_dev_kernel *) dev_id;

        kvm_get_kvm(assigned_dev->kvm);

        schedule_work(&assigned_dev->interrupt_work);

        disable_irq_nosync(irq);
        assigned_dev->host_irq_disabled = true;

        return IRQ_HANDLED;
}

/* Ack the irq line for an assigned device */
static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
{
        struct kvm_assigned_dev_kernel *dev;

        if (kian->gsi == -1)
                return;

        dev = container_of(kian, struct kvm_assigned_dev_kernel,
                           ack_notifier);

        kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);

        /* The guest irq may be shared so this ack may be
         * from another device.
         */
        if (dev->host_irq_disabled) {
                enable_irq(dev->host_irq);
                dev->host_irq_disabled = false;
        }
}

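/*
 * Tear down the host IRQ side of an assigned device: unregister the ack
 * notifier, drop the IRQ source id, flush any pending injection work and
 * release the host interrupt, disabling MSI if it was in use.
 */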
static void kvm_free_assigned_irq(struct kvm *kvm,
                                  struct kvm_assigned_dev_kernel *assigned_dev)
{
        if (!irqchip_in_kernel(kvm))
                return;

        kvm_unregister_irq_ack_notifier(&assigned_dev->ack_notifier);

        if (assigned_dev->irq_source_id != -1)
                kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
        assigned_dev->irq_source_id = -1;

        if (!assigned_dev->irq_requested_type)
                return;

        if (cancel_work_sync(&assigned_dev->interrupt_work))
                /* We had pending work. That means we will have to take
                 * care of kvm_put_kvm.
                 */
                kvm_put_kvm(kvm);

        free_irq(assigned_dev->host_irq, (void *)assigned_dev);

        if (assigned_dev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI)
                pci_disable_msi(assigned_dev->dev);

        assigned_dev->irq_requested_type = 0;
}


static void kvm_free_assigned_device(struct kvm *kvm,
                                     struct kvm_assigned_dev_kernel
                                     *assigned_dev)
{
        kvm_free_assigned_irq(kvm, assigned_dev);

        pci_reset_function(assigned_dev->dev);

        pci_release_regions(assigned_dev->dev);
        pci_disable_device(assigned_dev->dev);
        pci_dev_put(assigned_dev->dev);

        list_del(&assigned_dev->list);
        kfree(assigned_dev);
}

void kvm_free_all_assigned_devices(struct kvm *kvm)
{
        struct list_head *ptr, *ptr2;
        struct kvm_assigned_dev_kernel *assigned_dev;

        list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
                assigned_dev = list_entry(ptr,
                                          struct kvm_assigned_dev_kernel,
                                          list);

                kvm_free_assigned_device(kvm, assigned_dev);
        }
}

static int assigned_device_update_intx(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *adev,
                        struct kvm_assigned_irq *airq)
{
        adev->guest_irq = airq->guest_irq;
        adev->ack_notifier.gsi = airq->guest_irq;

        if (adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_INTX)
                return 0;

        if (irqchip_in_kernel(kvm)) {
                if (!msi2intx &&
                    adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI) {
                        free_irq(adev->host_irq, (void *)kvm);
                        pci_disable_msi(adev->dev);
                }

                if (!capable(CAP_SYS_RAWIO))
                        return -EPERM;

                if (airq->host_irq)
                        adev->host_irq = airq->host_irq;
                else
                        adev->host_irq = adev->dev->irq;

                /* Even though this is PCI, we don't want to use shared
                 * interrupts. Sharing host devices with guest-assigned devices
                 * on the same interrupt line is not a happy situation: there
                 * are going to be long delays in accepting, acking, etc.
                 */
                if (request_irq(adev->host_irq, kvm_assigned_dev_intr,
                                0, "kvm_assigned_intx_device", (void *)adev))
                        return -EIO;
        }

        adev->irq_requested_type = KVM_ASSIGNED_DEV_GUEST_INTX |
                                   KVM_ASSIGNED_DEV_HOST_INTX;
        return 0;
}

#ifdef CONFIG_X86
static int assigned_device_update_msi(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *adev,
                        struct kvm_assigned_irq *airq)
{
        int r;

        if (airq->flags & KVM_DEV_IRQ_ASSIGN_ENABLE_MSI) {
                /* x86 doesn't care about the upper address of the guest
                 * msi message addr */
                adev->irq_requested_type |= KVM_ASSIGNED_DEV_GUEST_MSI;
                adev->irq_requested_type &= ~KVM_ASSIGNED_DEV_GUEST_INTX;
                adev->guest_msi.address_lo = airq->guest_msi.addr_lo;
                adev->guest_msi.data = airq->guest_msi.data;
                adev->ack_notifier.gsi = -1;
        } else if (msi2intx) {
                adev->irq_requested_type |= KVM_ASSIGNED_DEV_GUEST_INTX;
                adev->irq_requested_type &= ~KVM_ASSIGNED_DEV_GUEST_MSI;
                adev->guest_irq = airq->guest_irq;
                adev->ack_notifier.gsi = airq->guest_irq;
        }

        if (adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI)
                return 0;

        if (irqchip_in_kernel(kvm)) {
                if (!msi2intx) {
                        if (adev->irq_requested_type &
                                        KVM_ASSIGNED_DEV_HOST_INTX)
                                free_irq(adev->host_irq, (void *)adev);

                        r = pci_enable_msi(adev->dev);
                        if (r)
                                return r;
                }

                adev->host_irq = adev->dev->irq;
                if (request_irq(adev->host_irq, kvm_assigned_dev_intr, 0,
                                "kvm_assigned_msi_device", (void *)adev))
                        return -EIO;
        }

        if (!msi2intx)
                adev->irq_requested_type = KVM_ASSIGNED_DEV_GUEST_MSI;

        adev->irq_requested_type |= KVM_ASSIGNED_DEV_HOST_MSI;
        return 0;
}
#endif

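/*
 * KVM_ASSIGN_IRQ handler.  On first use it sets up the injection work,
 * ack notifier and IRQ source id, then routes the device either through
 * MSI or through exclusive (non-shared) INTx, honouring the msi2intx
 * module parameter.
 */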
static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
                                   struct kvm_assigned_irq
                                   *assigned_irq)
{
        int r = 0;
        struct kvm_assigned_dev_kernel *match;

        mutex_lock(&kvm->lock);

        match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                      assigned_irq->assigned_dev_id);
        if (!match) {
                mutex_unlock(&kvm->lock);
                return -EINVAL;
        }

        if (!match->irq_requested_type) {
                INIT_WORK(&match->interrupt_work,
                                kvm_assigned_dev_interrupt_work_handler);
                if (irqchip_in_kernel(kvm)) {
                        /* Register ack notifier */
                        match->ack_notifier.gsi = -1;
                        match->ack_notifier.irq_acked =
                                        kvm_assigned_dev_ack_irq;
                        kvm_register_irq_ack_notifier(kvm,
                                        &match->ack_notifier);

                        /* Request IRQ source ID */
                        r = kvm_request_irq_source_id(kvm);
                        if (r < 0)
                                goto out_release;
                        else
                                match->irq_source_id = r;

#ifdef CONFIG_X86
                        /* Determine host device irq type, we can know the
                         * result from dev->msi_enabled */
                        if (msi2intx)
                                pci_enable_msi(match->dev);
#endif
                }
        }

        if ((!msi2intx &&
             (assigned_irq->flags & KVM_DEV_IRQ_ASSIGN_ENABLE_MSI)) ||
            (msi2intx && match->dev->msi_enabled)) {
#ifdef CONFIG_X86
                r = assigned_device_update_msi(kvm, match, assigned_irq);
                if (r) {
                        printk(KERN_WARNING "kvm: failed to enable "
                                        "MSI device!\n");
                        goto out_release;
                }
#else
                r = -ENOTTY;
#endif
        } else if (assigned_irq->host_irq == 0 && match->dev->irq == 0) {
                /* Host device IRQ 0 means don't support INTx */
                if (!msi2intx) {
                        printk(KERN_WARNING
                               "kvm: wait device to enable MSI!\n");
                        r = 0;
                } else {
                        printk(KERN_WARNING
                               "kvm: failed to enable MSI device!\n");
                        r = -ENOTTY;
                        goto out_release;
                }
        } else {
                /* Non-sharing INTx mode */
                r = assigned_device_update_intx(kvm, match, assigned_irq);
                if (r) {
                        printk(KERN_WARNING "kvm: failed to enable "
                                        "INTx device!\n");
                        goto out_release;
                }
        }

        mutex_unlock(&kvm->lock);
        return r;
out_release:
        mutex_unlock(&kvm->lock);
        kvm_free_assigned_device(kvm, match);
        return r;
}

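/*
 * KVM_ASSIGN_PCI_DEVICE handler.  Claims the host PCI device and, when
 * KVM_DEV_ASSIGN_ENABLE_IOMMU is set, maps the guest through the VT-d
 * domain (kvm_iommu_map_guest()) and attaches the device to it via
 * kvm_assign_device().
 */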
static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
                                      struct kvm_assigned_pci_dev *assigned_dev)
{
        int r = 0;
        struct kvm_assigned_dev_kernel *match;
        struct pci_dev *dev;

        mutex_lock(&kvm->lock);

        match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                      assigned_dev->assigned_dev_id);
        if (match) {
                /* device already assigned */
                r = -EINVAL;
                goto out;
        }

        match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
        if (match == NULL) {
                printk(KERN_INFO "%s: Couldn't allocate memory\n",
                       __func__);
                r = -ENOMEM;
                goto out;
        }
        dev = pci_get_bus_and_slot(assigned_dev->busnr,
                                   assigned_dev->devfn);
        if (!dev) {
                printk(KERN_INFO "%s: host device not found\n", __func__);
                r = -EINVAL;
                goto out_free;
        }
        if (pci_enable_device(dev)) {
                printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
                r = -EBUSY;
                goto out_put;
        }
        r = pci_request_regions(dev, "kvm_assigned_device");
        if (r) {
                printk(KERN_INFO "%s: Could not get access to device regions\n",
                       __func__);
                goto out_disable;
        }

        pci_reset_function(dev);

        match->assigned_dev_id = assigned_dev->assigned_dev_id;
        match->host_busnr = assigned_dev->busnr;
        match->host_devfn = assigned_dev->devfn;
        match->dev = dev;
        match->irq_source_id = -1;
        match->kvm = kvm;

        list_add(&match->list, &kvm->arch.assigned_dev_head);

        if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
                if (!kvm->arch.intel_iommu_domain) {
                        r = kvm_iommu_map_guest(kvm);
                        if (r)
                                goto out_list_del;
                }
                r = kvm_assign_device(kvm, match);
                if (r)
                        goto out_list_del;
        }

out:
        mutex_unlock(&kvm->lock);
        return r;
out_list_del:
        list_del(&match->list);
        pci_release_regions(dev);
out_disable:
        pci_disable_device(dev);
out_put:
        pci_dev_put(dev);
out_free:
        kfree(match);
        mutex_unlock(&kvm->lock);
        return r;
}
#endif

static inline int valid_vcpu(int n)
{
        return likely(n >= 0 && n < KVM_MAX_VCPUS);
}

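/*
 * A pfn is treated as MMIO if it lies outside the kernel's memory map or
 * if its struct page is reserved (e.g. a device BAR mapped with VM_PFNMAP).
 */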
inline int kvm_is_mmio_pfn(pfn_t pfn)
{
        if (pfn_valid(pfn))
                return PageReserved(pfn_to_page(pfn));

        return true;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
        int cpu;

        mutex_lock(&vcpu->mutex);
        cpu = get_cpu();
        preempt_notifier_register(&vcpu->preempt_notifier);
        kvm_arch_vcpu_load(vcpu, cpu);
        put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        kvm_arch_vcpu_put(vcpu);
        preempt_notifier_unregister(&vcpu->preempt_notifier);
        preempt_enable();
        mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}

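/*
 * Post a request bit on every vcpu and kick the ones currently running
 * on another cpu with an IPI (ack_flush is deliberately empty; the IPI
 * itself forces the vcpu out of guest mode).  Returns true if any remote
 * cpu had to be notified.
 */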
static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
        int i, cpu, me;
        cpumask_var_t cpus;
        bool called = true;
        struct kvm_vcpu *vcpu;

        if (alloc_cpumask_var(&cpus, GFP_ATOMIC))
                cpumask_clear(cpus);

        me = get_cpu();
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                vcpu = kvm->vcpus[i];
                if (!vcpu)
                        continue;
                if (test_and_set_bit(req, &vcpu->requests))
                        continue;
                cpu = vcpu->cpu;
                if (cpus != NULL && cpu != -1 && cpu != me)
                        cpumask_set_cpu(cpu, cpus);
        }
        if (unlikely(cpus == NULL))
                smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
        else if (!cpumask_empty(cpus))
                smp_call_function_many(cpus, ack_flush, NULL, 1);
        else
                called = false;
        put_cpu();
        free_cpumask_var(cpus);
        return called;
}

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
        if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
                ++kvm->stat.remote_tlb_flush;
}

void kvm_reload_remote_mmus(struct kvm *kvm)
{
        make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
        struct page *page;
        int r;

        mutex_init(&vcpu->mutex);
        vcpu->cpu = -1;
        vcpu->kvm = kvm;
        vcpu->vcpu_id = id;
        init_waitqueue_head(&vcpu->wq);

        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page) {
                r = -ENOMEM;
                goto fail;
        }
        vcpu->run = page_address(page);

        r = kvm_arch_vcpu_init(vcpu);
        if (r < 0)
                goto fail_free_run;
        return 0;

fail_free_run:
        free_page((unsigned long)vcpu->run);
fail:
        return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_uninit(vcpu);
        free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

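/*
 * MMU notifier callbacks keep KVM's shadow page tables coherent with the
 * host mm: invalidations bump mmu_notifier_seq/mmu_notifier_count so that
 * concurrent kvm page faults can detect that a page they looked up may
 * have been freed, and sptes covering the range are zapped through
 * kvm_unmap_hva().
 */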
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
        return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
                                             struct mm_struct *mm,
                                             unsigned long address)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int need_tlb_flush;

        /*
         * When ->invalidate_page runs, the linux pte has been zapped
         * already but the page is still allocated until
         * ->invalidate_page returns. So if we increase the sequence
         * here the kvm page fault will notice if the spte can't be
         * established because the page is going to be freed. If
         * instead the kvm page fault establishes the spte before
         * ->invalidate_page runs, kvm_unmap_hva will release it
         * before returning.
         *
         * The sequence increase only needs to be seen at spin_unlock
         * time, and not at spin_lock time.
         *
         * Increasing the sequence after the spin_unlock would be
         * unsafe because the kvm page fault could then establish the
         * pte after kvm_unmap_hva returned, without noticing the page
         * is going to be freed.
         */
        spin_lock(&kvm->mmu_lock);
        kvm->mmu_notifier_seq++;
        need_tlb_flush = kvm_unmap_hva(kvm, address);
        spin_unlock(&kvm->mmu_lock);

        /* we've to flush the tlb before the pages can be freed */
        if (need_tlb_flush)
                kvm_flush_remote_tlbs(kvm);

}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
                                                    struct mm_struct *mm,
                                                    unsigned long start,
                                                    unsigned long end)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int need_tlb_flush = 0;

        spin_lock(&kvm->mmu_lock);
        /*
         * The count increase must become visible at unlock time as no
         * spte can be established without taking the mmu_lock and
         * count is also read inside the mmu_lock critical section.
         */
        kvm->mmu_notifier_count++;
        for (; start < end; start += PAGE_SIZE)
                need_tlb_flush |= kvm_unmap_hva(kvm, start);
        spin_unlock(&kvm->mmu_lock);

        /* we've to flush the tlb before the pages can be freed */
        if (need_tlb_flush)
                kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
                                                  struct mm_struct *mm,
                                                  unsigned long start,
                                                  unsigned long end)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);

        spin_lock(&kvm->mmu_lock);
        /*
         * This sequence increase will notify the kvm page fault that
         * the page that is going to be mapped in the spte could have
         * been freed.
         */
        kvm->mmu_notifier_seq++;
        /*
         * The above sequence increase must be visible before the
         * below count decrease but both values are read by the kvm
         * page fault under mmu_lock spinlock so we don't need to add
         * a smp_wmb() here in between the two.
         */
        kvm->mmu_notifier_count--;
        spin_unlock(&kvm->mmu_lock);

        BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
                                              struct mm_struct *mm,
                                              unsigned long address)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int young;

        spin_lock(&kvm->mmu_lock);
        young = kvm_age_hva(kvm, address);
        spin_unlock(&kvm->mmu_lock);

        if (young)
                kvm_flush_remote_tlbs(kvm);

        return young;
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
        .invalidate_page        = kvm_mmu_notifier_invalidate_page,
        .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
        .invalidate_range_end   = kvm_mmu_notifier_invalidate_range_end,
        .clear_flush_young      = kvm_mmu_notifier_clear_flush_young,
};
#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

static struct kvm *kvm_create_vm(void)
{
        struct kvm *kvm = kvm_arch_create_vm();
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        struct page *page;
#endif

        if (IS_ERR(kvm))
                goto out;

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page) {
                kfree(kvm);
                return ERR_PTR(-ENOMEM);
        }
        kvm->coalesced_mmio_ring =
                        (struct kvm_coalesced_mmio_ring *)page_address(page);
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
        {
                int err;
                kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
                err = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
                if (err) {
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
                        put_page(page);
#endif
                        kfree(kvm);
                        return ERR_PTR(err);
                }
        }
#endif

        kvm->mm = current->mm;
        atomic_inc(&kvm->mm->mm_count);
        spin_lock_init(&kvm->mmu_lock);
        kvm_io_bus_init(&kvm->pio_bus);
        mutex_init(&kvm->lock);
        kvm_io_bus_init(&kvm->mmio_bus);
        init_rwsem(&kvm->slots_lock);
        atomic_set(&kvm->users_count, 1);
        spin_lock(&kvm_lock);
        list_add(&kvm->vm_list, &vm_list);
        spin_unlock(&kvm_lock);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        kvm_coalesced_mmio_init(kvm);
#endif
out:
        return kvm;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
                                  struct kvm_memory_slot *dont)
{
        if (!dont || free->rmap != dont->rmap)
                vfree(free->rmap);

        if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
                vfree(free->dirty_bitmap);

        if (!dont || free->lpage_info != dont->lpage_info)
                vfree(free->lpage_info);

        free->npages = 0;
        free->dirty_bitmap = NULL;
        free->rmap = NULL;
        free->lpage_info = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
        int i;

        for (i = 0; i < kvm->nmemslots; ++i)
                kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
        struct mm_struct *mm = kvm->mm;

        spin_lock(&kvm_lock);
        list_del(&kvm->vm_list);
        spin_unlock(&kvm_lock);
        kvm_io_bus_destroy(&kvm->pio_bus);
        kvm_io_bus_destroy(&kvm->mmio_bus);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        if (kvm->coalesced_mmio_ring != NULL)
                free_page((unsigned long)kvm->coalesced_mmio_ring);
#endif
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
        mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#endif
        kvm_arch_destroy_vm(kvm);
        mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
        atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
        if (atomic_dec_and_test(&kvm->users_count))
                kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);


static int kvm_vm_release(struct inode *inode, struct file *filp)
{
        struct kvm *kvm = filp->private_data;

        kvm_put_kvm(kvm);
        return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem,
                            int user_alloc)
{
        int r;
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long i;
        struct kvm_memory_slot *memslot;
        struct kvm_memory_slot old, new;

        r = -EINVAL;
        /* General sanity checks */
        if (mem->memory_size & (PAGE_SIZE - 1))
                goto out;
        if (mem->guest_phys_addr & (PAGE_SIZE - 1))
                goto out;
        if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
                goto out;
        if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
                goto out;
        if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
                goto out;

        memslot = &kvm->memslots[mem->slot];
        base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
        npages = mem->memory_size >> PAGE_SHIFT;

        if (!npages)
                mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

        new = old = *memslot;

        new.base_gfn = base_gfn;
        new.npages = npages;
        new.flags = mem->flags;

        /* Disallow changing a memory slot's size. */
        r = -EINVAL;
        if (npages && old.npages && npages != old.npages)
                goto out_free;

        /* Check for overlaps */
        r = -EEXIST;
        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
                struct kvm_memory_slot *s = &kvm->memslots[i];

                if (s == memslot)
                        continue;
                if (!((base_gfn + npages <= s->base_gfn) ||
                      (base_gfn >= s->base_gfn + s->npages)))
                        goto out_free;
        }

        /* Free page dirty bitmap if unneeded */
        if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
                new.dirty_bitmap = NULL;

        r = -ENOMEM;

        /* Allocate if a slot is being created */
#ifndef CONFIG_S390
        if (npages && !new.rmap) {
                new.rmap = vmalloc(npages * sizeof(struct page *));

                if (!new.rmap)
                        goto out_free;

                memset(new.rmap, 0, npages * sizeof(*new.rmap));

                new.user_alloc = user_alloc;
                /*
                 * hva_to_rmmap() serializes with the mmu_lock and to be
                 * safe it has to ignore memslots with !user_alloc &&
                 * !userspace_addr.
                 */
                if (user_alloc)
                        new.userspace_addr = mem->userspace_addr;
                else
                        new.userspace_addr = 0;
        }
        if (npages && !new.lpage_info) {
                int largepages = npages / KVM_PAGES_PER_HPAGE;
                if (npages % KVM_PAGES_PER_HPAGE)
                        largepages++;
                if (base_gfn % KVM_PAGES_PER_HPAGE)
                        largepages++;

                new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));

                if (!new.lpage_info)
                        goto out_free;

                memset(new.lpage_info, 0, largepages * sizeof(*new.lpage_info));

                if (base_gfn % KVM_PAGES_PER_HPAGE)
                        new.lpage_info[0].write_count = 1;
                if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE)
                        new.lpage_info[largepages-1].write_count = 1;
        }

        /* Allocate page dirty bitmap if needed */
        if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
                unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

                new.dirty_bitmap = vmalloc(dirty_bytes);
                if (!new.dirty_bitmap)
                        goto out_free;
                memset(new.dirty_bitmap, 0, dirty_bytes);
        }
#endif /* not defined CONFIG_S390 */

        if (!npages)
                kvm_arch_flush_shadow(kvm);

        spin_lock(&kvm->mmu_lock);
        if (mem->slot >= kvm->nmemslots)
                kvm->nmemslots = mem->slot + 1;

        *memslot = new;
        spin_unlock(&kvm->mmu_lock);

        r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
        if (r) {
                spin_lock(&kvm->mmu_lock);
                *memslot = old;
                spin_unlock(&kvm->mmu_lock);
                goto out_free;
        }

        kvm_free_physmem_slot(&old, npages ? &new : NULL);
        /* Slot deletion case: we have to update the current slot */
        if (!npages)
                *memslot = old;
#ifdef CONFIG_DMAR
        /* map the pages in iommu page table */
        r = kvm_iommu_map_pages(kvm, base_gfn, npages);
        if (r)
                goto out;
#endif
        return 0;

out_free:
        kvm_free_physmem_slot(&new, &old);
out:
        return r;

}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem,
                          int user_alloc)
{
        int r;

        down_write(&kvm->slots_lock);
        r = __kvm_set_memory_region(kvm, mem, user_alloc);
        up_write(&kvm->slots_lock);
        return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                   struct
                                   kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        if (mem->slot >= KVM_MEMORY_SLOTS)
                return -EINVAL;
        return kvm_set_memory_region(kvm, mem, user_alloc);
}

int kvm_get_dirty_log(struct kvm *kvm,
                        struct kvm_dirty_log *log, int *is_dirty)
{
        struct kvm_memory_slot *memslot;
        int r, i;
        int n;
        unsigned long any = 0;

        r = -EINVAL;
        if (log->slot >= KVM_MEMORY_SLOTS)
                goto out;

        memslot = &kvm->memslots[log->slot];
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;

        n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

        for (i = 0; !any && i < n/sizeof(long); ++i)
                any = memslot->dirty_bitmap[i];

        r = -EFAULT;
        if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
                goto out;

        if (any)
                *is_dirty = 1;

        r = 0;
out:
        return r;
}

int is_error_page(struct page *page)
{
        return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
        return pfn == bad_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

static inline unsigned long bad_hva(void)
{
        return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
        return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
{
        int i;

        for (i = 0; i < kvm->nmemslots; ++i) {
                struct kvm_memory_slot *memslot = &kvm->memslots[i];

                if (gfn >= memslot->base_gfn
                    && gfn < memslot->base_gfn + memslot->npages)
                        return memslot;
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
        gfn = unalias_gfn(kvm, gfn);
        return gfn_to_memslot_unaliased(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
        int i;

        gfn = unalias_gfn(kvm, gfn);
        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
                struct kvm_memory_slot *memslot = &kvm->memslots[i];

                if (gfn >= memslot->base_gfn
                    && gfn < memslot->base_gfn + memslot->npages)
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *slot;

        gfn = unalias_gfn(kvm, gfn);
        slot = gfn_to_memslot_unaliased(kvm, gfn);
        if (!slot)
                return bad_hva();
        return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

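/*
 * Translate a guest frame number to a host pfn.  Normal memory is pinned
 * with get_user_pages_fast(); if that fails, fall back to a VM_PFNMAP
 * vma lookup so that device memory mapped into the process also works.
 * On error the pfn of bad_page is returned.
 */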
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
        struct page *page[1];
        unsigned long addr;
        int npages;
        pfn_t pfn;

        might_sleep();

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr)) {
                get_page(bad_page);
                return page_to_pfn(bad_page);
        }

        npages = get_user_pages_fast(addr, 1, 1, page);

        if (unlikely(npages != 1)) {
                struct vm_area_struct *vma;

                down_read(&current->mm->mmap_sem);
                vma = find_vma(current->mm, addr);

                if (vma == NULL || addr < vma->vm_start ||
                    !(vma->vm_flags & VM_PFNMAP)) {
                        up_read(&current->mm->mmap_sem);
                        get_page(bad_page);
                        return page_to_pfn(bad_page);
                }

                pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
                up_read(&current->mm->mmap_sem);
                BUG_ON(!kvm_is_mmio_pfn(pfn));
        } else
                pfn = page_to_pfn(page[0]);

        return pfn;
}

EXPORT_SYMBOL_GPL(gfn_to_pfn);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
        pfn_t pfn;

        pfn = gfn_to_pfn(kvm, gfn);
        if (!kvm_is_mmio_pfn(pfn))
                return pfn_to_page(pfn);

        WARN_ON(kvm_is_mmio_pfn(pfn));

        get_page(bad_page);
        return bad_page;
}

EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
        kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
        if (!kvm_is_mmio_pfn(pfn))
                put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
        kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
        kvm_set_pfn_dirty(pfn);
        kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
        kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
        if (!kvm_is_mmio_pfn(pfn)) {
                struct page *page = pfn_to_page(pfn);
                if (!PageReserved(page))
                        SetPageDirty(page);
        }
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
        if (!kvm_is_mmio_pfn(pfn))
                mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
        if (!kvm_is_mmio_pfn(pfn))
                get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);

static int next_segment(unsigned long len, int offset)
{
        if (len > PAGE_SIZE - offset)
                return PAGE_SIZE - offset;
        else
                return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len)
{
        int r;
        unsigned long addr;

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        r = copy_from_user(data, (void __user *)addr + offset, len);
        if (r)
                return -EFAULT;
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                data += seg;
                ++gfn;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
                          unsigned long len)
{
        int r;
        unsigned long addr;
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int offset = offset_in_page(gpa);

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        pagefault_disable();
        r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
        pagefault_enable();
        if (r)
                return -EFAULT;
        return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len)
{
        int r;
        unsigned long addr;

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        r = copy_to_user((void __user *)addr + offset, data, len);
        if (r)
                return -EFAULT;
        mark_page_dirty(kvm, gfn);
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                data += seg;
                ++gfn;
        }
        return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
        return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                ++gfn;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *memslot;

        gfn = unalias_gfn(kvm, gfn);
        memslot = gfn_to_memslot_unaliased(kvm, gfn);
        if (memslot && memslot->dirty_bitmap) {
                unsigned long rel_gfn = gfn - memslot->base_gfn;

                /* avoid RMW */
                if (!test_bit(rel_gfn, memslot->dirty_bitmap))
                        set_bit(rel_gfn, memslot->dirty_bitmap);
        }
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
        DEFINE_WAIT(wait);

        for (;;) {
                prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

                if (kvm_cpu_has_interrupt(vcpu) ||
                    kvm_cpu_has_pending_timer(vcpu) ||
                    kvm_arch_vcpu_runnable(vcpu)) {
                        set_bit(KVM_REQ_UNHALT, &vcpu->requests);
                        break;
                }
                if (signal_pending(current))
                        break;

                vcpu_put(vcpu);
                schedule();
                vcpu_load(vcpu);
        }

        finish_wait(&vcpu->wq, &wait);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
        if (!need_resched())
                return;
        cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

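/*
 * Page faults on the vcpu fd's mmap area: page 0 is the kvm_run
 * structure, followed (depending on configuration) by the x86 pio data
 * page and the coalesced mmio ring page.
 */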
static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct kvm_vcpu *vcpu = vma->vm_file->private_data;
        struct page *page;

        if (vmf->pgoff == 0)
                page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
        else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
                page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
                page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
        else
                return VM_FAULT_SIGBUS;
        get_page(page);
        vmf->page = page;
        return 0;
}

static struct vm_operations_struct kvm_vcpu_vm_ops = {
        .fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_vcpu_vm_ops;
        return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
        struct kvm_vcpu *vcpu = filp->private_data;

        kvm_put_kvm(vcpu->kvm);
        return 0;
}

static struct file_operations kvm_vcpu_fops = {
        .release = kvm_vcpu_release,
        .unlocked_ioctl = kvm_vcpu_ioctl,
        .compat_ioctl = kvm_vcpu_ioctl,
        .mmap = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
        int fd = anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
        if (fd < 0)
                kvm_put_kvm(vcpu->kvm);
        return fd;
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
        int r;
        struct kvm_vcpu *vcpu;

        if (!valid_vcpu(n))
                return -EINVAL;

        vcpu = kvm_arch_vcpu_create(kvm, n);
        if (IS_ERR(vcpu))
                return PTR_ERR(vcpu);

        preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

        r = kvm_arch_vcpu_setup(vcpu);
        if (r)
                return r;

        mutex_lock(&kvm->lock);
        if (kvm->vcpus[n]) {
                r = -EEXIST;
                goto vcpu_destroy;
        }
        kvm->vcpus[n] = vcpu;
        mutex_unlock(&kvm->lock);

        /* Now it's all set up, let userspace reach it */
        kvm_get_kvm(kvm);
        r = create_vcpu_fd(vcpu);
        if (r < 0)
                goto unlink;
        return r;

unlink:
        mutex_lock(&kvm->lock);
        kvm->vcpus[n] = NULL;
vcpu_destroy:
        mutex_unlock(&kvm->lock);
        kvm_arch_vcpu_destroy(vcpu);
        return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
        if (sigset) {
                sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
                vcpu->sigset_active = 1;
                vcpu->sigset = *sigset;
        } else
                vcpu->sigset_active = 0;
        return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
                           unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;
        struct kvm_fpu *fpu = NULL;
        struct kvm_sregs *kvm_sregs = NULL;

        if (vcpu->kvm->mm != current->mm)
                return -EIO;
        switch (ioctl) {
        case KVM_RUN:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
                break;
        case KVM_GET_REGS: {
                struct kvm_regs *kvm_regs;

                r = -ENOMEM;
                kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
                if (!kvm_regs)
                        goto out;
                r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
                if (r)
                        goto out_free1;
                r = -EFAULT;
                if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
                        goto out_free1;
                r = 0;
out_free1:
                kfree(kvm_regs);
                break;
        }
        case KVM_SET_REGS: {
                struct kvm_regs *kvm_regs;

                r = -ENOMEM;
                kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
                if (!kvm_regs)
                        goto out;
                r = -EFAULT;
                if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
                        goto out_free2;
                r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
                if (r)
                        goto out_free2;
                r = 0;
out_free2:
                kfree(kvm_regs);
                break;
        }
        case KVM_GET_SREGS: {
                kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
                r = -ENOMEM;
                if (!kvm_sregs)
                        goto out;
                r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_SREGS: {
                kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
                r = -ENOMEM;
                if (!kvm_sregs)
                        goto out;
                r = -EFAULT;
                if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
                        goto out;
                r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_GET_MP_STATE: {
                struct kvm_mp_state mp_state;

                r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &mp_state, sizeof mp_state))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_MP_STATE: {
                struct kvm_mp_state mp_state;

                r = -EFAULT;
                if (copy_from_user(&mp_state, argp, sizeof mp_state))
                        goto out;
                r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_TRANSLATE: {
                struct kvm_translation tr;

                r = -EFAULT;
                if (copy_from_user(&tr, argp, sizeof tr))
                        goto out;
                r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &tr, sizeof tr))
                        goto out;
                r = 0;
                break;
        }
        case KVM_DEBUG_GUEST: {
                struct kvm_debug_guest dbg;

                r = -EFAULT;
                if (copy_from_user(&dbg, argp, sizeof dbg))
                        goto out;
                r = kvm_arch_vcpu_ioctl_debug_guest(vcpu, &dbg);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_SIGNAL_MASK: {
                struct kvm_signal_mask __user *sigmask_arg = argp;
                struct kvm_signal_mask kvm_sigmask;
                sigset_t sigset, *p;

                p = NULL;
                if (argp) {
                        r = -EFAULT;
                        if (copy_from_user(&kvm_sigmask, argp,
                                           sizeof kvm_sigmask))
                                goto out;
                        r = -EINVAL;
                        if (kvm_sigmask.len != sizeof sigset)
                                goto out;
                        r = -EFAULT;
                        if (copy_from_user(&sigset, sigmask_arg->sigset,
                                           sizeof sigset))
                                goto out;
                        p = &sigset;
                }
                /* pass NULL when no mask was supplied, to clear it */
                r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
                break;
        }
        case KVM_GET_FPU: {
                fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
                r = -ENOMEM;
                if (!fpu)
                        goto out;
                r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_FPU: {
                fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
                r = -ENOMEM;
                if (!fpu)
                        goto out;
                r = -EFAULT;
                if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
                        goto out;
                r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        default:
                r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
        }
out:
        kfree(fpu);
        kfree(kvm_sregs);
        return r;
}

static long kvm_vm_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        if (kvm->mm != current->mm)
                return -EIO;
        switch (ioctl) {
        case KVM_CREATE_VCPU:
                r = kvm_vm_ioctl_create_vcpu(kvm, arg);
                if (r < 0)
                        goto out;
                break;
        case KVM_SET_USER_MEMORY_REGION: {
                struct kvm_userspace_memory_region kvm_userspace_mem;

                r = -EFAULT;
                if (copy_from_user(&kvm_userspace_mem, argp,
                                                sizeof kvm_userspace_mem))
                        goto out;

                r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
                if (r)
                        goto out;
                break;
        }
        case KVM_GET_DIRTY_LOG: {
                struct kvm_dirty_log log;

                r = -EFAULT;
                if (copy_from_user(&log, argp, sizeof log))
                        goto out;
                r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
                if (r)
                        goto out;
                break;
        }
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        case KVM_REGISTER_COALESCED_MMIO: {
                struct kvm_coalesced_mmio_zone zone;
                r = -EFAULT;
                if (copy_from_user(&zone, argp, sizeof zone))
                        goto out;
                r = -ENXIO;
                r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_UNREGISTER_COALESCED_MMIO: {
                struct kvm_coalesced_mmio_zone zone;
                r = -EFAULT;
                if (copy_from_user(&zone, argp, sizeof zone))
                        goto out;
                r = -ENXIO;
                r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
                if (r)
                        goto out;
                r = 0;
                break;
        }
#endif
#ifdef KVM_CAP_DEVICE_ASSIGNMENT
        case KVM_ASSIGN_PCI_DEVICE: {
                struct kvm_assigned_pci_dev assigned_dev;

                r = -EFAULT;
                if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
                        goto out;
                r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
                if (r)
                        goto out;
                break;
        }
        case KVM_ASSIGN_IRQ: {
                struct kvm_assigned_irq assigned_irq;

                r = -EFAULT;
                if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
                        goto out;
                r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
                if (r)
                        goto out;
                break;
        }
#endif
        default:
                r = kvm_arch_vm_ioctl(filp, ioctl, arg);
        }
out:
        return r;
}

static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct page *page[1];
        unsigned long addr;
        int npages;
        gfn_t gfn = vmf->pgoff;
        struct kvm *kvm = vma->vm_file->private_data;

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return VM_FAULT_SIGBUS;

        npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
                                NULL);
        if (unlikely(npages != 1))
                return VM_FAULT_SIGBUS;

        vmf->page = page[0];
        return 0;
}

static struct vm_operations_struct kvm_vm_vm_ops = {
        .fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_vm_vm_ops;
        return 0;
}

static struct file_operations kvm_vm_fops = {
        .release = kvm_vm_release,
        .unlocked_ioctl = kvm_vm_ioctl,
        .compat_ioctl = kvm_vm_ioctl,
        .mmap = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
        int fd;
        struct kvm *kvm;

        kvm = kvm_create_vm();
        if (IS_ERR(kvm))
                return PTR_ERR(kvm);
        fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, 0);
        if (fd < 0)
                kvm_put_kvm(kvm);

        return fd;
}

static long kvm_dev_ioctl_check_extension_generic(long arg)
{
        switch (arg) {
        case KVM_CAP_USER_MEMORY:
        case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
                return 1;
        default:
                break;
        }
        return kvm_dev_ioctl_check_extension(arg);
}

f17abe9a
AK
1938static long kvm_dev_ioctl(struct file *filp,
1939 unsigned int ioctl, unsigned long arg)
1940{
07c45a36 1941 long r = -EINVAL;
f17abe9a
AK
1942
1943 switch (ioctl) {
1944 case KVM_GET_API_VERSION:
f0fe5108
AK
1945 r = -EINVAL;
1946 if (arg)
1947 goto out;
f17abe9a
AK
1948 r = KVM_API_VERSION;
1949 break;
1950 case KVM_CREATE_VM:
f0fe5108
AK
1951 r = -EINVAL;
1952 if (arg)
1953 goto out;
f17abe9a
AK
1954 r = kvm_dev_ioctl_create_vm();
1955 break;
018d00d2 1956 case KVM_CHECK_EXTENSION:
1a811b61 1957 r = kvm_dev_ioctl_check_extension_generic(arg);
5d308f45 1958 break;
07c45a36
AK
1959 case KVM_GET_VCPU_MMAP_SIZE:
1960 r = -EINVAL;
1961 if (arg)
1962 goto out;
adb1ff46
AK
1963 r = PAGE_SIZE; /* struct kvm_run */
1964#ifdef CONFIG_X86
1965 r += PAGE_SIZE; /* pio data page */
5f94c174
LV
1966#endif
1967#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
1968 r += PAGE_SIZE; /* coalesced mmio ring page */
adb1ff46 1969#endif
07c45a36 1970 break;
d4c9ff2d
FEL
1971 case KVM_TRACE_ENABLE:
1972 case KVM_TRACE_PAUSE:
1973 case KVM_TRACE_DISABLE:
1974 r = kvm_trace_ioctl(ioctl, arg);
1975 break;
6aa8b732 1976 default:
043405e1 1977 return kvm_arch_dev_ioctl(filp, ioctl, arg);
6aa8b732
AK
1978 }
1979out:
1980 return r;
1981}
1982
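/*
 * Illustrative userspace sketch (not part of this file): probing the
 * capability and mmap-size queries handled above before setting up a
 * vcpu.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	static long vcpu_region_size(int kvm_fd)
 *	{
 *		if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY) <= 0)
 *			return -1;	// kernel too old for user memory slots
 *		// one page for struct kvm_run plus the arch extras above
 *		return ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	}
 */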
6aa8b732 1983static struct file_operations kvm_chardev_ops = {
6aa8b732
AK
1984 .unlocked_ioctl = kvm_dev_ioctl,
1985 .compat_ioctl = kvm_dev_ioctl,
6aa8b732
AK
1986};
1987
1988static struct miscdevice kvm_dev = {
bbe4432e 1989 KVM_MINOR,
6aa8b732
AK
1990 "kvm",
1991 &kvm_chardev_ops,
1992};
1993
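/*
 * hardware_enable()/hardware_disable() toggle virtualization support on
 * the calling cpu exactly once; cpus_hardware_enabled tracks which cpus
 * currently have it switched on.
 */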
1b6c0168
AK
1994static void hardware_enable(void *junk)
1995{
1996 int cpu = raw_smp_processor_id();
1997
7f59f492 1998 if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
1b6c0168 1999 return;
7f59f492 2000 cpumask_set_cpu(cpu, cpus_hardware_enabled);
e9b11c17 2001 kvm_arch_hardware_enable(NULL);
1b6c0168
AK
2002}
2003
2004static void hardware_disable(void *junk)
2005{
2006 int cpu = raw_smp_processor_id();
2007
7f59f492 2008 if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
1b6c0168 2009 return;
7f59f492 2010 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
e9b11c17 2011 kvm_arch_hardware_disable(NULL);
1b6c0168
AK
2012}
2013
774c47f1
AK
2014static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
2015 void *v)
2016{
2017 int cpu = (long)v;
2018
1a6f4d7f 2019 val &= ~CPU_TASKS_FROZEN;
774c47f1 2020 switch (val) {
cec9ad27 2021 case CPU_DYING:
6ec8a856
AK
2022 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
2023 cpu);
2024 hardware_disable(NULL);
2025 break;
774c47f1 2026 case CPU_UP_CANCELED:
43934a38
JK
2027 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
2028 cpu);
8691e5a8 2029 smp_call_function_single(cpu, hardware_disable, NULL, 1);
774c47f1 2030 break;
43934a38
JK
2031 case CPU_ONLINE:
2032 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
2033 cpu);
8691e5a8 2034 smp_call_function_single(cpu, hardware_enable, NULL, 1);
774c47f1
AK
2035 break;
2036 }
2037 return NOTIFY_OK;
2038}
2039
4ecac3fd
AK
2040
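/*
 * Arch code lands here through an exception fixup wrapped around vmx/svm
 * instructions (see the handle-fault-on-reboot macros in asm/kvm_host.h):
 * once kvm_rebooting is set, such an instruction faulting because the
 * hardware was just disabled parks the cpu instead of triggering BUG().
 */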
2041asmlinkage void kvm_handle_fault_on_reboot(void)
2042{
2043 if (kvm_rebooting)
2044 /* spin while reset goes on */
2045 while (true)
2046 ;
2047 /* Fault while not rebooting. We want the trace. */
2048 BUG();
2049}
2050EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);
2051
9a2b85c6 2052static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
d77c26fc 2053 void *v)
9a2b85c6
RR
2054{
2055 if (val == SYS_RESTART) {
2056 /*
2057 * Some (well, at least mine) BIOSes hang on reboot if
2058 * in vmx root mode.
2059 */
2060 printk(KERN_INFO "kvm: exiting hardware virtualization\n");
4ecac3fd 2061 kvm_rebooting = true;
15c8b6c1 2062 on_each_cpu(hardware_disable, NULL, 1);
9a2b85c6
RR
2063 }
2064 return NOTIFY_OK;
2065}
2066
2067static struct notifier_block kvm_reboot_notifier = {
2068 .notifier_call = kvm_reboot,
2069 .priority = 0,
2070};
2071
2eeb2e94
GH
2072void kvm_io_bus_init(struct kvm_io_bus *bus)
2073{
2074 memset(bus, 0, sizeof(*bus));
2075}
2076
2077void kvm_io_bus_destroy(struct kvm_io_bus *bus)
2078{
2079 int i;
2080
2081 for (i = 0; i < bus->dev_count; i++) {
2082 struct kvm_io_device *pos = bus->devs[i];
2083
2084 kvm_iodevice_destructor(pos);
2085 }
2086}
2087
92760499
LV
2088struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
2089 gpa_t addr, int len, int is_write)
2eeb2e94
GH
2090{
2091 int i;
2092
2093 for (i = 0; i < bus->dev_count; i++) {
2094 struct kvm_io_device *pos = bus->devs[i];
2095
92760499 2096 if (pos->in_range(pos, addr, len, is_write))
2eeb2e94
GH
2097 return pos;
2098 }
2099
2100 return NULL;
2101}
2102
2103void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
2104{
2105 BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));
2106
2107 bus->devs[bus->dev_count++] = dev;
2108}
2109
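/*
 * In-kernel usage sketch (illustrative; "my_dev" and its callbacks are
 * hypothetical): a device fills in the kvm_io_device callbacks and is
 * attached to a bus, e.g. the per-VM mmio bus. Dispatch is by address:
 * kvm_io_bus_find_dev() returns the first device whose in_range() claims
 * the access, so ranges registered on one bus must not overlap.
 *
 *	static struct kvm_io_device my_dev = {
 *		.read = my_dev_read,
 *		.write = my_dev_write,
 *		.in_range = my_dev_in_range,
 *		.destructor = my_dev_destructor,
 *	};
 *
 *	kvm_io_bus_register_dev(&kvm->mmio_bus, &my_dev);
 */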
774c47f1
AK
2110static struct notifier_block kvm_cpu_notifier = {
2111 .notifier_call = kvm_cpu_hotplug,
2112 .priority = 20, /* must be > scheduler priority */
2113};
2114
8b88b099 2115static int vm_stat_get(void *_offset, u64 *val)
ba1389b7
AK
2116{
2117 unsigned offset = (long)_offset;
ba1389b7
AK
2118 struct kvm *kvm;
2119
8b88b099 2120 *val = 0;
ba1389b7
AK
2121 spin_lock(&kvm_lock);
2122 list_for_each_entry(kvm, &vm_list, vm_list)
8b88b099 2123 *val += *(u32 *)((void *)kvm + offset);
ba1389b7 2124 spin_unlock(&kvm_lock);
8b88b099 2125 return 0;
ba1389b7
AK
2126}
2127
2128DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");
2129
8b88b099 2130static int vcpu_stat_get(void *_offset, u64 *val)
1165f5fe
AK
2131{
2132 unsigned offset = (long)_offset;
1165f5fe
AK
2133 struct kvm *kvm;
2134 struct kvm_vcpu *vcpu;
2135 int i;
2136
8b88b099 2137 *val = 0;
1165f5fe
AK
2138 spin_lock(&kvm_lock);
2139 list_for_each_entry(kvm, &vm_list, vm_list)
2140 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
fb3f0f51
RR
2141 vcpu = kvm->vcpus[i];
2142 if (vcpu)
8b88b099 2143 *val += *(u32 *)((void *)vcpu + offset);
1165f5fe
AK
2144 }
2145 spin_unlock(&kvm_lock);
8b88b099 2146 return 0;
1165f5fe
AK
2147}
2148
ba1389b7
AK
2149DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
2150
2151static struct file_operations *stat_fops[] = {
2152 [KVM_STAT_VCPU] = &vcpu_stat_fops,
2153 [KVM_STAT_VM] = &vm_stat_fops,
2154};
1165f5fe 2155
a16b043c 2156static void kvm_init_debug(void)
6aa8b732
AK
2157{
2158 struct kvm_stats_debugfs_item *p;
2159
76f7c879 2160 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
6aa8b732 2161 for (p = debugfs_entries; p->name; ++p)
76f7c879 2162 p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
1165f5fe 2163 (void *)(long)p->offset,
ba1389b7 2164 stat_fops[p->kind]);
6aa8b732
AK
2165}
2166
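/*
 * The entries wired up above appear as one read-only file per statistic
 * under /sys/kernel/debug/kvm/. A minimal userspace sketch ("exits" is
 * an arch-defined entry name, used here only as an example):
 *
 *	#include <stdio.h>
 *
 *	static void print_exits(void)
 *	{
 *		unsigned long long v;
 *		FILE *f = fopen("/sys/kernel/debug/kvm/exits", "r");
 *
 *		if (f && fscanf(f, "%llu", &v) == 1)
 *			printf("total vm exits: %llu\n", v);
 *		if (f)
 *			fclose(f);
 *	}
 */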
2167static void kvm_exit_debug(void)
2168{
2169 struct kvm_stats_debugfs_item *p;
2170
2171 for (p = debugfs_entries; p->name; ++p)
2172 debugfs_remove(p->dentry);
76f7c879 2173 debugfs_remove(kvm_debugfs_dir);
6aa8b732
AK
2174}
2175
59ae6c6b
AK
2176static int kvm_suspend(struct sys_device *dev, pm_message_t state)
2177{
4267c41a 2178 hardware_disable(NULL);
59ae6c6b
AK
2179 return 0;
2180}
2181
2182static int kvm_resume(struct sys_device *dev)
2183{
4267c41a 2184 hardware_enable(NULL);
59ae6c6b
AK
2185 return 0;
2186}
2187
2188static struct sysdev_class kvm_sysdev_class = {
af5ca3f4 2189 .name = "kvm",
59ae6c6b
AK
2190 .suspend = kvm_suspend,
2191 .resume = kvm_resume,
2192};
2193
2194static struct sys_device kvm_sysdev = {
2195 .id = 0,
2196 .cls = &kvm_sysdev_class,
2197};
2198
cea7bb21 2199struct page *bad_page;
35149e21 2200pfn_t bad_pfn;
6aa8b732 2201
15ad7146
AK
2202static inline
2203struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
2204{
2205 return container_of(pn, struct kvm_vcpu, preempt_notifier);
2206}
2207
2208static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
2209{
2210 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
2211
e9b11c17 2212 kvm_arch_vcpu_load(vcpu, cpu);
15ad7146
AK
2213}
2214
2215static void kvm_sched_out(struct preempt_notifier *pn,
2216 struct task_struct *next)
2217{
2218 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
2219
e9b11c17 2220 kvm_arch_vcpu_put(vcpu);
15ad7146
AK
2221}
2222
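/*
 * These hooks give vcpu threads lazy state handling: arch state is saved
 * on preemption and reloaded only when the task runs again. They fire
 * once the notifier is registered, which vcpu init and vcpu_load()
 * elsewhere in this file arrange, roughly:
 *
 *	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
 *	preempt_notifier_register(&vcpu->preempt_notifier);
 *	// ... vcpu runs; kvm_sched_in()/kvm_sched_out() above are invoked ...
 *	preempt_notifier_unregister(&vcpu->preempt_notifier);
 */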
f8c16bba 2223int kvm_init(void *opaque, unsigned int vcpu_size,
c16f862d 2224 struct module *module)
6aa8b732
AK
2225{
2226 int r;
002c7f7c 2227 int cpu;
6aa8b732 2228
cb498ea2
ZX
2229 kvm_init_debug();
2230
f8c16bba
ZX
2231 r = kvm_arch_init(opaque);
2232 if (r)
d2308784 2233 goto out_fail;
cb498ea2
ZX
2234
2235 bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2236
2237 if (bad_page == NULL) {
2238 r = -ENOMEM;
2239 goto out;
2240 }
2241
35149e21
AL
2242 bad_pfn = page_to_pfn(bad_page);
2243
7f59f492
RR
2244 if (!alloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
2245 r = -ENOMEM;
2246 goto out_free_0;
2247 }
2248
e9b11c17 2249 r = kvm_arch_hardware_setup();
6aa8b732 2250 if (r < 0)
7f59f492 2251 goto out_free_0a;
6aa8b732 2252
002c7f7c
YS
2253 for_each_online_cpu(cpu) {
2254 smp_call_function_single(cpu,
e9b11c17 2255 kvm_arch_check_processor_compat,
8691e5a8 2256 &r, 1);
002c7f7c 2257 if (r < 0)
d2308784 2258 goto out_free_1;
002c7f7c
YS
2259 }
2260
15c8b6c1 2261 on_each_cpu(hardware_enable, NULL, 1);
774c47f1
AK
2262 r = register_cpu_notifier(&kvm_cpu_notifier);
2263 if (r)
d2308784 2264 goto out_free_2;
6aa8b732
AK
2265 register_reboot_notifier(&kvm_reboot_notifier);
2266
59ae6c6b
AK
2267 r = sysdev_class_register(&kvm_sysdev_class);
2268 if (r)
d2308784 2269 goto out_free_3;
59ae6c6b
AK
2270
2271 r = sysdev_register(&kvm_sysdev);
2272 if (r)
d2308784 2273 goto out_free_4;
59ae6c6b 2274
c16f862d
RR
2275 /* A kmem cache lets us meet the alignment requirements of fx_save. */
2276 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
56919c5c
JP
2277 __alignof__(struct kvm_vcpu),
2278 0, NULL);
c16f862d
RR
2279 if (!kvm_vcpu_cache) {
2280 r = -ENOMEM;
d2308784 2281 goto out_free_5;
c16f862d
RR
2282 }
2283
6aa8b732 2284 kvm_chardev_ops.owner = module;
3d3aab1b
CB
2285 kvm_vm_fops.owner = module;
2286 kvm_vcpu_fops.owner = module;
6aa8b732
AK
2287
2288 r = misc_register(&kvm_dev);
2289 if (r) {
d77c26fc 2290 printk(KERN_ERR "kvm: misc device register failed\n");
6aa8b732
AK
2291 goto out_free;
2292 }
2293
15ad7146
AK
2294 kvm_preempt_ops.sched_in = kvm_sched_in;
2295 kvm_preempt_ops.sched_out = kvm_sched_out;
5319c662
SY
2296#ifndef CONFIG_X86
2297 msi2intx = 0;
2298#endif
15ad7146 2299
c7addb90 2300 return 0;
6aa8b732
AK
2301
2302out_free:
c16f862d 2303 kmem_cache_destroy(kvm_vcpu_cache);
d2308784 2304out_free_5:
59ae6c6b 2305 sysdev_unregister(&kvm_sysdev);
d2308784 2306out_free_4:
59ae6c6b 2307 sysdev_class_unregister(&kvm_sysdev_class);
d2308784 2308out_free_3:
6aa8b732 2309 unregister_reboot_notifier(&kvm_reboot_notifier);
774c47f1 2310 unregister_cpu_notifier(&kvm_cpu_notifier);
d2308784 2311out_free_2:
15c8b6c1 2312 on_each_cpu(hardware_disable, NULL, 1);
d2308784 2313out_free_1:
e9b11c17 2314 kvm_arch_hardware_unsetup();
7f59f492
RR
2315out_free_0a:
2316 free_cpumask_var(cpus_hardware_enabled);
d2308784
ZX
2317out_free_0:
2318 __free_page(bad_page);
ca45aaae 2319out:
f8c16bba 2320 kvm_arch_exit();
cb498ea2 2321 kvm_exit_debug();
d2308784 2322out_fail:
6aa8b732
AK
2323 return r;
2324}
cb498ea2 2325EXPORT_SYMBOL_GPL(kvm_init);
6aa8b732 2326
cb498ea2 2327void kvm_exit(void)
6aa8b732 2328{
d4c9ff2d 2329 kvm_trace_cleanup();
6aa8b732 2330 misc_deregister(&kvm_dev);
c16f862d 2331 kmem_cache_destroy(kvm_vcpu_cache);
59ae6c6b
AK
2332 sysdev_unregister(&kvm_sysdev);
2333 sysdev_class_unregister(&kvm_sysdev_class);
6aa8b732 2334 unregister_reboot_notifier(&kvm_reboot_notifier);
59ae6c6b 2335 unregister_cpu_notifier(&kvm_cpu_notifier);
15c8b6c1 2336 on_each_cpu(hardware_disable, NULL, 1);
e9b11c17 2337 kvm_arch_hardware_unsetup();
f8c16bba 2338 kvm_arch_exit();
6aa8b732 2339 kvm_exit_debug();
7f59f492 2340 free_cpumask_var(cpus_hardware_enabled);
cea7bb21 2341 __free_page(bad_page);
6aa8b732 2342}
cb498ea2 2343EXPORT_SYMBOL_GPL(kvm_exit);