/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
#include "coalesced_mmio.h"
#endif

#ifdef KVM_CAP_DEVICE_ASSIGNMENT
#include <linux/pci.h>
#include <linux/interrupt.h>
#include "irq.h"
#endif

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/*
 * Ordering of locks:
 *
 * kvm->lock --> kvm->irq_lock
 */

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);

static bool kvm_rebooting;

#ifdef KVM_CAP_DEVICE_ASSIGNMENT
static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
						      int assigned_dev_id)
{
	struct list_head *ptr;
	struct kvm_assigned_dev_kernel *match;

	list_for_each(ptr, head) {
		match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
		if (match->assigned_dev_id == assigned_dev_id)
			return match;
	}
	return NULL;
}

static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
				    *assigned_dev, int irq)
{
	int i, index;
	struct msix_entry *host_msix_entries;

	host_msix_entries = assigned_dev->host_msix_entries;

	index = -1;
	for (i = 0; i < assigned_dev->entries_nr; i++)
		if (irq == host_msix_entries[i].vector) {
			index = i;
			break;
		}
	if (index < 0) {
		printk(KERN_WARNING "Failed to find correlated MSI-X entry!\n");
		return 0;
	}

	return index;
}

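/*
 * Illustrative sketch (not part of the original file): for a device whose
 * host_msix_entries[] holds vectors {40, 41, 42}, a host interrupt on
 * vector 41 maps back to MSI-X index 1, so guest_msix_entries[1] is the
 * entry the hard irq handler marks pending.
 */
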
static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
{
	struct kvm_assigned_dev_kernel *assigned_dev;
	struct kvm *kvm;
	int i;

	assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
				    interrupt_work);
	kvm = assigned_dev->kvm;

	mutex_lock(&kvm->irq_lock);
	spin_lock_irq(&assigned_dev->assigned_dev_lock);
	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
		struct kvm_guest_msix_entry *guest_entries =
			assigned_dev->guest_msix_entries;
		for (i = 0; i < assigned_dev->entries_nr; i++) {
			if (!(guest_entries[i].flags &
					KVM_ASSIGNED_MSIX_PENDING))
				continue;
			guest_entries[i].flags &= ~KVM_ASSIGNED_MSIX_PENDING;
			kvm_set_irq(assigned_dev->kvm,
				    assigned_dev->irq_source_id,
				    guest_entries[i].vector, 1);
		}
	} else
		kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
			    assigned_dev->guest_irq, 1);

	spin_unlock_irq(&assigned_dev->assigned_dev_lock);
	mutex_unlock(&assigned_dev->kvm->irq_lock);
}

static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
{
	unsigned long flags;
	struct kvm_assigned_dev_kernel *assigned_dev =
		(struct kvm_assigned_dev_kernel *) dev_id;

	spin_lock_irqsave(&assigned_dev->assigned_dev_lock, flags);
	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
		int index = find_index_from_host_irq(assigned_dev, irq);
		if (index < 0)
			goto out;
		assigned_dev->guest_msix_entries[index].flags |=
			KVM_ASSIGNED_MSIX_PENDING;
	}

	schedule_work(&assigned_dev->interrupt_work);

	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
		disable_irq_nosync(irq);
		assigned_dev->host_irq_disabled = true;
	}

out:
	spin_unlock_irqrestore(&assigned_dev->assigned_dev_lock, flags);
	return IRQ_HANDLED;
}

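/*
 * Interrupt forwarding overview (a sketch added for clarity, not in the
 * original file): a host interrupt is caught by kvm_assigned_dev_intr(),
 * which only records state and schedules interrupt_work; the work handler
 * above then injects the interrupt into the guest with kvm_set_irq().
 * For INTx the host line stays disabled until the guest acks the irq in
 * kvm_assigned_dev_ack_irq() below, which re-enables it.
 */
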
/* Ack the irq line for an assigned device */
static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_assigned_dev_kernel *dev;
	unsigned long flags;

	if (kian->gsi == -1)
		return;

	dev = container_of(kian, struct kvm_assigned_dev_kernel,
			   ack_notifier);

	kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);

	/* The guest irq may be shared so this ack may be
	 * from another device.
	 */
	spin_lock_irqsave(&dev->assigned_dev_lock, flags);
	if (dev->host_irq_disabled) {
		enable_irq(dev->host_irq);
		dev->host_irq_disabled = false;
	}
	spin_unlock_irqrestore(&dev->assigned_dev_lock, flags);
}

static void deassign_guest_irq(struct kvm *kvm,
			       struct kvm_assigned_dev_kernel *assigned_dev)
{
	kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
	assigned_dev->ack_notifier.gsi = -1;

	if (assigned_dev->irq_source_id != -1)
		kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
	assigned_dev->irq_source_id = -1;
	assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK);
}

/* This function implicitly holds the kvm->lock mutex, via cancel_work_sync() */
static void deassign_host_irq(struct kvm *kvm,
			      struct kvm_assigned_dev_kernel *assigned_dev)
{
	/*
	 * In kvm_free_device_irq, cancel_work_sync() returns true if:
	 * 1. the work is scheduled, and then cancelled.
	 * 2. the work callback is executed.
	 *
	 * The first case ensures that the irq is disabled and no more events
	 * will happen. But in the second case, the irq may be enabled again
	 * (e.g. for MSI). So we disable the irq here to prevent further
	 * events.
	 *
	 * Note that this may result in a nested disable if the interrupt
	 * type is INTx, but that's OK since we are going to free it anyway.
	 *
	 * If this function is part of VM destruction, please ensure that the
	 * kvm state is still valid at this point, since we may also have to
	 * wait for interrupt_work to complete.
	 */
	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
		int i;
		for (i = 0; i < assigned_dev->entries_nr; i++)
			disable_irq_nosync(assigned_dev->
					   host_msix_entries[i].vector);

		cancel_work_sync(&assigned_dev->interrupt_work);

		for (i = 0; i < assigned_dev->entries_nr; i++)
			free_irq(assigned_dev->host_msix_entries[i].vector,
				 (void *)assigned_dev);

		assigned_dev->entries_nr = 0;
		kfree(assigned_dev->host_msix_entries);
		kfree(assigned_dev->guest_msix_entries);
		pci_disable_msix(assigned_dev->dev);
	} else {
		/* Deal with MSI and INTx */
		disable_irq_nosync(assigned_dev->host_irq);
		cancel_work_sync(&assigned_dev->interrupt_work);

		free_irq(assigned_dev->host_irq, (void *)assigned_dev);

		if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI)
			pci_disable_msi(assigned_dev->dev);
	}

	assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK);
}

static int kvm_deassign_irq(struct kvm *kvm,
			    struct kvm_assigned_dev_kernel *assigned_dev,
			    unsigned long irq_requested_type)
{
	unsigned long guest_irq_type, host_irq_type;

	if (!irqchip_in_kernel(kvm))
		return -EINVAL;
	/* no irq assignment to deassign */
	if (!assigned_dev->irq_requested_type)
		return -ENXIO;

	host_irq_type = irq_requested_type & KVM_DEV_IRQ_HOST_MASK;
	guest_irq_type = irq_requested_type & KVM_DEV_IRQ_GUEST_MASK;

	if (host_irq_type)
		deassign_host_irq(kvm, assigned_dev);
	if (guest_irq_type)
		deassign_guest_irq(kvm, assigned_dev);

	return 0;
}

static void kvm_free_assigned_irq(struct kvm *kvm,
				  struct kvm_assigned_dev_kernel *assigned_dev)
{
	kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type);
}

static void kvm_free_assigned_device(struct kvm *kvm,
				     struct kvm_assigned_dev_kernel
				     *assigned_dev)
{
	kvm_free_assigned_irq(kvm, assigned_dev);

	pci_reset_function(assigned_dev->dev);

	pci_release_regions(assigned_dev->dev);
	pci_disable_device(assigned_dev->dev);
	pci_dev_put(assigned_dev->dev);

	list_del(&assigned_dev->list);
	kfree(assigned_dev);
}

void kvm_free_all_assigned_devices(struct kvm *kvm)
{
	struct list_head *ptr, *ptr2;
	struct kvm_assigned_dev_kernel *assigned_dev;

	list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
		assigned_dev = list_entry(ptr,
					  struct kvm_assigned_dev_kernel,
					  list);

		kvm_free_assigned_device(kvm, assigned_dev);
	}
}

static int assigned_device_enable_host_intx(struct kvm *kvm,
					    struct kvm_assigned_dev_kernel *dev)
{
	dev->host_irq = dev->dev->irq;
	/* Even though this is PCI, we don't want to use shared
	 * interrupts. Sharing host devices with guest-assigned devices
	 * on the same interrupt line is not a happy situation: there
	 * are going to be long delays in accepting, acking, etc.
	 */
	if (request_irq(dev->host_irq, kvm_assigned_dev_intr,
			0, "kvm_assigned_intx_device", (void *)dev))
		return -EIO;
	return 0;
}

#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_host_msi(struct kvm *kvm,
					   struct kvm_assigned_dev_kernel *dev)
{
	int r;

	if (!dev->dev->msi_enabled) {
		r = pci_enable_msi(dev->dev);
		if (r)
			return r;
	}

	dev->host_irq = dev->dev->irq;
	if (request_irq(dev->host_irq, kvm_assigned_dev_intr, 0,
			"kvm_assigned_msi_device", (void *)dev)) {
		pci_disable_msi(dev->dev);
		return -EIO;
	}

	return 0;
}
#endif

#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_host_msix(struct kvm *kvm,
					    struct kvm_assigned_dev_kernel *dev)
{
	int i, r = -EINVAL;

	/* host_msix_entries and guest_msix_entries should have been
	 * initialized */
	if (dev->entries_nr == 0)
		return r;

	r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr);
	if (r)
		return r;

	for (i = 0; i < dev->entries_nr; i++) {
		r = request_irq(dev->host_msix_entries[i].vector,
				kvm_assigned_dev_intr, 0,
				"kvm_assigned_msix_device",
				(void *)dev);
		/* FIXME: free requested_irq's on failure */
		if (r)
			return r;
	}

	return 0;
}

#endif

static int assigned_device_enable_guest_intx(struct kvm *kvm,
				struct kvm_assigned_dev_kernel *dev,
				struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = irq->guest_irq;
	return 0;
}

#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_guest_msi(struct kvm *kvm,
				struct kvm_assigned_dev_kernel *dev,
				struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = -1;
	dev->host_irq_disabled = false;
	return 0;
}
#endif
#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_guest_msix(struct kvm *kvm,
				struct kvm_assigned_dev_kernel *dev,
				struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = -1;
	dev->host_irq_disabled = false;
	return 0;
}
#endif

static int assign_host_irq(struct kvm *kvm,
			   struct kvm_assigned_dev_kernel *dev,
			   __u32 host_irq_type)
{
	int r = -EEXIST;

	if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK)
		return r;

	switch (host_irq_type) {
	case KVM_DEV_IRQ_HOST_INTX:
		r = assigned_device_enable_host_intx(kvm, dev);
		break;
#ifdef __KVM_HAVE_MSI
	case KVM_DEV_IRQ_HOST_MSI:
		r = assigned_device_enable_host_msi(kvm, dev);
		break;
#endif
#ifdef __KVM_HAVE_MSIX
	case KVM_DEV_IRQ_HOST_MSIX:
		r = assigned_device_enable_host_msix(kvm, dev);
		break;
#endif
	default:
		r = -EINVAL;
	}

	if (!r)
		dev->irq_requested_type |= host_irq_type;

	return r;
}

static int assign_guest_irq(struct kvm *kvm,
			    struct kvm_assigned_dev_kernel *dev,
			    struct kvm_assigned_irq *irq,
			    unsigned long guest_irq_type)
{
	int id;
	int r = -EEXIST;

	if (dev->irq_requested_type & KVM_DEV_IRQ_GUEST_MASK)
		return r;

	id = kvm_request_irq_source_id(kvm);
	if (id < 0)
		return id;

	dev->irq_source_id = id;

	switch (guest_irq_type) {
	case KVM_DEV_IRQ_GUEST_INTX:
		r = assigned_device_enable_guest_intx(kvm, dev, irq);
		break;
#ifdef __KVM_HAVE_MSI
	case KVM_DEV_IRQ_GUEST_MSI:
		r = assigned_device_enable_guest_msi(kvm, dev, irq);
		break;
#endif
#ifdef __KVM_HAVE_MSIX
	case KVM_DEV_IRQ_GUEST_MSIX:
		r = assigned_device_enable_guest_msix(kvm, dev, irq);
		break;
#endif
	default:
		r = -EINVAL;
	}

	if (!r) {
		dev->irq_requested_type |= guest_irq_type;
		kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
	} else
		kvm_free_irq_source_id(kvm, dev->irq_source_id);

	return r;
}

/* TODO Deal with KVM_DEV_IRQ_ASSIGNED_MASK_MSIX */
static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
				   struct kvm_assigned_irq *assigned_irq)
{
	int r = -EINVAL;
	struct kvm_assigned_dev_kernel *match;
	unsigned long host_irq_type, guest_irq_type;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	if (!irqchip_in_kernel(kvm))
		return r;

	mutex_lock(&kvm->lock);
	r = -ENODEV;
	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_irq->assigned_dev_id);
	if (!match)
		goto out;

	host_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_HOST_MASK);
	guest_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_GUEST_MASK);

	r = -EINVAL;
	/* can only assign one type at a time */
	if (hweight_long(host_irq_type) > 1)
		goto out;
	if (hweight_long(guest_irq_type) > 1)
		goto out;
	if (host_irq_type == 0 && guest_irq_type == 0)
		goto out;

	r = 0;
	if (host_irq_type)
		r = assign_host_irq(kvm, match, host_irq_type);
	if (r)
		goto out;

	if (guest_irq_type)
		r = assign_guest_irq(kvm, match, assigned_irq, guest_irq_type);
out:
	mutex_unlock(&kvm->lock);
	return r;
}

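/*
 * Usage sketch (added for clarity, not in the original file): userspace
 * typically issues KVM_ASSIGN_DEV_IRQ with flags naming one host type and
 * one guest type, e.g. KVM_DEV_IRQ_HOST_MSI | KVM_DEV_IRQ_GUEST_MSI.
 * The handler above then wires up the host side via assign_host_irq()
 * and the guest side via assign_guest_irq() in a single call.
 */
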
static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm,
					 struct kvm_assigned_irq
					 *assigned_irq)
{
	int r = -ENODEV;
	struct kvm_assigned_dev_kernel *match;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_irq->assigned_dev_id);
	if (!match)
		goto out;

	r = kvm_deassign_irq(kvm, match, assigned_irq->flags);
out:
	mutex_unlock(&kvm->lock);
	return r;
}

static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
				      struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *match;
	struct pci_dev *dev;

	down_read(&kvm->slots_lock);
	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (match) {
		/* device already assigned */
		r = -EEXIST;
		goto out;
	}

	match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
	if (match == NULL) {
		printk(KERN_INFO "%s: Couldn't allocate memory\n",
		       __func__);
		r = -ENOMEM;
		goto out;
	}
	dev = pci_get_bus_and_slot(assigned_dev->busnr,
				   assigned_dev->devfn);
	if (!dev) {
		printk(KERN_INFO "%s: host device not found\n", __func__);
		r = -EINVAL;
		goto out_free;
	}
	if (pci_enable_device(dev)) {
		printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
		r = -EBUSY;
		goto out_put;
	}
	r = pci_request_regions(dev, "kvm_assigned_device");
	if (r) {
		printk(KERN_INFO "%s: Could not get access to device regions\n",
		       __func__);
		goto out_disable;
	}

	pci_reset_function(dev);

	match->assigned_dev_id = assigned_dev->assigned_dev_id;
	match->host_busnr = assigned_dev->busnr;
	match->host_devfn = assigned_dev->devfn;
	match->flags = assigned_dev->flags;
	match->dev = dev;
	spin_lock_init(&match->assigned_dev_lock);
	match->irq_source_id = -1;
	match->kvm = kvm;
	match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
	INIT_WORK(&match->interrupt_work,
		  kvm_assigned_dev_interrupt_work_handler);

	list_add(&match->list, &kvm->arch.assigned_dev_head);

	if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
		if (!kvm->arch.iommu_domain) {
			r = kvm_iommu_map_guest(kvm);
			if (r)
				goto out_list_del;
		}
		r = kvm_assign_device(kvm, match);
		if (r)
			goto out_list_del;
	}

out:
	mutex_unlock(&kvm->lock);
	up_read(&kvm->slots_lock);
	return r;
out_list_del:
	list_del(&match->list);
	pci_release_regions(dev);
out_disable:
	pci_disable_device(dev);
out_put:
	pci_dev_put(dev);
out_free:
	kfree(match);
	mutex_unlock(&kvm->lock);
	up_read(&kvm->slots_lock);
	return r;
}
#endif

#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
		struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *match;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (!match) {
		printk(KERN_INFO "%s: device hasn't been assigned before, "
		       "so cannot be deassigned\n", __func__);
		r = -EINVAL;
		goto out;
	}

	if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
		kvm_deassign_device(kvm, match);

	kvm_free_assigned_device(kvm, match);

out:
	mutex_unlock(&kvm->lock);
	return r;
}
#endif

inline int kvm_is_mmio_pfn(pfn_t pfn)
{
	if (pfn_valid(pfn)) {
		struct page *page = compound_head(pfn_to_page(pfn));
		return PageReserved(page);
	}

	return true;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}

static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
	int i, cpu, me;
	cpumask_var_t cpus;
	bool called = true;
	struct kvm_vcpu *vcpu;

	if (alloc_cpumask_var(&cpus, GFP_ATOMIC))
		cpumask_clear(cpus);

	me = get_cpu();
	spin_lock(&kvm->requests_lock);
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (test_and_set_bit(req, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpus != NULL && cpu != -1 && cpu != me)
			cpumask_set_cpu(cpu, cpus);
	}
	if (unlikely(cpus == NULL))
		smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
	else if (!cpumask_empty(cpus))
		smp_call_function_many(cpus, ack_flush, NULL, 1);
	else
		called = false;
	spin_unlock(&kvm->requests_lock);
	put_cpu();
	free_cpumask_var(cpus);
	return called;
}

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
}

void kvm_reload_remote_mmus(struct kvm *kvm)
{
	make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}

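/*
 * Request/IPI pattern (a sketch added for clarity, not in the original
 * file): make_all_cpus_request() sets a request bit on every vcpu and
 * kicks the running vcpus out of guest mode with an empty IPI. Each vcpu
 * then consumes the bit on its next entry, roughly:
 *
 *	if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
 *		flush_this_vcpu_tlb(vcpu);	// arch-specific handling
 *
 * The actual consumer lives in arch code and may differ per architecture.
 */
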
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
	return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush;

	/*
	 * When ->invalidate_page runs, the linux pte has been zapped
	 * already but the page is still allocated until
	 * ->invalidate_page returns. So if we increase the sequence
	 * here the kvm page fault will notice if the spte can't be
	 * established because the page is going to be freed. If
	 * instead the kvm page fault establishes the spte before
	 * ->invalidate_page runs, kvm_unmap_hva will release it
	 * before returning.
	 *
	 * The sequence increase only needs to be seen at spin_unlock
	 * time, and not at spin_lock time.
	 *
	 * Increasing the sequence after the spin_unlock would be
	 * unsafe because the kvm page fault could then establish the
	 * pte after kvm_unmap_hva returned, without noticing the page
	 * is going to be freed.
	 */
	spin_lock(&kvm->mmu_lock);
	kvm->mmu_notifier_seq++;
	need_tlb_flush = kvm_unmap_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);

	/* we have to flush the TLB before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);

}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush = 0;

	spin_lock(&kvm->mmu_lock);
	/*
	 * The count increase must become visible at unlock time as no
	 * spte can be established without taking the mmu_lock and
	 * count is also read inside the mmu_lock critical section.
	 */
	kvm->mmu_notifier_count++;
	for (; start < end; start += PAGE_SIZE)
		need_tlb_flush |= kvm_unmap_hva(kvm, start);
	spin_unlock(&kvm->mmu_lock);

	/* we have to flush the TLB before the pages can be freed */
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);

	spin_lock(&kvm->mmu_lock);
	/*
	 * This sequence increase will notify the kvm page fault that
	 * the page that is going to be mapped in the spte could have
	 * been freed.
	 */
	kvm->mmu_notifier_seq++;
	/*
	 * The above sequence increase must be visible before the
	 * below count decrease but both values are read by the kvm
	 * page fault under the mmu_lock spinlock so we don't need to
	 * add a smp_wmb() here in between the two.
	 */
	kvm->mmu_notifier_count--;
	spin_unlock(&kvm->mmu_lock);

	BUG_ON(kvm->mmu_notifier_count < 0);
}

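/*
 * Retry protocol sketch (added for clarity, not in the original file):
 * an arch page fault handler samples mmu_notifier_seq before resolving
 * the hva->pfn translation, then rechecks both values under mmu_lock
 * before installing the spte, roughly:
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);
 *	spin_lock(&kvm->mmu_lock);
 *	if (kvm->mmu_notifier_count || kvm->mmu_notifier_seq != mmu_seq)
 *		goto retry;	// the page may be on its way to being freed
 *
 * The real check lives in arch code; this is just the shape of it.
 */
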
static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int young;

	spin_lock(&kvm->mmu_lock);
	young = kvm_age_hva(kvm, address);
	spin_unlock(&kvm->mmu_lock);

	if (young)
		kvm_flush_remote_tlbs(kvm);

	return young;
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	kvm_arch_flush_shadow(kvm);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
	.invalidate_page	= kvm_mmu_notifier_invalidate_page,
	.invalidate_range_start	= kvm_mmu_notifier_invalidate_range_start,
	.invalidate_range_end	= kvm_mmu_notifier_invalidate_range_end,
	.clear_flush_young	= kvm_mmu_notifier_clear_flush_young,
	.release		= kvm_mmu_notifier_release,
};
#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

static struct kvm *kvm_create_vm(void)
{
	struct kvm *kvm = kvm_arch_create_vm();
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct page *page;
#endif

	if (IS_ERR(kvm))
		goto out;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	INIT_LIST_HEAD(&kvm->irq_routing);
	INIT_HLIST_HEAD(&kvm->mask_notifier_list);
#endif

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		kfree(kvm);
		return ERR_PTR(-ENOMEM);
	}
	kvm->coalesced_mmio_ring =
			(struct kvm_coalesced_mmio_ring *)page_address(page);
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	{
		int err;
		kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
		err = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
		if (err) {
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
			put_page(page);
#endif
			kfree(kvm);
			return ERR_PTR(err);
		}
	}
#endif

	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	spin_lock_init(&kvm->mmu_lock);
	spin_lock_init(&kvm->requests_lock);
	kvm_io_bus_init(&kvm->pio_bus);
	kvm_irqfd_init(kvm);
	mutex_init(&kvm->lock);
	mutex_init(&kvm->irq_lock);
	kvm_io_bus_init(&kvm->mmio_bus);
	init_rwsem(&kvm->slots_lock);
	atomic_set(&kvm->users_count, 1);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	kvm_coalesced_mmio_init(kvm);
#endif
out:
	return kvm;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	if (!dont || free->lpage_info != dont->lpage_info)
		vfree(free->lpage_info);

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
	free->lpage_info = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	struct mm_struct *mm = kvm->mm;

	kvm_arch_sync_events(kvm);
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_free_irq_routing(kvm);
	kvm_io_bus_destroy(&kvm->pio_bus);
	kvm_io_bus_destroy(&kvm->mmio_bus);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	if (kvm->coalesced_mmio_ring != NULL)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
#endif
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
	kvm_arch_flush_shadow(kvm);
#endif
	kvm_arch_destroy_vm(kvm);
	mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);


static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_irqfd_release(kvm);

	kvm_put_kvm(kvm);
	return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages, ugfn;
	unsigned long largepages, i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot || !s->npages)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
#ifndef CONFIG_S390
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		/*
		 * hva_to_rmmap() serializes with the mmu_lock and to be
		 * safe it has to ignore memslots with !user_alloc &&
		 * !userspace_addr.
		 */
		if (user_alloc)
			new.userspace_addr = mem->userspace_addr;
		else
			new.userspace_addr = 0;
	}
	if (npages && !new.lpage_info) {
		largepages = 1 + (base_gfn + npages - 1) / KVM_PAGES_PER_HPAGE;
		largepages -= base_gfn / KVM_PAGES_PER_HPAGE;

		new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));

		if (!new.lpage_info)
			goto out_free;

		memset(new.lpage_info, 0, largepages * sizeof(*new.lpage_info));

		if (base_gfn % KVM_PAGES_PER_HPAGE)
			new.lpage_info[0].write_count = 1;
		if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE)
			new.lpage_info[largepages-1].write_count = 1;
		ugfn = new.userspace_addr >> PAGE_SHIFT;
		/*
		 * If the gfn and userspace address are not aligned wrt each
		 * other, disable large page support for this slot
		 */
		if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE - 1))
			for (i = 0; i < largepages; ++i)
				new.lpage_info[i].write_count = 1;
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
		if (old.npages)
			kvm_arch_flush_shadow(kvm);
	}
#endif /* not defined CONFIG_S390 */

	if (!npages)
		kvm_arch_flush_shadow(kvm);

	spin_lock(&kvm->mmu_lock);
	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	*memslot = new;
	spin_unlock(&kvm->mmu_lock);

	r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
	if (r) {
		spin_lock(&kvm->mmu_lock);
		*memslot = old;
		spin_unlock(&kvm->mmu_lock);
		goto out_free;
	}

	kvm_free_physmem_slot(&old, npages ? &new : NULL);
	/* Slot deletion case: we have to update the current slot */
	spin_lock(&kvm->mmu_lock);
	if (!npages)
		*memslot = old;
	spin_unlock(&kvm->mmu_lock);
#ifdef CONFIG_DMAR
	/* map the pages in iommu page table */
	r = kvm_iommu_map_pages(kvm, base_gfn, npages);
	if (r)
		goto out;
#endif
	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;

}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

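/*
 * Usage sketch (added for clarity, not in the original file): from
 * userspace a slot is typically installed through the VM ioctl, e.g.:
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot            = 0,
 *		.flags           = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size     = 64 << 20,		// page-aligned size
 *		.userspace_addr  = (__u64)mmap_result,	// page-aligned hva
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 *
 * which reaches __kvm_set_memory_region() via
 * kvm_vm_ioctl_set_memory_region() below with user_alloc == 1.
 */
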
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	down_write(&kvm->slots_lock);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	up_write(&kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct
				   kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}

int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}

int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
	return pfn == bad_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return gfn_to_memslot_unaliased(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn(kvm, gfn);
	slot = gfn_to_memslot_unaliased(kvm, gfn);
	if (!slot)
		return bad_hva();
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

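/*
 * Worked example (added for clarity, not in the original file): for a slot
 * with base_gfn = 0x100, npages = 0x200 and userspace_addr = 0x7f0000000000,
 * gfn 0x180 falls inside the slot and translates to
 * 0x7f0000000000 + (0x180 - 0x100) * PAGE_SIZE = 0x7f0000080000
 * (with 4 KiB pages). A gfn outside every slot yields bad_hva() instead.
 */
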
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	pfn_t pfn;

	might_sleep();

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return page_to_pfn(bad_page);
	}

	npages = get_user_pages_fast(addr, 1, 1, page);

	if (unlikely(npages != 1)) {
		struct vm_area_struct *vma;

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, addr);

		if (vma == NULL || addr < vma->vm_start ||
		    !(vma->vm_flags & VM_PFNMAP)) {
			up_read(&current->mm->mmap_sem);
			get_page(bad_page);
			return page_to_pfn(bad_page);
		}

		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		up_read(&current->mm->mmap_sem);
		BUG_ON(!kvm_is_mmio_pfn(pfn));
	} else
		pfn = page_to_pfn(page[0]);

	return pfn;
}

EXPORT_SYMBOL_GPL(gfn_to_pfn);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);
	if (!kvm_is_mmio_pfn(pfn))
		return pfn_to_page(pfn);

	WARN_ON(kvm_is_mmio_pfn(pfn));

	get_page(bad_page);
	return bad_page;
}

EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
	kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
	kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page))
			SetPageDirty(page);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);

static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

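/*
 * Worked example (added for clarity, not in the original file): with
 * PAGE_SIZE = 4096, a 5000-byte access starting at offset 3000 within the
 * first page is split as next_segment(5000, 3000) = 1096 bytes, then
 * next_segment(3904, 0) = 3904 bytes from the following page, and the
 * loops in kvm_read_guest()/kvm_write_guest() below terminate once
 * next_segment() returns 0.
 */
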
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

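/*
 * Usage sketch (added for clarity, not in the original file): a caller with
 * an arbitrary, possibly page-crossing guest physical range can simply do:
 *
 *	struct some_descriptor desc;	// hypothetical caller-side type
 *	if (kvm_read_guest(kvm, gpa, &desc, sizeof(desc)) < 0)
 *		return -EFAULT;
 *	...
 *	kvm_write_guest(kvm, gpa, &desc, sizeof(desc));
 *
 * The page-by-page splitting and dirty tracking (mark_page_dirty() on
 * writes) are handled internally.
 */
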
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	gfn = unalias_gfn(kvm, gfn);
	memslot = gfn_to_memslot_unaliased(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		/* avoid RMW */
		if (!test_bit(rel_gfn, memslot->dirty_bitmap))
			set_bit(rel_gfn, memslot->dirty_bitmap);
	}
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if ((kvm_arch_interrupt_allowed(vcpu) &&
					kvm_cpu_has_interrupt(vcpu)) ||
				kvm_arch_vcpu_runnable(vcpu)) {
			set_bit(KVM_REQ_UNHALT, &vcpu->requests);
			break;
		}
		if (kvm_cpu_has_pending_timer(vcpu))
			break;
		if (signal_pending(current))
			break;

		vcpu_put(vcpu);
		schedule();
		vcpu_load(vcpu);
	}

	finish_wait(&vcpu->wq, &wait);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
	else
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;
	return 0;
}

static struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

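/*
 * Usage sketch (added for clarity, not in the original file): userspace
 * maps the shared kvm_run structure through the vcpu fd, and the fault
 * handler above backs page offset 0 with vcpu->run:
 *
 *	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *
 * where mmap_size is typically obtained from the KVM_GET_VCPU_MMAP_SIZE
 * ioctl on the main /dev/kvm fd.
 */
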
static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	kvm_put_kvm(vcpu->kvm);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl   = kvm_vcpu_ioctl,
	.mmap           = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
}

/*
 * Creates some virtual cpus. Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
	int r;
	struct kvm_vcpu *vcpu, *v;

	vcpu = kvm_arch_vcpu_create(kvm, id);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
		r = -EINVAL;
		goto vcpu_destroy;
	}

	kvm_for_each_vcpu(r, v, kvm)
		if (v->vcpu_id == id) {
			r = -EEXIST;
			goto vcpu_destroy;
		}

	BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);

	/* Now it's all set up, let userspace reach it */
	kvm_get_kvm(kvm);
	r = create_vcpu_fd(vcpu);
	if (r < 0) {
		kvm_put_kvm(kvm);
		goto vcpu_destroy;
	}

	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
	smp_wmb();
	atomic_inc(&kvm->online_vcpus);

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	if (kvm->bsp_vcpu_id == id)
		kvm->bsp_vcpu = vcpu;
#endif
	mutex_unlock(&kvm->lock);
	return r;

vcpu_destroy:
	mutex_unlock(&kvm->lock);
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

#ifdef __KVM_HAVE_MSIX
static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm,
				    struct kvm_assigned_msix_nr *entry_nr)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *adev;

	mutex_lock(&kvm->lock);

	adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				     entry_nr->assigned_dev_id);
	if (!adev) {
		r = -EINVAL;
		goto msix_nr_out;
	}

	if (adev->entries_nr == 0) {
		adev->entries_nr = entry_nr->entry_nr;
		if (adev->entries_nr == 0 ||
		    adev->entries_nr >= KVM_MAX_MSIX_PER_DEV) {
			r = -EINVAL;
			goto msix_nr_out;
		}

		adev->host_msix_entries = kzalloc(sizeof(struct msix_entry) *
						  entry_nr->entry_nr,
						  GFP_KERNEL);
		if (!adev->host_msix_entries) {
			r = -ENOMEM;
			goto msix_nr_out;
		}
		adev->guest_msix_entries = kzalloc(
				sizeof(struct kvm_guest_msix_entry) *
				entry_nr->entry_nr, GFP_KERNEL);
		if (!adev->guest_msix_entries) {
			kfree(adev->host_msix_entries);
			r = -ENOMEM;
			goto msix_nr_out;
		}
	} else /* Setting the MSI-X entry count twice is not allowed */
		r = -EINVAL;
msix_nr_out:
	mutex_unlock(&kvm->lock);
	return r;
}

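/*
 * Configuration order (a sketch added for clarity, not in the original
 * file): userspace first declares how many MSI-X entries the assigned
 * device uses (KVM_ASSIGN_SET_MSIX_NR, handled above), then fills in each
 * entry/gsi pair one at a time (KVM_ASSIGN_SET_MSIX_ENTRY, handled below),
 * before finally enabling the interrupt with KVM_ASSIGN_DEV_IRQ.
 */
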
static int kvm_vm_ioctl_set_msix_entry(struct kvm *kvm,
				       struct kvm_assigned_msix_entry *entry)
{
	int r = 0, i;
	struct kvm_assigned_dev_kernel *adev;

	mutex_lock(&kvm->lock);

	adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				     entry->assigned_dev_id);

	if (!adev) {
		r = -EINVAL;
		goto msix_entry_out;
	}

	for (i = 0; i < adev->entries_nr; i++)
		if (adev->guest_msix_entries[i].vector == 0 ||
		    adev->guest_msix_entries[i].entry == entry->entry) {
			adev->guest_msix_entries[i].entry = entry->entry;
			adev->guest_msix_entries[i].vector = entry->gsi;
			adev->host_msix_entries[i].entry = entry->entry;
			break;
		}
	if (i == adev->entries_nr) {
		r = -ENOSPC;
		goto msix_entry_out;
	}

msix_entry_out:
	mutex_unlock(&kvm->lock);

	return r;
}
#endif

bccf2150
AK
static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_fpu *fpu = NULL;
	struct kvm_sregs *kvm_sregs = NULL;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
		r = 0;
out_free1:
		kfree(kvm_regs);
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
			goto out_free2;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
		if (r)
			goto out_free2;
		r = 0;
out_free2:
		kfree(kvm_regs);
		break;
	}
	case KVM_GET_SREGS: {
		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof mp_state))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof mp_state))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_GUEST_DEBUG: {
		struct kvm_guest_debug dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		/* Pass p, not &sigset: a NULL argp means "clear the mask". */
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	kfree(fpu);
	kfree(kvm_sregs);
	return r;
}

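/*
 * Dispatcher for ioctls issued on a VM file descriptor: vcpu creation,
 * memory slot setup, dirty logging, device assignment and irq routing.
 * Unrecognized commands fall through to kvm_arch_vm_ioctl().  For
 * example, a guest memory slot is installed roughly like this
 * (illustrative sketch only; "buf" and "size" are hypothetical):
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size = size,
 *		.userspace_addr = (__u64)buf,
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */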
static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
				   sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
#endif
#ifdef KVM_CAP_DEVICE_ASSIGNMENT
	case KVM_ASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
		if (r)
			goto out;
		break;
	}
	case KVM_ASSIGN_IRQ: {
		r = -EOPNOTSUPP;
		break;
	}
#ifdef KVM_CAP_ASSIGN_DEV_IRQ
	case KVM_ASSIGN_DEV_IRQ: {
		struct kvm_assigned_irq assigned_irq;

		r = -EFAULT;
		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
			goto out;
		r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
		if (r)
			goto out;
		break;
	}
	case KVM_DEASSIGN_DEV_IRQ: {
		struct kvm_assigned_irq assigned_irq;

		r = -EFAULT;
		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
			goto out;
		r = kvm_vm_ioctl_deassign_dev_irq(kvm, &assigned_irq);
		if (r)
			goto out;
		break;
	}
#endif
#endif
#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
	case KVM_DEASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
		if (r)
			goto out;
		break;
	}
#endif
#ifdef KVM_CAP_IRQ_ROUTING
	case KVM_SET_GSI_ROUTING: {
		struct kvm_irq_routing routing;
		struct kvm_irq_routing __user *urouting;
		struct kvm_irq_routing_entry *entries;

		r = -EFAULT;
		if (copy_from_user(&routing, argp, sizeof(routing)))
			goto out;
		r = -EINVAL;
		if (routing.nr >= KVM_MAX_IRQ_ROUTES)
			goto out;
		if (routing.flags)
			goto out;
		r = -ENOMEM;
		entries = vmalloc(routing.nr * sizeof(*entries));
		if (!entries)
			goto out;
		r = -EFAULT;
		urouting = argp;
		if (copy_from_user(entries, urouting->entries,
				   routing.nr * sizeof(*entries)))
			goto out_free_irq_routing;
		r = kvm_set_irq_routing(kvm, entries, routing.nr,
					routing.flags);
	out_free_irq_routing:
		vfree(entries);
		break;
	}
#ifdef __KVM_HAVE_MSIX
	case KVM_ASSIGN_SET_MSIX_NR: {
		struct kvm_assigned_msix_nr entry_nr;
		r = -EFAULT;
		if (copy_from_user(&entry_nr, argp, sizeof entry_nr))
			goto out;
		r = kvm_vm_ioctl_set_msix_nr(kvm, &entry_nr);
		if (r)
			goto out;
		break;
	}
	case KVM_ASSIGN_SET_MSIX_ENTRY: {
		struct kvm_assigned_msix_entry entry;
		r = -EFAULT;
		if (copy_from_user(&entry, argp, sizeof entry))
			goto out;
		r = kvm_vm_ioctl_set_msix_entry(kvm, &entry);
		if (r)
			goto out;
		break;
	}
#endif
#endif /* KVM_CAP_IRQ_ROUTING */
	case KVM_IRQFD: {
		struct kvm_irqfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
		break;
	}
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_SET_BOOT_CPU_ID:
		r = 0;
		if (atomic_read(&kvm->online_vcpus) != 0)
			r = -EBUSY;
		else
			kvm->bsp_vcpu_id = arg;
		break;
#endif
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

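/*
 * Fault handler for mmap() of a VM fd: the file offset is interpreted
 * as a guest frame number and the backing page is faulted in.
 */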
static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page[1];
	unsigned long addr;
	int npages;
	gfn_t gfn = vmf->pgoff;
	struct kvm *kvm = vma->vm_file->private_data;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return VM_FAULT_SIGBUS;

	npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
				NULL);
	if (unlikely(npages != 1))
		return VM_FAULT_SIGBUS;

	vmf->page = page[0];
	return 0;
}

static struct vm_operations_struct kvm_vm_vm_ops = {
	.fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static struct file_operations kvm_vm_fops = {
	.release = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
	.compat_ioctl = kvm_vm_ioctl,
	.mmap = kvm_vm_mmap,
};

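/*
 * Back-end of KVM_CREATE_VM on /dev/kvm: create the VM and wrap it in
 * an anonymous inode whose fd is handed back to userspace.  If fd
 * allocation fails, the reference taken by kvm_create_vm() is dropped.
 */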
static int kvm_dev_ioctl_create_vm(void)
{
	int fd;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, 0);
	if (fd < 0)
		kvm_put_kvm(kvm);

	return fd;
}

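/*
 * Capabilities that can be answered generically for every
 * architecture; anything else is delegated to the arch-specific
 * kvm_dev_ioctl_check_extension().
 */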
static long kvm_dev_ioctl_check_extension_generic(long arg)
{
	switch (arg) {
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_CAP_SET_BOOT_CPU_ID:
#endif
		return 1;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	case KVM_CAP_IRQ_ROUTING:
		return KVM_MAX_IRQ_ROUTES;
#endif
	default:
		break;
	}
	return kvm_dev_ioctl_check_extension(arg);
}

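/*
 * ioctls on /dev/kvm itself.  A minimal open sequence from userspace
 * (illustrative sketch only, error handling elided):
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	int api = ioctl(kvm_fd, KVM_GET_API_VERSION, 0);
 *	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 */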
static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension_generic(arg);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;    /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
		r += PAGE_SIZE;    /* coalesced mmio ring page */
#endif
		break;
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
		r = kvm_trace_ioctl(ioctl, arg);
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

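/*
 * Enable/disable the virtualization extensions on the local CPU.  The
 * cpus_hardware_enabled mask keeps both operations idempotent, which
 * matters because they can be reached from hotplug, suspend/resume and
 * reboot notifiers.
 */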
static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;
	cpumask_set_cpu(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_enable(NULL);
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;
	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_disable(NULL);
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 1);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 1);
		break;
	}
	return NOTIFY_OK;
}

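/*
 * Exception fixups on hardware virtualization instructions land here
 * when a fault races with reboot; we spin until the reset completes
 * rather than oopsing on an instruction that only failed because
 * virtualization was just disabled.
 */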
asmlinkage void kvm_handle_fault_on_reboot(void)
{
	if (kvm_rebooting)
		/* spin while reset goes on */
		while (true)
			;
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	/*
	 * Some (well, at least mine) BIOSes hang on reboot if
	 * in vmx root mode.
	 *
	 * Intel TXT also requires VMX to be off on all CPUs when the
	 * system shuts down.
	 */
	printk(KERN_INFO "kvm: exiting hardware virtualization\n");
	kvm_rebooting = true;
	on_each_cpu(hardware_disable, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

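/*
 * A kvm_io_bus is a flat array of in-kernel devices; at dispatch time
 * the first device whose address range covers the access wins.
 */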
void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}

struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
					  gpa_t addr, int len, int is_write)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		if (kvm_iodevice_in_range(pos, addr, len, is_write))
			return pos;
	}

	return NULL;
}

void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
{
	BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));

	bus->devs[bus->dev_count++] = dev;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};

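/*
 * debugfs statistics: each entry names a u32 at a fixed offset inside
 * struct kvm or struct kvm_vcpu, summed over all live instances under
 * kvm_lock.
 */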
static int vm_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		*val += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static int vcpu_stat_get(void *_offset, u64 *val)
{
	unsigned offset = (long)_offset;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	*val = 0;
	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
			*val += *(u32 *)((void *)vcpu + offset);

	spin_unlock(&kvm_lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM] = &vm_stat_fops,
};

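/*
 * Create one debugfs file per entry of the arch-provided
 * debugfs_entries[] table, in the "kvm" directory (typically
 * /sys/kernel/debug/kvm).
 */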
static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(kvm_debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};

struct page *bad_page;
pfn_t bad_pfn;

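/*
 * Preemption notifiers let us save and restore guest state whenever
 * the thread running a vCPU is scheduled out and back in.
 */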
static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}

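/*
 * Module initialization.  The out_free_* labels unwind in strict
 * reverse order of setup, so each new step only needs a matching
 * label just below its failure point.
 */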
int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module)
{
	int r;
	int cpu;

	kvm_init_debug();

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_pfn = page_to_pfn(bad_page);

	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
		r = -ENOMEM;
		goto out_free_0;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0a;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 1);
		if (r < 0)
			goto out_free_1;
	}

	on_each_cpu(hardware_enable, NULL, 1);
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu),
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
	on_each_cpu(hardware_disable, NULL, 1);
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
out_free_0:
	__free_page(bad_page);
out:
	kvm_arch_exit();
	kvm_exit_debug();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

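/*
 * Module teardown: undo kvm_init() in reverse, plus trace cleanup.
 */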
void kvm_exit(void)
{
	kvm_trace_cleanup();
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	kvm_exit_debug();
	free_cpumask_var(cpus_hardware_enabled);
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);