KVM: Make unloading of FPU state when putting vcpu arch-independent
drivers/kvm/kvm_main.c

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "kvm.h"
#include "x86.h"
#include "irq.h"

#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/desc.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_t cpus_hardware_enabled;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

static struct dentry *debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
                           unsigned long arg);

static inline int valid_vcpu(int n)
{
        return likely(n >= 0 && n < KVM_MAX_VCPUS);
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
        int cpu;

        mutex_lock(&vcpu->mutex);
        cpu = get_cpu();
        preempt_notifier_register(&vcpu->preempt_notifier);
        kvm_arch_vcpu_load(vcpu, cpu);
        put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        kvm_arch_vcpu_put(vcpu);
        preempt_notifier_unregister(&vcpu->preempt_notifier);
        preempt_enable();
        mutex_unlock(&vcpu->mutex);
}

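/*
 * Usage sketch (mirrors the vcpu ioctl handlers below): any access to
 * guest state is bracketed by a load/put pair, so the vcpu mutex is held
 * and the vcpu is resident on a cpu for the duration:
 *
 *        vcpu_load(vcpu);
 *        ... touch vcpu state ...
 *        vcpu_put(vcpu);
 */
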
static void ack_flush(void *_completed)
{
}

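/*
 * Flag every vcpu's TLB as stale (KVM_REQ_TLB_FLUSH) and IPI the cpus
 * currently running a vcpu; the empty ack_flush() callback above merely
 * kicks those cpus out of guest mode, so the request bit can be acted on
 * at the next guest entry.
 */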
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
        int i, cpu;
        cpumask_t cpus;
        struct kvm_vcpu *vcpu;

        cpus_clear(cpus);
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                vcpu = kvm->vcpus[i];
                if (!vcpu)
                        continue;
                if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
                        continue;
                cpu = vcpu->cpu;
                if (cpu != -1 && cpu != raw_smp_processor_id())
                        cpu_set(cpu, cpus);
        }
        smp_call_function_mask(cpus, ack_flush, NULL, 1);
}

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
        struct page *page;
        int r;

        mutex_init(&vcpu->mutex);
        vcpu->cpu = -1;
        vcpu->kvm = kvm;
        vcpu->vcpu_id = id;
        init_waitqueue_head(&vcpu->wq);

        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page) {
                r = -ENOMEM;
                goto fail;
        }
        vcpu->run = page_address(page);

        r = kvm_arch_vcpu_init(vcpu);
        if (r < 0)
                goto fail_free_run;
        return 0;

fail_free_run:
        free_page((unsigned long)vcpu->run);
fail:
        return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_uninit(vcpu);
        free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

static struct kvm *kvm_create_vm(void)
{
        struct kvm *kvm = kvm_arch_create_vm();

        if (IS_ERR(kvm))
                goto out;

        kvm_io_bus_init(&kvm->pio_bus);
        mutex_init(&kvm->lock);
        kvm_io_bus_init(&kvm->mmio_bus);
        spin_lock(&kvm_lock);
        list_add(&kvm->vm_list, &vm_list);
        spin_unlock(&kvm_lock);
out:
        return kvm;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
                                  struct kvm_memory_slot *dont)
{
        if (!dont || free->rmap != dont->rmap)
                vfree(free->rmap);

        if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
                vfree(free->dirty_bitmap);

        free->npages = 0;
        free->dirty_bitmap = NULL;
        free->rmap = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
        int i;

        for (i = 0; i < kvm->nmemslots; ++i)
                kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
        spin_lock(&kvm_lock);
        list_del(&kvm->vm_list);
        spin_unlock(&kvm_lock);
        kvm_io_bus_destroy(&kvm->pio_bus);
        kvm_io_bus_destroy(&kvm->mmio_bus);
        kvm_arch_destroy_vm(kvm);
}

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
        struct kvm *kvm = filp->private_data;

        kvm_destroy_vm(kvm);
        return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding kvm->lock.
 */
int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem,
                            int user_alloc)
{
        int r;
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long i;
        struct kvm_memory_slot *memslot;
        struct kvm_memory_slot old, new;

        r = -EINVAL;
        /* General sanity checks */
        if (mem->memory_size & (PAGE_SIZE - 1))
                goto out;
        if (mem->guest_phys_addr & (PAGE_SIZE - 1))
                goto out;
        if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
                goto out;
        if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
                goto out;

        memslot = &kvm->memslots[mem->slot];
        base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
        npages = mem->memory_size >> PAGE_SHIFT;

        if (!npages)
                mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

        new = old = *memslot;

        new.base_gfn = base_gfn;
        new.npages = npages;
        new.flags = mem->flags;

        /* Disallow changing a memory slot's size. */
        r = -EINVAL;
        if (npages && old.npages && npages != old.npages)
                goto out_free;

        /* Check for overlaps */
        r = -EEXIST;
        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
                struct kvm_memory_slot *s = &kvm->memslots[i];

                if (s == memslot)
                        continue;
                if (!((base_gfn + npages <= s->base_gfn) ||
                      (base_gfn >= s->base_gfn + s->npages)))
                        goto out_free;
        }

        /* Free page dirty bitmap if unneeded */
        if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
                new.dirty_bitmap = NULL;

        r = -ENOMEM;

        /* Allocate if a slot is being created */
        if (npages && !new.rmap) {
                new.rmap = vmalloc(npages * sizeof(struct page *));

                if (!new.rmap)
                        goto out_free;

                memset(new.rmap, 0, npages * sizeof(*new.rmap));

                new.user_alloc = user_alloc;
                if (user_alloc)
                        new.userspace_addr = mem->userspace_addr;
                else {
                        down_write(&current->mm->mmap_sem);
                        new.userspace_addr = do_mmap(NULL, 0,
                                                     npages * PAGE_SIZE,
                                                     PROT_READ | PROT_WRITE,
                                                     MAP_SHARED | MAP_ANONYMOUS,
                                                     0);
                        up_write(&current->mm->mmap_sem);

                        if (IS_ERR((void *)new.userspace_addr))
                                goto out_free;
                }
        } else {
                if (!old.user_alloc && old.rmap) {
                        int ret;

                        down_write(&current->mm->mmap_sem);
                        ret = do_munmap(current->mm, old.userspace_addr,
                                        old.npages * PAGE_SIZE);
                        up_write(&current->mm->mmap_sem);
                        if (ret < 0)
                                printk(KERN_WARNING
                                       "kvm_vm_ioctl_set_memory_region: "
                                       "failed to munmap memory\n");
                }
        }

        /* Allocate page dirty bitmap if needed */
        if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
                unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

                new.dirty_bitmap = vmalloc(dirty_bytes);
                if (!new.dirty_bitmap)
                        goto out_free;
                memset(new.dirty_bitmap, 0, dirty_bytes);
        }

        if (mem->slot >= kvm->nmemslots)
                kvm->nmemslots = mem->slot + 1;

        if (!kvm->n_requested_mmu_pages) {
                unsigned int n_pages;

                if (npages) {
                        n_pages = npages * KVM_PERMILLE_MMU_PAGES / 1000;
                        kvm_mmu_change_mmu_pages(kvm, kvm->n_alloc_mmu_pages +
                                                 n_pages);
                } else {
                        unsigned int nr_mmu_pages;

                        n_pages = old.npages * KVM_PERMILLE_MMU_PAGES / 1000;
                        nr_mmu_pages = kvm->n_alloc_mmu_pages - n_pages;
                        nr_mmu_pages = max(nr_mmu_pages,
                                           (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
                        kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
                }
        }

        *memslot = new;

        kvm_mmu_slot_remove_write_access(kvm, mem->slot);
        kvm_flush_remote_tlbs(kvm);

        kvm_free_physmem_slot(&old, &new);
        return 0;

out_free:
        kvm_free_physmem_slot(&new, &old);
out:
        return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem,
                          int user_alloc)
{
        int r;

        mutex_lock(&kvm->lock);
        r = __kvm_set_memory_region(kvm, mem, user_alloc);
        mutex_unlock(&kvm->lock);
        return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        if (mem->slot >= KVM_MEMORY_SLOTS)
                return -EINVAL;
        return kvm_set_memory_region(kvm, mem, user_alloc);
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                                      struct kvm_dirty_log *log)
{
        struct kvm_memory_slot *memslot;
        int r, i;
        int n;
        unsigned long any = 0;

        mutex_lock(&kvm->lock);

        r = -EINVAL;
        if (log->slot >= KVM_MEMORY_SLOTS)
                goto out;

        memslot = &kvm->memslots[log->slot];
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;

        n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

        for (i = 0; !any && i < n/sizeof(long); ++i)
                any = memslot->dirty_bitmap[i];

        r = -EFAULT;
        if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
                goto out;

        /* If nothing is dirty, don't bother messing with page tables. */
        if (any) {
                kvm_mmu_slot_remove_write_access(kvm, log->slot);
                kvm_flush_remote_tlbs(kvm);
                memset(memslot->dirty_bitmap, 0, n);
        }

        r = 0;

out:
        mutex_unlock(&kvm->lock);
        return r;
}

int is_error_page(struct page *page)
{
        return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

static inline unsigned long bad_hva(void)
{
        return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
        return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
        int i;
        struct kvm_mem_alias *alias;

        for (i = 0; i < kvm->naliases; ++i) {
                alias = &kvm->aliases[i];
                if (gfn >= alias->base_gfn
                    && gfn < alias->base_gfn + alias->npages)
                        return alias->target_gfn + gfn - alias->base_gfn;
        }
        return gfn;
}

static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
        int i;

        for (i = 0; i < kvm->nmemslots; ++i) {
                struct kvm_memory_slot *memslot = &kvm->memslots[i];

                if (gfn >= memslot->base_gfn
                    && gfn < memslot->base_gfn + memslot->npages)
                        return memslot;
        }
        return NULL;
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
        gfn = unalias_gfn(kvm, gfn);
        return __gfn_to_memslot(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
        int i;

        gfn = unalias_gfn(kvm, gfn);
        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
                struct kvm_memory_slot *memslot = &kvm->memslots[i];

                if (gfn >= memslot->base_gfn
                    && gfn < memslot->base_gfn + memslot->npages)
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

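/*
 * Translate a guest frame number into the host virtual address that backs
 * it, or bad_hva() if the gfn is not covered by any memslot.
 */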
static unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *slot;

        gfn = unalias_gfn(kvm, gfn);
        slot = __gfn_to_memslot(kvm, gfn);
        if (!slot)
                return bad_hva();
        return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}

/*
 * Requires current->mm->mmap_sem to be held
 */
static struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
        struct page *page[1];
        unsigned long addr;
        int npages;

        might_sleep();

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr)) {
                get_page(bad_page);
                return bad_page;
        }

        npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
                                NULL);

        if (npages != 1) {
                get_page(bad_page);
                return bad_page;
        }

        return page[0];
}

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
        struct page *page;

        down_read(&current->mm->mmap_sem);
        page = __gfn_to_page(kvm, gfn);
        up_read(&current->mm->mmap_sem);

        return page;
}
EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page(struct page *page)
{
        if (!PageReserved(page))
                SetPageDirty(page);
        put_page(page);
}
EXPORT_SYMBOL_GPL(kvm_release_page);

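/*
 * The guest-memory copy helpers below split an arbitrary gpa range into
 * per-page segments; next_segment() returns how much of @len still fits
 * in the current page.
 */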
static int next_segment(unsigned long len, int offset)
{
        if (len > PAGE_SIZE - offset)
                return PAGE_SIZE - offset;
        else
                return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len)
{
        int r;
        unsigned long addr;

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        r = copy_from_user(data, (void __user *)addr + offset, len);
        if (r)
                return -EFAULT;
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                data += seg;
                ++gfn;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len)
{
        int r;
        unsigned long addr;

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        r = copy_to_user((void __user *)addr + offset, data, len);
        if (r)
                return -EFAULT;
        mark_page_dirty(kvm, gfn);
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                data += seg;
                ++gfn;
        }
        return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
        void *page_virt;
        struct page *page;

        page = gfn_to_page(kvm, gfn);
        if (is_error_page(page)) {
                kvm_release_page(page);
                return -EFAULT;
        }
        page_virt = kmap_atomic(page, KM_USER0);

        memset(page_virt + offset, 0, len);

        kunmap_atomic(page_virt, KM_USER0);
        kvm_release_page(page);
        mark_page_dirty(kvm, gfn);
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                ++gfn;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

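/*
 * Record @gfn in its slot's dirty bitmap, if the slot has dirty logging
 * enabled; the bitmap is harvested by KVM_GET_DIRTY_LOG above.
 */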
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *memslot;

        gfn = unalias_gfn(kvm, gfn);
        memslot = __gfn_to_memslot(kvm, gfn);
        if (memslot && memslot->dirty_bitmap) {
                unsigned long rel_gfn = gfn - memslot->base_gfn;

                /* avoid RMW */
                if (!test_bit(rel_gfn, memslot->dirty_bitmap))
                        set_bit(rel_gfn, memslot->dirty_bitmap);
        }
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue(&vcpu->wq, &wait);

        /*
         * We will block until either an interrupt or a signal wakes us up
         */
        while (!kvm_cpu_has_interrupt(vcpu)
               && !signal_pending(current)
               && vcpu->mp_state != VCPU_MP_STATE_RUNNABLE
               && vcpu->mp_state != VCPU_MP_STATE_SIPI_RECEIVED) {
                set_current_state(TASK_INTERRUPTIBLE);
                vcpu_put(vcpu);
                schedule();
                vcpu_load(vcpu);
        }

        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&vcpu->wq, &wait);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
        if (!need_resched())
                return;
        cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
                                    struct kvm_interrupt *irq)
{
        if (irq->irq < 0 || irq->irq >= 256)
                return -EINVAL;
        if (irqchip_in_kernel(vcpu->kvm))
                return -ENXIO;
        vcpu_load(vcpu);

        set_bit(irq->irq, vcpu->irq_pending);
        set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary);

        vcpu_put(vcpu);

        return 0;
}

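/*
 * Userspace reaches the shared kvm_run area and the pio transfer page by
 * mmap()ing the vcpu fd: page 0 maps vcpu->run, page KVM_PIO_PAGE_OFFSET
 * maps vcpu->pio_data (hence KVM_GET_VCPU_MMAP_SIZE reporting two pages).
 */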
static struct page *kvm_vcpu_nopage(struct vm_area_struct *vma,
                                    unsigned long address,
                                    int *type)
{
        struct kvm_vcpu *vcpu = vma->vm_file->private_data;
        unsigned long pgoff;
        struct page *page;

        pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
        if (pgoff == 0)
                page = virt_to_page(vcpu->run);
        else if (pgoff == KVM_PIO_PAGE_OFFSET)
                page = virt_to_page(vcpu->pio_data);
        else
                return NOPAGE_SIGBUS;
        get_page(page);
        if (type != NULL)
                *type = VM_FAULT_MINOR;

        return page;
}

static struct vm_operations_struct kvm_vcpu_vm_ops = {
        .nopage = kvm_vcpu_nopage,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_vcpu_vm_ops;
        return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
        struct kvm_vcpu *vcpu = filp->private_data;

        fput(vcpu->kvm->filp);
        return 0;
}

static struct file_operations kvm_vcpu_fops = {
        .release = kvm_vcpu_release,
        .unlocked_ioctl = kvm_vcpu_ioctl,
        .compat_ioctl = kvm_vcpu_ioctl,
        .mmap = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
        int fd, r;
        struct inode *inode;
        struct file *file;

        r = anon_inode_getfd(&fd, &inode, &file,
                             "kvm-vcpu", &kvm_vcpu_fops, vcpu);
        if (r)
                return r;
        atomic_inc(&vcpu->kvm->filp->f_count);
        return fd;
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
        int r;
        struct kvm_vcpu *vcpu;

        if (!valid_vcpu(n))
                return -EINVAL;

        vcpu = kvm_arch_vcpu_create(kvm, n);
        if (IS_ERR(vcpu))
                return PTR_ERR(vcpu);

        preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

        mutex_lock(&kvm->lock);
        if (kvm->vcpus[n]) {
                r = -EEXIST;
                mutex_unlock(&kvm->lock);
                goto vcpu_destroy;
        }
        kvm->vcpus[n] = vcpu;
        mutex_unlock(&kvm->lock);

        /* Now it's all set up, let userspace reach it */
        r = create_vcpu_fd(vcpu);
        if (r < 0)
                goto unlink;
        return r;

unlink:
        mutex_lock(&kvm->lock);
        kvm->vcpus[n] = NULL;
        mutex_unlock(&kvm->lock);
vcpu_destroy:
        kvm_arch_vcpu_destory(vcpu);
        return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
        if (sigset) {
                sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
                vcpu->sigset_active = 1;
                vcpu->sigset = *sigset;
        } else
                vcpu->sigset_active = 0;
        return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
                           unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_RUN:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
                break;
        case KVM_GET_REGS: {
                struct kvm_regs kvm_regs;

                memset(&kvm_regs, 0, sizeof kvm_regs);
                r = kvm_arch_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_REGS: {
                struct kvm_regs kvm_regs;

                r = -EFAULT;
                if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
                        goto out;
                r = kvm_arch_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_GET_SREGS: {
                struct kvm_sregs kvm_sregs;

                memset(&kvm_sregs, 0, sizeof kvm_sregs);
                r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_SREGS: {
                struct kvm_sregs kvm_sregs;

                r = -EFAULT;
                if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
                        goto out;
                r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_TRANSLATE: {
                struct kvm_translation tr;

                r = -EFAULT;
                if (copy_from_user(&tr, argp, sizeof tr))
                        goto out;
                r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &tr, sizeof tr))
                        goto out;
                r = 0;
                break;
        }
        case KVM_INTERRUPT: {
                struct kvm_interrupt irq;

                r = -EFAULT;
                if (copy_from_user(&irq, argp, sizeof irq))
                        goto out;
                r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_DEBUG_GUEST: {
                struct kvm_debug_guest dbg;

                r = -EFAULT;
                if (copy_from_user(&dbg, argp, sizeof dbg))
                        goto out;
                r = kvm_arch_vcpu_ioctl_debug_guest(vcpu, &dbg);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_SIGNAL_MASK: {
                struct kvm_signal_mask __user *sigmask_arg = argp;
                struct kvm_signal_mask kvm_sigmask;
                sigset_t sigset, *p;

                p = NULL;
                if (argp) {
                        r = -EFAULT;
                        if (copy_from_user(&kvm_sigmask, argp,
                                           sizeof kvm_sigmask))
                                goto out;
                        r = -EINVAL;
                        if (kvm_sigmask.len != sizeof sigset)
                                goto out;
                        r = -EFAULT;
                        if (copy_from_user(&sigset, sigmask_arg->sigset,
                                           sizeof sigset))
                                goto out;
                        p = &sigset;
                }
                /* pass p, not &sigset: a NULL argp must clear the mask */
                r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
                break;
        }
        case KVM_GET_FPU: {
                struct kvm_fpu fpu;

                memset(&fpu, 0, sizeof fpu);
                r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, &fpu);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &fpu, sizeof fpu))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_FPU: {
                struct kvm_fpu fpu;

                r = -EFAULT;
                if (copy_from_user(&fpu, argp, sizeof fpu))
                        goto out;
                r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, &fpu);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        default:
                r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
        }
out:
        return r;
}

static long kvm_vm_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_CREATE_VCPU:
                r = kvm_vm_ioctl_create_vcpu(kvm, arg);
                if (r < 0)
                        goto out;
                break;
        case KVM_SET_USER_MEMORY_REGION: {
                struct kvm_userspace_memory_region kvm_userspace_mem;

                r = -EFAULT;
                if (copy_from_user(&kvm_userspace_mem, argp,
                                   sizeof kvm_userspace_mem))
                        goto out;

                r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
                if (r)
                        goto out;
                break;
        }
        case KVM_GET_DIRTY_LOG: {
                struct kvm_dirty_log log;

                r = -EFAULT;
                if (copy_from_user(&log, argp, sizeof log))
                        goto out;
                r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
                if (r)
                        goto out;
                break;
        }
        default:
                r = kvm_arch_vm_ioctl(filp, ioctl, arg);
        }
out:
        return r;
}

static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
                                  unsigned long address,
                                  int *type)
{
        struct kvm *kvm = vma->vm_file->private_data;
        unsigned long pgoff;
        struct page *page;

        pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
        if (!kvm_is_visible_gfn(kvm, pgoff))
                return NOPAGE_SIGBUS;
        /* current->mm->mmap_sem is already held so call lockless version */
        page = __gfn_to_page(kvm, pgoff);
        if (is_error_page(page)) {
                kvm_release_page(page);
                return NOPAGE_SIGBUS;
        }
        if (type != NULL)
                *type = VM_FAULT_MINOR;

        return page;
}

static struct vm_operations_struct kvm_vm_vm_ops = {
        .nopage = kvm_vm_nopage,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_vm_vm_ops;
        return 0;
}

static struct file_operations kvm_vm_fops = {
        .release = kvm_vm_release,
        .unlocked_ioctl = kvm_vm_ioctl,
        .compat_ioctl = kvm_vm_ioctl,
        .mmap = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
        int fd, r;
        struct inode *inode;
        struct file *file;
        struct kvm *kvm;

        kvm = kvm_create_vm();
        if (IS_ERR(kvm))
                return PTR_ERR(kvm);
        r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
        if (r) {
                kvm_destroy_vm(kvm);
                return r;
        }

        kvm->filp = file;

        return fd;
}

static long kvm_dev_ioctl(struct file *filp,
                          unsigned int ioctl, unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        long r = -EINVAL;

        switch (ioctl) {
        case KVM_GET_API_VERSION:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = KVM_API_VERSION;
                break;
        case KVM_CREATE_VM:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = kvm_dev_ioctl_create_vm();
                break;
        case KVM_CHECK_EXTENSION:
                r = kvm_dev_ioctl_check_extension((long)argp);
                break;
        case KVM_GET_VCPU_MMAP_SIZE:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = 2 * PAGE_SIZE;
                break;
        default:
                return kvm_arch_dev_ioctl(filp, ioctl, arg);
        }
out:
        return r;
}

static struct file_operations kvm_chardev_ops = {
        .unlocked_ioctl = kvm_dev_ioctl,
        .compat_ioctl = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
        KVM_MINOR,
        "kvm",
        &kvm_chardev_ops,
};

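/*
 * hardware_enable()/hardware_disable() run on a single cpu and toggle the
 * arch virtualization support there; the cpus_hardware_enabled mask makes
 * them idempotent, since the hotplug, reboot and suspend paths below may
 * each ask for the same transition.
 */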
static void hardware_enable(void *junk)
{
        int cpu = raw_smp_processor_id();

        if (cpu_isset(cpu, cpus_hardware_enabled))
                return;
        cpu_set(cpu, cpus_hardware_enabled);
        kvm_arch_hardware_enable(NULL);
}

static void hardware_disable(void *junk)
{
        int cpu = raw_smp_processor_id();

        if (!cpu_isset(cpu, cpus_hardware_enabled))
                return;
        cpu_clear(cpu, cpus_hardware_enabled);
        decache_vcpus_on_cpu(cpu);
        kvm_arch_hardware_disable(NULL);
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
                           void *v)
{
        int cpu = (long)v;

        val &= ~CPU_TASKS_FROZEN;
        switch (val) {
        case CPU_DYING:
                printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
                       cpu);
                hardware_disable(NULL);
                break;
        case CPU_UP_CANCELED:
                printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
                       cpu);
                smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
                break;
        case CPU_ONLINE:
                printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
                       cpu);
                smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
                break;
        }
        return NOTIFY_OK;
}

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
                      void *v)
{
        if (val == SYS_RESTART) {
                /*
                 * Some (well, at least mine) BIOSes hang on reboot if
                 * in vmx root mode.
                 */
                printk(KERN_INFO "kvm: exiting hardware virtualization\n");
                on_each_cpu(hardware_disable, NULL, 0, 1);
        }
        return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
        .notifier_call = kvm_reboot,
        .priority = 0,
};

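/*
 * An io bus is a small, fixed-size table of in-kernel emulated devices
 * (one bus for pio, one for mmio per vm); lookup is a linear scan that
 * asks each device whether the address falls in its range.
 */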
void kvm_io_bus_init(struct kvm_io_bus *bus)
{
        memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
        int i;

        for (i = 0; i < bus->dev_count; i++) {
                struct kvm_io_device *pos = bus->devs[i];

                kvm_iodevice_destructor(pos);
        }
}

struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
{
        int i;

        for (i = 0; i < bus->dev_count; i++) {
                struct kvm_io_device *pos = bus->devs[i];

                if (pos->in_range(pos, addr))
                        return pos;
        }

        return NULL;
}

void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
{
        BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));

        bus->devs[bus->dev_count++] = dev;
}

static struct notifier_block kvm_cpu_notifier = {
        .notifier_call = kvm_cpu_hotplug,
        .priority = 20, /* must be > scheduler priority */
};

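/*
 * Each debugfs statistic is the sum of one counter over all vms (or over
 * all vcpus of all vms), located via the counter's byte offset within
 * struct kvm or struct kvm_vcpu.
 */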
static u64 vm_stat_get(void *_offset)
{
        unsigned offset = (long)_offset;
        u64 total = 0;
        struct kvm *kvm;

        spin_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list)
                total += *(u32 *)((void *)kvm + offset);
        spin_unlock(&kvm_lock);
        return total;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static u64 vcpu_stat_get(void *_offset)
{
        unsigned offset = (long)_offset;
        u64 total = 0;
        struct kvm *kvm;
        struct kvm_vcpu *vcpu;
        int i;

        spin_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list)
                for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                        vcpu = kvm->vcpus[i];
                        if (vcpu)
                                total += *(u32 *)((void *)vcpu + offset);
                }
        spin_unlock(&kvm_lock);
        return total;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static struct file_operations *stat_fops[] = {
        [KVM_STAT_VCPU] = &vcpu_stat_fops,
        [KVM_STAT_VM]   = &vm_stat_fops,
};

static void kvm_init_debug(void)
{
        struct kvm_stats_debugfs_item *p;

        debugfs_dir = debugfs_create_dir("kvm", NULL);
        for (p = debugfs_entries; p->name; ++p)
                p->dentry = debugfs_create_file(p->name, 0444, debugfs_dir,
                                                (void *)(long)p->offset,
                                                stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
        struct kvm_stats_debugfs_item *p;

        for (p = debugfs_entries; p->name; ++p)
                debugfs_remove(p->dentry);
        debugfs_remove(debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
        hardware_disable(NULL);
        return 0;
}

static int kvm_resume(struct sys_device *dev)
{
        hardware_enable(NULL);
        return 0;
}

static struct sysdev_class kvm_sysdev_class = {
        .name = "kvm",
        .suspend = kvm_suspend,
        .resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
        .id = 0,
        .cls = &kvm_sysdev_class,
};

struct page *bad_page;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
        return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

        kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
                          struct task_struct *next)
{
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

        kvm_arch_vcpu_put(vcpu);
}

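/*
 * kvm_init() is invoked from the arch module's init routine; a minimal
 * sketch of a caller (names illustrative of the vmx module, not verified
 * against this tree):
 *
 *        static int __init vmx_init(void)
 *        {
 *                return kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
 *                                THIS_MODULE);
 *        }
 */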
int kvm_init(void *opaque, unsigned int vcpu_size,
             struct module *module)
{
        int r;
        int cpu;

        r = kvm_mmu_module_init();
        if (r)
                goto out4;

        kvm_init_debug();

        r = kvm_arch_init(opaque);
        if (r)
                goto out4;

        bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

        if (bad_page == NULL) {
                r = -ENOMEM;
                goto out;
        }

        r = kvm_arch_hardware_setup();
        if (r < 0)
                goto out;

        for_each_online_cpu(cpu) {
                smp_call_function_single(cpu,
                                kvm_arch_check_processor_compat,
                                &r, 0, 1);
                if (r < 0)
                        goto out_free_0;
        }

        on_each_cpu(hardware_enable, NULL, 0, 1);
        r = register_cpu_notifier(&kvm_cpu_notifier);
        if (r)
                goto out_free_1;
        register_reboot_notifier(&kvm_reboot_notifier);

        r = sysdev_class_register(&kvm_sysdev_class);
        if (r)
                goto out_free_2;

        r = sysdev_register(&kvm_sysdev);
        if (r)
                goto out_free_3;

        /* A kmem cache lets us meet the alignment requirements of fx_save. */
        kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
                                           __alignof__(struct kvm_vcpu),
                                           0, NULL);
        if (!kvm_vcpu_cache) {
                r = -ENOMEM;
                goto out_free_4;
        }

        kvm_chardev_ops.owner = module;

        r = misc_register(&kvm_dev);
        if (r) {
                printk(KERN_ERR "kvm: misc device register failed\n");
                goto out_free;
        }

        kvm_preempt_ops.sched_in = kvm_sched_in;
        kvm_preempt_ops.sched_out = kvm_sched_out;

        kvm_mmu_set_nonpresent_ptes(0ull, 0ull);

        return 0;

out_free:
        kmem_cache_destroy(kvm_vcpu_cache);
out_free_4:
        sysdev_unregister(&kvm_sysdev);
out_free_3:
        sysdev_class_unregister(&kvm_sysdev_class);
out_free_2:
        unregister_reboot_notifier(&kvm_reboot_notifier);
        unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_1:
        on_each_cpu(hardware_disable, NULL, 0, 1);
out_free_0:
        kvm_arch_hardware_unsetup();
out:
        kvm_arch_exit();
        kvm_exit_debug();
        kvm_mmu_module_exit();
out4:
        return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

void kvm_exit(void)
{
        misc_deregister(&kvm_dev);
        kmem_cache_destroy(kvm_vcpu_cache);
        sysdev_unregister(&kvm_sysdev);
        sysdev_class_unregister(&kvm_sysdev_class);
        unregister_reboot_notifier(&kvm_reboot_notifier);
        unregister_cpu_notifier(&kvm_cpu_notifier);
        on_each_cpu(hardware_disable, NULL, 0, 1);
        kvm_arch_hardware_unsetup();
        kvm_arch_exit();
        kvm_exit_debug();
        __free_page(bad_page);
        kvm_mmu_module_exit();
}
EXPORT_SYMBOL_GPL(kvm_exit);