KVM: Portability: Move address types to their own header file
drivers/kvm/kvm_main.c
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "kvm.h"
#include "x86.h"
#include "irq.h"

#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/desc.h>
#include <asm/pgtable.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_t cpus_hardware_enabled;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

static struct dentry *debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);

static inline int valid_vcpu(int n)
{
	return likely(n >= 0 && n < KVM_MAX_VCPUS);
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}

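/*
 * Ask every vcpu to flush its TLB: set KVM_REQ_TLB_FLUSH on each vcpu
 * and IPI the CPUs currently running one in guest mode.  ack_flush()
 * is deliberately empty; the IPI itself kicks the vcpu out of guest
 * mode so it notices the pending request.
 */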
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	int i, cpu;
	cpumask_t cpus;
	struct kvm_vcpu *vcpu;

	cpus_clear(cpus);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;
		if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpu != -1 && cpu != raw_smp_processor_id())
			cpu_set(cpu, cpus);
	}
	if (cpus_empty(cpus))
		return;
	++kvm->stat.remote_tlb_flush;
	smp_call_function_mask(cpus, ack_flush, NULL, 1);
}

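/*
 * Arch-independent vcpu setup: initialize the mutex and wait queue and
 * allocate the page shared with userspace as the kvm_run area, then
 * hand off to the arch-specific init.
 */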
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

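/*
 * The arch code allocates the VM; the generic part pins the creator's
 * mm, initializes the PIO/MMIO buses and links the VM into the global
 * vm_list used by the statistics code.
 */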
static struct kvm *kvm_create_vm(void)
{
	struct kvm *kvm = kvm_arch_create_vm();

	if (IS_ERR(kvm))
		goto out;

	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	kvm_io_bus_init(&kvm->pio_bus);
	mutex_init(&kvm->lock);
	kvm_io_bus_init(&kvm->mmio_bus);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
out:
	return kvm;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	struct mm_struct *mm = kvm->mm;

	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_io_bus_destroy(&kvm->pio_bus);
	kvm_io_bus_destroy(&kvm->mmio_bus);
	kvm_arch_destroy_vm(kvm);
	mmdrop(mm);
}

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_destroy_vm(kvm);
	return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding kvm->lock.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		new.userspace_addr = mem->userspace_addr;
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
	}

	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	*memslot = new;

	r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
	if (r) {
		*memslot = old;
		goto out_free;
	}

	kvm_free_physmem_slot(&old, &new);
	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	mutex_lock(&kvm->lock);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	mutex_unlock(&kvm->lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}

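/*
 * Illustration (userspace side, not part of this file): a VMM fills in
 * a struct kvm_userspace_memory_region with slot, flags,
 * guest_phys_addr, memory_size and userspace_addr, then issues
 * ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region); that request
 * arrives in kvm_vm_ioctl() below, which calls
 * kvm_vm_ioctl_set_memory_region() with user_alloc == 1.
 */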
int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}

int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return __gfn_to_memslot(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

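/*
 * Translate a guest frame number into the host virtual address that
 * backs it in its memslot; returns bad_hva() for unmapped gfns.
 */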
static unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn(kvm, gfn);
	slot = __gfn_to_memslot(kvm, gfn);
	if (!slot)
		return bad_hva();
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}

/*
 * Requires current->mm->mmap_sem to be held
 */
static struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	struct page *page[1];
	unsigned long addr;
	int npages;

	might_sleep();

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return bad_page;
	}

	npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
				NULL);

	if (npages != 1) {
		get_page(bad_page);
		return bad_page;
	}

	return page[0];
}

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	struct page *page;

	down_read(&current->mm->mmap_sem);
	page = __gfn_to_page(kvm, gfn);
	up_read(&current->mm->mmap_sem);

	return page;
}

EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
	put_page(page);
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_page_dirty(struct page *page)
{
	if (!PageReserved(page))
		SetPageDirty(page);
	put_page(page);
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

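/*
 * Helper for the guest read/write/clear loops below: returns the part
 * of @len that still fits in the current page.  For example, a
 * 5000-byte access starting at page offset 3000 (with 4096-byte pages)
 * is split into segments of 1096 and 3904 bytes on consecutive gfns.
 */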
static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

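/*
 * Record @gfn in its slot's dirty bitmap, if dirty logging is enabled
 * for that slot; the test_bit check avoids an atomic read-modify-write
 * for pages that are already marked dirty.
 */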
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	gfn = unalias_gfn(kvm, gfn);
	memslot = __gfn_to_memslot(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		/* avoid RMW */
		if (!test_bit(rel_gfn, memslot->dirty_bitmap))
			set_bit(rel_gfn, memslot->dirty_bitmap);
	}
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&vcpu->wq, &wait);

	/*
	 * We will block until either an interrupt or a signal wakes us up
	 */
	while (!kvm_cpu_has_interrupt(vcpu)
	       && !signal_pending(current)
	       && vcpu->mp_state != VCPU_MP_STATE_RUNNABLE
	       && vcpu->mp_state != VCPU_MP_STATE_SIPI_RECEIVED) {
		set_current_state(TASK_INTERRUPTIBLE);
		vcpu_put(vcpu);
		schedule();
		vcpu_load(vcpu);
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcpu->wq, &wait);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

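/*
 * Fault handler for the vcpu fd's mmap: page 0 of the mapping is the
 * shared kvm_run structure and page KVM_PIO_PAGE_OFFSET the PIO data
 * page, consistent with KVM_GET_VCPU_MMAP_SIZE returning 2 * PAGE_SIZE
 * below.
 */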
static struct page *kvm_vcpu_nopage(struct vm_area_struct *vma,
				    unsigned long address,
				    int *type)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	unsigned long pgoff;
	struct page *page;

	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (pgoff == 0)
		page = virt_to_page(vcpu->run);
	else if (pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->pio_data);
	else
		return NOPAGE_SIGBUS;
	get_page(page);
	if (type != NULL)
		*type = VM_FAULT_MINOR;

	return page;
}

static struct vm_operations_struct kvm_vcpu_vm_ops = {
	.nopage = kvm_vcpu_nopage,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	fput(vcpu->kvm->filp);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl = kvm_vcpu_ioctl,
	.mmap = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	int fd, r;
	struct inode *inode;
	struct file *file;

	r = anon_inode_getfd(&fd, &inode, &file,
			     "kvm-vcpu", &kvm_vcpu_fops, vcpu);
	if (r)
		return r;
	atomic_inc(&vcpu->kvm->filp->f_count);
	return fd;
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
	int r;
	struct kvm_vcpu *vcpu;

	if (!valid_vcpu(n))
		return -EINVAL;

	vcpu = kvm_arch_vcpu_create(kvm, n);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		goto vcpu_destroy;

	mutex_lock(&kvm->lock);
	if (kvm->vcpus[n]) {
		r = -EEXIST;
		mutex_unlock(&kvm->lock);
		goto vcpu_destroy;
	}
	kvm->vcpus[n] = vcpu;
	mutex_unlock(&kvm->lock);

	/* Now it's all set up, let userspace reach it */
	r = create_vcpu_fd(vcpu);
	if (r < 0)
		goto unlink;
	return r;

unlink:
	mutex_lock(&kvm->lock);
	kvm->vcpus[n] = NULL;
	mutex_unlock(&kvm->lock);
vcpu_destroy:
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs kvm_regs;

		memset(&kvm_regs, 0, sizeof kvm_regs);
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs kvm_regs;

		r = -EFAULT;
		if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SREGS: {
		struct kvm_sregs kvm_sregs;

		memset(&kvm_sregs, 0, sizeof kvm_sregs);
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		struct kvm_sregs kvm_sregs;

		r = -EFAULT;
		if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_DEBUG_GUEST: {
		struct kvm_debug_guest dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_debug_guest(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		/* pass p, not &sigset: p stays NULL when no argument was
		   supplied, which clears the mask instead of installing
		   uninitialized stack data */
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		struct kvm_fpu fpu;

		memset(&fpu, 0, sizeof fpu);
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &fpu, sizeof fpu))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		struct kvm_fpu fpu;

		r = -EFAULT;
		if (copy_from_user(&fpu, argp, sizeof fpu))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
				   sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
				  unsigned long address,
				  int *type)
{
	struct kvm *kvm = vma->vm_file->private_data;
	unsigned long pgoff;
	struct page *page;

	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (!kvm_is_visible_gfn(kvm, pgoff))
		return NOPAGE_SIGBUS;
	/* current->mm->mmap_sem is already held so call lockless version */
	page = __gfn_to_page(kvm, pgoff);
	if (is_error_page(page)) {
		kvm_release_page_clean(page);
		return NOPAGE_SIGBUS;
	}
	if (type != NULL)
		*type = VM_FAULT_MINOR;

	return page;
}

static struct vm_operations_struct kvm_vm_vm_ops = {
	.nopage = kvm_vm_nopage,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static struct file_operations kvm_vm_fops = {
	.release = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
	.compat_ioctl = kvm_vm_ioctl,
	.mmap = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
	int fd, r;
	struct inode *inode;
	struct file *file;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
	if (r) {
		kvm_destroy_vm(kvm);
		return r;
	}

	kvm->filp = file;

	return fd;
}

static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension((long)argp);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = 2 * PAGE_SIZE;
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

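/*
 * Enable or disable hardware virtualization on the local CPU.  The
 * cpus_hardware_enabled mask keeps both operations idempotent, so they
 * can be called from hotplug, suspend/resume and reboot paths alike.
 */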
static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_set(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_enable(NULL);
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_clear(cpu, cpus_hardware_enabled);
	decache_vcpus_on_cpu(cpu);
	kvm_arch_hardware_disable(NULL);
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
		break;
	}
	return NOTIFY_OK;
}

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	if (val == SYS_RESTART) {
		/*
		 * Some (well, at least mine) BIOSes hang on reboot if
		 * in vmx root mode.
		 */
		printk(KERN_INFO "kvm: exiting hardware virtualization\n");
		on_each_cpu(hardware_disable, NULL, 0, 1);
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

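/*
 * A kvm_io_bus is a fixed-size array of in-kernel emulated devices
 * (PIO or MMIO).  Lookup is a linear scan that asks each device
 * whether it claims the address via its in_range() callback.
 */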
void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}

struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		if (pos->in_range(pos, addr))
			return pos;
	}

	return NULL;
}

void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
{
	BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));

	bus->devs[bus->dev_count++] = dev;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};

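/*
 * debugfs statistics: each entry in debugfs_entries records a byte
 * offset of a u32 counter inside struct kvm or struct kvm_vcpu; a read
 * of the file sums that counter over every VM (and every vcpu) on
 * vm_list.
 */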
static u64 vm_stat_get(void *_offset)
{
	unsigned offset = (long)_offset;
	u64 total = 0;
	struct kvm *kvm;

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		total += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return total;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static u64 vcpu_stat_get(void *_offset)
{
	unsigned offset = (long)_offset;
	u64 total = 0;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = kvm->vcpus[i];
			if (vcpu)
				total += *(u32 *)((void *)vcpu + offset);
		}
	spin_unlock(&kvm_lock);
	return total;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};

static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};

struct page *bad_page;

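/*
 * Preempt notifiers: between vcpu_load() and vcpu_put() the vcpu owns
 * per-CPU state, so when the task is scheduled out (or back in) the
 * arch load/put hooks are invoked to migrate that state.
 */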
static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}

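/*
 * Module initialization, called from the arch module (e.g. kvm-intel).
 * Sets up debugfs, arch state, hardware virtualization on all online
 * CPUs, the hotplug/reboot/sysdev hooks, the vcpu slab cache and
 * finally the /dev/kvm misc device; the out_free_* labels unwind in
 * reverse order on failure.
 */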
int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module)
{
	int r;
	int cpu;

	kvm_init_debug();

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 0, 1);
		if (r < 0)
			goto out_free_1;
	}

	on_each_cpu(hardware_enable, NULL, 0, 1);
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu),
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	kvm_chardev_ops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
	on_each_cpu(hardware_disable, NULL, 0, 1);
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0:
	__free_page(bad_page);
out:
	kvm_arch_exit();
	kvm_exit_debug();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

void kvm_exit(void)
{
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 0, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	kvm_exit_debug();
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);