KVM: Portability: Make kvm_vcpu_ioctl_translate arch dependent
[deliverable/linux.git] drivers/kvm/kvm_main.c
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "kvm.h"
#include "x86.h"
#include "irq.h"

#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/desc.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_t cpus_hardware_enabled;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

static struct dentry *debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);

static inline int valid_vcpu(int n)
{
	return likely(n >= 0 && n < KVM_MAX_VCPUS);
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}

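/*
 * Mark every vcpu as needing a TLB flush before its next guest entry, and
 * interrupt the cpus that are currently running a vcpu so the request is
 * noticed right away; ack_flush() exists only to make that IPI synchronous.
 */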
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	int i, cpu;
	cpumask_t cpus;
	struct kvm_vcpu *vcpu;

	cpus_clear(cpus);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;
		if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpu != -1 && cpu != raw_smp_processor_id())
			cpu_set(cpu, cpus);
	}
	smp_call_function_mask(cpus, ack_flush, NULL, 1);
}

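/*
 * Architecture-independent vcpu setup: initialize the mutex and wait queue
 * and allocate the shared kvm_run page before handing off to
 * kvm_arch_vcpu_init().
 */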
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

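/*
 * Allocate and minimally initialize a VM: set up its lock, io busses and
 * mmu page list, and link it into the global vm_list.
 */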
static struct kvm *kvm_create_vm(void)
{
	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	kvm_io_bus_init(&kvm->pio_bus);
	mutex_init(&kvm->lock);
	INIT_LIST_HEAD(&kvm->active_mmu_pages);
	kvm_io_bus_init(&kvm->mmio_bus);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
	return kvm;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
}

static void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;

	/*
	 * Unpin any mmu pages first.
	 */
	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i])
			kvm_unload_vcpu_mmu(kvm->vcpus[i]);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_arch_vcpu_free(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
}

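/*
 * Tear a VM down: unlink it from vm_list, destroy its io busses and
 * in-kernel irqchip state, and free all vcpus and guest memory.
 */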
static void kvm_destroy_vm(struct kvm *kvm)
{
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_io_bus_destroy(&kvm->pio_bus);
	kvm_io_bus_destroy(&kvm->mmio_bus);
	kfree(kvm->vpic);
	kfree(kvm->vioapic);
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	kfree(kvm);
}

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_destroy_vm(kvm);
	return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding kvm->lock.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		if (user_alloc)
			new.userspace_addr = mem->userspace_addr;
		else {
			down_write(&current->mm->mmap_sem);
			new.userspace_addr = do_mmap(NULL, 0,
						     npages * PAGE_SIZE,
						     PROT_READ | PROT_WRITE,
						     MAP_SHARED | MAP_ANONYMOUS,
						     0);
			up_write(&current->mm->mmap_sem);

			if (IS_ERR((void *)new.userspace_addr))
				goto out_free;
		}
	} else {
		if (!old.user_alloc && old.rmap) {
			int ret;

			down_write(&current->mm->mmap_sem);
			ret = do_munmap(current->mm, old.userspace_addr,
					old.npages * PAGE_SIZE);
			up_write(&current->mm->mmap_sem);
			if (ret < 0)
				printk(KERN_WARNING
				       "kvm_vm_ioctl_set_memory_region: "
				       "failed to munmap memory\n");
		}
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
	}

	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	if (!kvm->n_requested_mmu_pages) {
		unsigned int n_pages;

		if (npages) {
			n_pages = npages * KVM_PERMILLE_MMU_PAGES / 1000;
			kvm_mmu_change_mmu_pages(kvm, kvm->n_alloc_mmu_pages +
						 n_pages);
		} else {
			unsigned int nr_mmu_pages;

			n_pages = old.npages * KVM_PERMILLE_MMU_PAGES / 1000;
			nr_mmu_pages = kvm->n_alloc_mmu_pages - n_pages;
			nr_mmu_pages = max(nr_mmu_pages,
					   (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
			kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
		}
	}

	*memslot = new;

	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
	kvm_flush_remote_tlbs(kvm);

	kvm_free_physmem_slot(&old, &new);
	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

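/*
 * Wrapper around __kvm_set_memory_region() that takes kvm->lock for the
 * caller.
 */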
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	mutex_lock(&kvm->lock);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	mutex_unlock(&kvm->lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				      struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	mutex_lock(&kvm->lock);

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (any) {
		kvm_mmu_slot_remove_write_access(kvm, log->slot);
		kvm_flush_remote_tlbs(kvm);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;

out:
	mutex_unlock(&kvm->lock);
	return r;
}

int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

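/*
 * Translate a guest frame number through the VM's memory alias table;
 * returns the gfn unchanged if no alias covers it.
 */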
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_mem_alias *alias;

	for (i = 0; i < kvm->naliases; ++i) {
		alias = &kvm->aliases[i];
		if (gfn >= alias->base_gfn
		    && gfn < alias->base_gfn + alias->npages)
			return alias->target_gfn + gfn - alias->base_gfn;
	}
	return gfn;
}

static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return __gfn_to_memslot(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

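/*
 * Convert a guest frame number to the host userspace virtual address of the
 * memory backing it, or bad_hva() if the gfn is not covered by any slot.
 */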
static unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn(kvm, gfn);
	slot = __gfn_to_memslot(kvm, gfn);
	if (!slot)
		return bad_hva();
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}

/*
 * Requires current->mm->mmap_sem to be held
 */
static struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	struct page *page[1];
	unsigned long addr;
	int npages;

	might_sleep();

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return bad_page;
	}

	npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
				NULL);

	if (npages != 1) {
		get_page(bad_page);
		return bad_page;
	}

	return page[0];
}

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	struct page *page;

	down_read(&current->mm->mmap_sem);
	page = __gfn_to_page(kvm, gfn);
	up_read(&current->mm->mmap_sem);

	return page;
}

EXPORT_SYMBOL_GPL(gfn_to_page);

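/*
 * Release the reference obtained from gfn_to_page(); the page is marked
 * dirty unless it is a reserved page.
 */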
void kvm_release_page(struct page *page)
{
	if (!PageReserved(page))
		SetPageDirty(page);
	put_page(page);
}
EXPORT_SYMBOL_GPL(kvm_release_page);

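/*
 * Helpers for copying data to and from guest physical memory.  Accesses are
 * split into per-page segments; next_segment() returns how much of the
 * remaining length fits in the current page.
 */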
static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	void *page_virt;
	struct page *page;

	page = gfn_to_page(kvm, gfn);
	if (is_error_page(page)) {
		kvm_release_page(page);
		return -EFAULT;
	}
	page_virt = kmap_atomic(page, KM_USER0);

	memset(page_virt + offset, 0, len);

	kunmap_atomic(page_virt, KM_USER0);
	kvm_release_page(page);
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

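/*
 * Record that a guest frame has been written: set the corresponding bit in
 * the slot's dirty bitmap, if dirty logging is enabled for that slot.
 */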
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	gfn = unalias_gfn(kvm, gfn);
	memslot = __gfn_to_memslot(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		/* avoid RMW */
		if (!test_bit(rel_gfn, memslot->dirty_bitmap))
			set_bit(rel_gfn, memslot->dirty_bitmap);
	}
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&vcpu->wq, &wait);

	/*
	 * We will block until either an interrupt or a signal wakes us up
	 */
	while (!kvm_cpu_has_interrupt(vcpu)
	       && !signal_pending(current)
	       && vcpu->mp_state != VCPU_MP_STATE_RUNNABLE
	       && vcpu->mp_state != VCPU_MP_STATE_SIPI_RECEIVED) {
		set_current_state(TASK_INTERRUPTIBLE);
		vcpu_put(vcpu);
		schedule();
		vcpu_load(vcpu);
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcpu->wq, &wait);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

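/*
 * Inject a userspace-supplied external interrupt into the vcpu's pending
 * interrupt bitmaps.  Only valid while the interrupt controllers are
 * emulated in userspace; with an in-kernel irqchip this returns -ENXIO.
 */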
static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_interrupt *irq)
{
	if (irq->irq < 0 || irq->irq >= 256)
		return -EINVAL;
	if (irqchip_in_kernel(vcpu->kvm))
		return -ENXIO;
	vcpu_load(vcpu);

	set_bit(irq->irq, vcpu->irq_pending);
	set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary);

	vcpu_put(vcpu);

	return 0;
}

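/*
 * Fault handler for the vcpu mmap area: offset 0 maps the kvm_run structure
 * and KVM_PIO_PAGE_OFFSET maps the PIO data page.
 */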
static struct page *kvm_vcpu_nopage(struct vm_area_struct *vma,
				    unsigned long address,
				    int *type)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	unsigned long pgoff;
	struct page *page;

	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (pgoff == 0)
		page = virt_to_page(vcpu->run);
	else if (pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->pio_data);
	else
		return NOPAGE_SIGBUS;
	get_page(page);
	if (type != NULL)
		*type = VM_FAULT_MINOR;

	return page;
}

static struct vm_operations_struct kvm_vcpu_vm_ops = {
	.nopage = kvm_vcpu_nopage,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	fput(vcpu->kvm->filp);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl   = kvm_vcpu_ioctl,
	.mmap           = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	int fd, r;
	struct inode *inode;
	struct file *file;

	r = anon_inode_getfd(&fd, &inode, &file,
			     "kvm-vcpu", &kvm_vcpu_fops, vcpu);
	if (r)
		return r;
	atomic_inc(&vcpu->kvm->filp->f_count);
	return fd;
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
	int r;
	struct kvm_vcpu *vcpu;

	if (!valid_vcpu(n))
		return -EINVAL;

	vcpu = kvm_arch_vcpu_create(kvm, n);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	mutex_lock(&kvm->lock);
	if (kvm->vcpus[n]) {
		r = -EEXIST;
		mutex_unlock(&kvm->lock);
		goto vcpu_destroy;
	}
	kvm->vcpus[n] = vcpu;
	mutex_unlock(&kvm->lock);

	/* Now it's all set up, let userspace reach it */
	r = create_vcpu_fd(vcpu);
	if (r < 0)
		goto unlink;
	return r;

unlink:
	mutex_lock(&kvm->lock);
	kvm->vcpus[n] = NULL;
	mutex_unlock(&kvm->lock);
vcpu_destroy:
	kvm_arch_vcpu_destory(vcpu);
	return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

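/*
 * Dispatcher for the per-vcpu ioctls; anything not handled here is passed
 * on to kvm_arch_vcpu_ioctl().
 */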
static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs kvm_regs;

		memset(&kvm_regs, 0, sizeof kvm_regs);
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs kvm_regs;

		r = -EFAULT;
		if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SREGS: {
		struct kvm_sregs kvm_sregs;

		memset(&kvm_sregs, 0, sizeof kvm_sregs);
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		struct kvm_sregs kvm_sregs;

		r = -EFAULT;
		if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;

		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof irq))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_DEBUG_GUEST: {
		struct kvm_debug_guest dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_debug_guest(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
		break;
	}
	case KVM_GET_FPU: {
		struct kvm_fpu fpu;

		memset(&fpu, 0, sizeof fpu);
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &fpu, sizeof fpu))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		struct kvm_fpu fpu;

		r = -EFAULT;
		if (copy_from_user(&fpu, argp, sizeof fpu))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
				   sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

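/*
 * Fault handler for mmap() on a VM fd: the file offset is interpreted as a
 * guest frame number and the page backing it is returned.
 */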
static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
				  unsigned long address,
				  int *type)
{
	struct kvm *kvm = vma->vm_file->private_data;
	unsigned long pgoff;
	struct page *page;

	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (!kvm_is_visible_gfn(kvm, pgoff))
		return NOPAGE_SIGBUS;
	/* current->mm->mmap_sem is already held so call lockless version */
	page = __gfn_to_page(kvm, pgoff);
	if (is_error_page(page)) {
		kvm_release_page(page);
		return NOPAGE_SIGBUS;
	}
	if (type != NULL)
		*type = VM_FAULT_MINOR;

	return page;
}

static struct vm_operations_struct kvm_vm_vm_ops = {
	.nopage = kvm_vm_nopage,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static struct file_operations kvm_vm_fops = {
	.release        = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
	.compat_ioctl   = kvm_vm_ioctl,
	.mmap           = kvm_vm_mmap,
};

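/*
 * Create a new VM and return a file descriptor (backed by an anonymous
 * inode) through which userspace drives all further VM-level ioctls.
 */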
static int kvm_dev_ioctl_create_vm(void)
{
	int fd, r;
	struct inode *inode;
	struct file *file;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
	if (r) {
		kvm_destroy_vm(kvm);
		return r;
	}

	kvm->filp = file;

	return fd;
}

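/*
 * ioctls on /dev/kvm itself: API version, capability queries, VM creation
 * and the vcpu mmap size; everything else is forwarded to
 * kvm_arch_dev_ioctl().
 */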
static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension((long)argp);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = 2 * PAGE_SIZE;
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl   = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

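/*
 * Enable or disable the virtualization extensions on the current cpu,
 * tracking which cpus already have them enabled in cpus_hardware_enabled.
 */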
static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_set(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_enable(NULL);
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_clear(cpu, cpus_hardware_enabled);
	decache_vcpus_on_cpu(cpu);
	kvm_arch_hardware_disable(NULL);
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
		break;
	}
	return NOTIFY_OK;
}

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	if (val == SYS_RESTART) {
		/*
		 * Some (well, at least mine) BIOSes hang on reboot if
		 * in vmx root mode.
		 */
		printk(KERN_INFO "kvm: exiting hardware virtualization\n");
		on_each_cpu(hardware_disable, NULL, 0, 1);
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

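/*
 * Minimal registry of in-kernel MMIO/PIO devices: a bus is a fixed-size
 * array of kvm_io_device pointers that is searched linearly by guest
 * physical (or port) address.
 */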
void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}

struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		if (pos->in_range(pos, addr))
			return pos;
	}

	return NULL;
}

void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
{
	BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));

	bus->devs[bus->dev_count++] = dev;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};

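/*
 * Each debugfs statistics file sums one per-vcpu u32 counter over every vcpu
 * of every VM; the counter is identified by its byte offset within
 * struct kvm_vcpu.
 */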
static u64 stat_get(void *_offset)
{
	unsigned offset = (long)_offset;
	u64 total = 0;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = kvm->vcpus[i];
			if (vcpu)
				total += *(u32 *)((void *)vcpu + offset);
		}
	spin_unlock(&kvm_lock);
	return total;
}

DEFINE_SIMPLE_ATTRIBUTE(stat_fops, stat_get, NULL, "%llu\n");

static __init void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, debugfs_dir,
						(void *)(long)p->offset,
						&stat_fops);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};

struct page *bad_page;

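/*
 * Preempt notifiers let kvm save and restore guest state whenever the vcpu
 * thread is scheduled out and back in while a vcpu is loaded.
 */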
static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}

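/*
 * Common module initialization, invoked by the vendor modules (kvm-intel,
 * kvm-amd) with their architecture-specific data: sets up the MMU module,
 * debugfs statistics, hardware enabling on all online cpus, the hotplug,
 * reboot and suspend hooks, the vcpu slab cache and the /dev/kvm device.
 */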
int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module)
{
	int r;
	int cpu;

	r = kvm_mmu_module_init();
	if (r)
		goto out4;

	kvm_init_debug();

	r = kvm_arch_init(opaque);
	if (r)
		goto out4;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 0, 1);
		if (r < 0)
			goto out_free_0;
	}

	on_each_cpu(hardware_enable, NULL, 0, 1);
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_1;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_2;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_3;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu), 0, 0);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_4;
	}

	kvm_chardev_ops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	kvm_mmu_set_nonpresent_ptes(0ull, 0ull);

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_4:
	sysdev_unregister(&kvm_sysdev);
out_free_3:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_2:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_1:
	on_each_cpu(hardware_disable, NULL, 0, 1);
out_free_0:
	kvm_arch_hardware_unsetup();
out:
	kvm_arch_exit();
	kvm_exit_debug();
	kvm_mmu_module_exit();
out4:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

void kvm_exit(void)
{
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 0, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	kvm_exit_debug();
	__free_page(bad_page);
	kvm_mmu_module_exit();
}
EXPORT_SYMBOL_GPL(kvm_exit);