KVM: Portability: Move kvm_x86_ops to x86.c
drivers/kvm/kvm_main.c
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "kvm.h"
#include "x86.h"
#include "irq.h"

#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/desc.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static DEFINE_SPINLOCK(kvm_lock);
static LIST_HEAD(vm_list);

static cpumask_t cpus_hardware_enabled;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

static struct dentry *debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);

static inline int valid_vcpu(int n)
{
	return likely(n >= 0 && n < KVM_MAX_VCPUS);
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}
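
/*
 * Usage sketch (added commentary, not part of the original file):
 *
 *	vcpu_load(vcpu);
 *	... touch vcpu state, possibly enter the guest ...
 *	vcpu_put(vcpu);
 *
 * vcpu_load() takes vcpu->mutex and loads per-cpu arch state; the preempt
 * notifier it registers is what lets kvm_sched_in()/kvm_sched_out() near
 * the bottom of this file migrate that state if the holding task is
 * preempted or rescheduled onto another cpu.
 */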

static void ack_flush(void *_completed)
{
}

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	int i, cpu;
	cpumask_t cpus;
	struct kvm_vcpu *vcpu;

	cpus_clear(cpus);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;
		if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpu != -1 && cpu != raw_smp_processor_id())
			cpu_set(cpu, cpus);
	}
	smp_call_function_mask(cpus, ack_flush, NULL, 1);
}
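
/*
 * Added note: the flush is two-phase.  Setting KVM_REQ_TLB_FLUSH in
 * vcpu->requests makes each vcpu flush on its next guest entry; the empty
 * ack_flush() IPI only serves to kick currently-running vcpus out of guest
 * mode so the request bit is observed before any further guest access.
 */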

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->mmu.root_hpa = INVALID_PAGE;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	if (!irqchip_in_kernel(kvm) || id == 0)
		vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
	else
		vcpu->mp_state = VCPU_MP_STATE_UNINITIALIZED;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail_free_run;
	}
	vcpu->pio_data = page_address(page);

	r = kvm_mmu_create(vcpu);
	if (r < 0)
		goto fail_free_pio_data;

	if (irqchip_in_kernel(kvm)) {
		r = kvm_create_lapic(vcpu);
		if (r < 0)
			goto fail_mmu_destroy;
	}

	return 0;

fail_mmu_destroy:
	kvm_mmu_destroy(vcpu);
fail_free_pio_data:
	free_page((unsigned long)vcpu->pio_data);
fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_free_lapic(vcpu);
	kvm_mmu_destroy(vcpu);
	free_page((unsigned long)vcpu->pio_data);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
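
/*
 * Added note: the two zeroed pages allocated in kvm_vcpu_init() back the
 * userspace-visible communication area: page 0 is vcpu->run and page
 * KVM_PIO_PAGE_OFFSET is vcpu->pio_data.  They are handed out by
 * kvm_vcpu_nopage() below, which is also why KVM_GET_VCPU_MMAP_SIZE
 * reports 2 * PAGE_SIZE.
 */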

static struct kvm *kvm_create_vm(void)
{
	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	kvm_io_bus_init(&kvm->pio_bus);
	mutex_init(&kvm->lock);
	INIT_LIST_HEAD(&kvm->active_mmu_pages);
	kvm_io_bus_init(&kvm->mmio_bus);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
	return kvm;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
}

static void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;

	/*
	 * Unpin any mmu pages first.
	 */
	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i])
			kvm_unload_vcpu_mmu(kvm->vcpus[i]);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_x86_ops->vcpu_free(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_io_bus_destroy(&kvm->pio_bus);
	kvm_io_bus_destroy(&kvm->mmio_bus);
	kfree(kvm->vpic);
	kfree(kvm->vioapic);
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	kfree(kvm);
}

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_destroy_vm(kvm);
	return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding kvm->lock.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		if (user_alloc)
			new.userspace_addr = mem->userspace_addr;
		else {
			down_write(&current->mm->mmap_sem);
			new.userspace_addr = do_mmap(NULL, 0,
						     npages * PAGE_SIZE,
						     PROT_READ | PROT_WRITE,
						     MAP_SHARED | MAP_ANONYMOUS,
						     0);
			up_write(&current->mm->mmap_sem);

			if (IS_ERR((void *)new.userspace_addr))
				goto out_free;
		}
	} else {
		if (!old.user_alloc && old.rmap) {
			int ret;

			down_write(&current->mm->mmap_sem);
			ret = do_munmap(current->mm, old.userspace_addr,
					old.npages * PAGE_SIZE);
			up_write(&current->mm->mmap_sem);
			if (ret < 0)
				printk(KERN_WARNING
				       "kvm_vm_ioctl_set_memory_region: "
				       "failed to munmap memory\n");
		}
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
	}

	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	if (!kvm->n_requested_mmu_pages) {
		unsigned int n_pages;

		if (npages) {
			n_pages = npages * KVM_PERMILLE_MMU_PAGES / 1000;
			kvm_mmu_change_mmu_pages(kvm, kvm->n_alloc_mmu_pages +
						 n_pages);
		} else {
			unsigned int nr_mmu_pages;

			n_pages = old.npages * KVM_PERMILLE_MMU_PAGES / 1000;
			nr_mmu_pages = kvm->n_alloc_mmu_pages - n_pages;
			nr_mmu_pages = max(nr_mmu_pages,
					   (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
			kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
		}
	}

	*memslot = new;

	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
	kvm_flush_remote_tlbs(kvm);

	kvm_free_physmem_slot(&old, &new);
	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	mutex_lock(&kvm->lock);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	mutex_unlock(&kvm->lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);
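
/*
 * Worked example (added commentary): with an existing slot covering gfns
 * [0x100, 0x200), creating a new slot with base_gfn 0x180 and npages 0x100
 * fails the overlap check in __kvm_set_memory_region(), since neither
 * 0x180 + 0x100 <= 0x100 nor 0x180 >= 0x200 holds, so the call returns
 * -EEXIST.
 */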

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				      struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	mutex_lock(&kvm->lock);

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (any) {
		kvm_mmu_slot_remove_write_access(kvm, log->slot);
		kvm_flush_remote_tlbs(kvm);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;

out:
	mutex_unlock(&kvm->lock);
	return r;
}
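
/*
 * Added note: the bitmap size above rounds npages up to a whole number of
 * longs.  On a 64-bit host, a 1000-page slot gives
 * ALIGN(1000, 64) / 8 = 128 bytes (sixteen longs); the trailing 24 bits
 * are simply never set.
 */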

int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_mem_alias *alias;

	for (i = 0; i < kvm->naliases; ++i) {
		alias = &kvm->aliases[i];
		if (gfn >= alias->base_gfn
		    && gfn < alias->base_gfn + alias->npages)
			return alias->target_gfn + gfn - alias->base_gfn;
	}
	return gfn;
}

static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return __gfn_to_memslot(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

static unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn(kvm, gfn);
	slot = __gfn_to_memslot(kvm, gfn);
	if (!slot)
		return bad_hva();
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}

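/*
 * Added note: once the slot is found, gfn_to_hva() is pure arithmetic.
 * A slot with base_gfn 0x100 and userspace_addr 0x7f0000000000 maps
 * gfn 0x123 to hva 0x7f0000000000 + 0x23 * PAGE_SIZE.
 */
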
/*
 * Requires current->mm->mmap_sem to be held
 */
static struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	struct page *page[1];
	unsigned long addr;
	int npages;

	might_sleep();

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return bad_page;
	}

	npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
				NULL);

	if (npages != 1) {
		get_page(bad_page);
		return bad_page;
	}

	return page[0];
}

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	struct page *page;

	down_read(&current->mm->mmap_sem);
	page = __gfn_to_page(kvm, gfn);
	up_read(&current->mm->mmap_sem);

	return page;
}
EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page(struct page *page)
{
	if (!PageReserved(page))
		SetPageDirty(page);
	put_page(page);
}
EXPORT_SYMBOL_GPL(kvm_release_page);

static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	void *page_virt;
	struct page *page;

	page = gfn_to_page(kvm, gfn);
	if (is_error_page(page)) {
		kvm_release_page(page);
		return -EFAULT;
	}
	page_virt = kmap_atomic(page, KM_USER0);

	memset(page_virt + offset, 0, len);

	kunmap_atomic(page_virt, KM_USER0);
	kvm_release_page(page);
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

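/*
 * Worked example (added commentary): a 100-byte kvm_write_guest() at
 * gpa 0x1ff0 with 4K pages is split by next_segment() into a 16-byte
 * copy at offset 0xff0 of gfn 1 and an 84-byte copy at offset 0 of
 * gfn 2, each going through gfn_to_hva() and copy_to_user().
 */
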
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	gfn = unalias_gfn(kvm, gfn);
	memslot = __gfn_to_memslot(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		/* avoid RMW */
		if (!test_bit(rel_gfn, memslot->dirty_bitmap))
			set_bit(rel_gfn, memslot->dirty_bitmap);
	}
}

/*
 * The vcpu has executed a HLT instruction with the in-kernel irqchip
 * enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&vcpu->wq, &wait);

	/*
	 * We will block until either an interrupt or a signal wakes us up
	 */
	while (!kvm_cpu_has_interrupt(vcpu)
	       && !signal_pending(current)
	       && vcpu->mp_state != VCPU_MP_STATE_RUNNABLE
	       && vcpu->mp_state != VCPU_MP_STATE_SIPI_RECEIVED) {
		set_current_state(TASK_INTERRUPTIBLE);
		vcpu_put(vcpu);
		schedule();
		vcpu_load(vcpu);
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcpu->wq, &wait);
}

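/*
 * Added note: kvm_vcpu_block() is the classic wait-queue sleep loop:
 * register on vcpu->wq, re-check the wakeup conditions, then mark the task
 * TASK_INTERRUPTIBLE and schedule().  The vcpu_put()/vcpu_load() pair
 * around schedule() drops vcpu->mutex while asleep, so the path injecting
 * the wakeup interrupt can take the vcpu in the meantime.
 */
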
void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

/*
 * Translate a guest virtual address to a guest physical address.
 */
static int kvm_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				    struct kvm_translation *tr)
{
	unsigned long vaddr = tr->linear_address;
	gpa_t gpa;

	vcpu_load(vcpu);
	mutex_lock(&vcpu->kvm->lock);
	gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
	tr->physical_address = gpa;
	tr->valid = gpa != UNMAPPED_GVA;
	tr->writeable = 1;
	tr->usermode = 0;
	mutex_unlock(&vcpu->kvm->lock);
	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_interrupt *irq)
{
	if (irq->irq < 0 || irq->irq >= 256)
		return -EINVAL;
	if (irqchip_in_kernel(vcpu->kvm))
		return -ENXIO;
	vcpu_load(vcpu);

	set_bit(irq->irq, vcpu->irq_pending);
	set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary);

	vcpu_put(vcpu);

	return 0;
}

static struct page *kvm_vcpu_nopage(struct vm_area_struct *vma,
				    unsigned long address,
				    int *type)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	unsigned long pgoff;
	struct page *page;

	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (pgoff == 0)
		page = virt_to_page(vcpu->run);
	else if (pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->pio_data);
	else
		return NOPAGE_SIGBUS;
	get_page(page);
	if (type != NULL)
		*type = VM_FAULT_MINOR;

	return page;
}

static struct vm_operations_struct kvm_vcpu_vm_ops = {
	.nopage = kvm_vcpu_nopage,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	fput(vcpu->kvm->filp);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl   = kvm_vcpu_ioctl,
	.mmap           = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	int fd, r;
	struct inode *inode;
	struct file *file;

	r = anon_inode_getfd(&fd, &inode, &file,
			     "kvm-vcpu", &kvm_vcpu_fops, vcpu);
	if (r)
		return r;
	atomic_inc(&vcpu->kvm->filp->f_count);
	return fd;
}

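/*
 * Added note: the atomic_inc() on the VM file's refcount in
 * create_vcpu_fd() pairs with the fput() in kvm_vcpu_release(), so the VM
 * cannot be destroyed while any of its vcpu fds remains open, even if
 * userspace closes the VM fd first.
 */
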
/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
	int r;
	struct kvm_vcpu *vcpu;

	if (!valid_vcpu(n))
		return -EINVAL;

	vcpu = kvm_x86_ops->vcpu_create(kvm, n);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	/* We do fxsave: this must be aligned. */
	BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);

	vcpu_load(vcpu);
	r = kvm_x86_ops->vcpu_reset(vcpu);
	if (r == 0)
		r = kvm_mmu_setup(vcpu);
	vcpu_put(vcpu);
	if (r < 0)
		goto free_vcpu;

	mutex_lock(&kvm->lock);
	if (kvm->vcpus[n]) {
		r = -EEXIST;
		mutex_unlock(&kvm->lock);
		goto mmu_unload;
	}
	kvm->vcpus[n] = vcpu;
	mutex_unlock(&kvm->lock);

	/* Now it's all set up, let userspace reach it */
	r = create_vcpu_fd(vcpu);
	if (r < 0)
		goto unlink;
	return r;

unlink:
	mutex_lock(&kvm->lock);
	kvm->vcpus[n] = NULL;
	mutex_unlock(&kvm->lock);

mmu_unload:
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);

free_vcpu:
	kvm_x86_ops->vcpu_free(vcpu);
	return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs kvm_regs;

		memset(&kvm_regs, 0, sizeof kvm_regs);
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs kvm_regs;

		r = -EFAULT;
		if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SREGS: {
		struct kvm_sregs kvm_sregs;

		memset(&kvm_sregs, 0, sizeof kvm_sregs);
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		struct kvm_sregs kvm_sregs;

		r = -EFAULT;
		if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;

		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof irq))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_DEBUG_GUEST: {
		struct kvm_debug_guest dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_debug_guest(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		/*
		 * Pass p, not &sigset: a NULL argp means "clear the mask",
		 * and sigset is uninitialized in that case.
		 */
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		struct kvm_fpu fpu;

		memset(&fpu, 0, sizeof fpu);
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &fpu, sizeof fpu))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		struct kvm_fpu fpu;

		r = -EFAULT;
		if (copy_from_user(&fpu, argp, sizeof fpu))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
				   sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
				  unsigned long address,
				  int *type)
{
	struct kvm *kvm = vma->vm_file->private_data;
	unsigned long pgoff;
	struct page *page;

	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (!kvm_is_visible_gfn(kvm, pgoff))
		return NOPAGE_SIGBUS;
	/* current->mm->mmap_sem is already held so call lockless version */
	page = __gfn_to_page(kvm, pgoff);
	if (is_error_page(page)) {
		kvm_release_page(page);
		return NOPAGE_SIGBUS;
	}
	if (type != NULL)
		*type = VM_FAULT_MINOR;

	return page;
}

static struct vm_operations_struct kvm_vm_vm_ops = {
	.nopage = kvm_vm_nopage,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static struct file_operations kvm_vm_fops = {
	.release        = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
	.compat_ioctl   = kvm_vm_ioctl,
	.mmap           = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
	int fd, r;
	struct inode *inode;
	struct file *file;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
	if (r) {
		kvm_destroy_vm(kvm);
		return r;
	}

	kvm->filp = file;

	return fd;
}

static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION: {
		int ext = (long)argp;

		switch (ext) {
		case KVM_CAP_IRQCHIP:
		case KVM_CAP_HLT:
		case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
		case KVM_CAP_USER_MEMORY:
		case KVM_CAP_SET_TSS_ADDR:
			r = 1;
			break;
		default:
			r = 0;
			break;
		}
		break;
	}
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = 2 * PAGE_SIZE;
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl   = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

/*
 * Make sure that a cpu that is being hot-unplugged does not have any vcpus
 * cached on it.
 */
static void decache_vcpus_on_cpu(int cpu)
{
	struct kvm *vm;
	struct kvm_vcpu *vcpu;
	int i;

	spin_lock(&kvm_lock);
	list_for_each_entry(vm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = vm->vcpus[i];
			if (!vcpu)
				continue;
			/*
			 * If the vcpu is locked, then it is running on some
			 * other cpu and therefore it is not cached on the
			 * cpu in question.
			 *
			 * If it's not locked, check the last cpu it executed
			 * on.
			 */
			if (mutex_trylock(&vcpu->mutex)) {
				if (vcpu->cpu == cpu) {
					kvm_x86_ops->vcpu_decache(vcpu);
					vcpu->cpu = -1;
				}
				mutex_unlock(&vcpu->mutex);
			}
		}
	spin_unlock(&kvm_lock);
}

static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_set(cpu, cpus_hardware_enabled);
	kvm_x86_ops->hardware_enable(NULL);
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_clear(cpu, cpus_hardware_enabled);
	decache_vcpus_on_cpu(cpu);
	kvm_x86_ops->hardware_disable(NULL);
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
		break;
	}
	return NOTIFY_OK;
}

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	if (val == SYS_RESTART) {
		/*
		 * Some (well, at least mine) BIOSes hang on reboot if
		 * in vmx root mode.
		 */
		printk(KERN_INFO "kvm: exiting hardware virtualization\n");
		on_each_cpu(hardware_disable, NULL, 0, 1);
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}

struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		if (pos->in_range(pos, addr))
			return pos;
	}

	return NULL;
}

void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
{
	BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));

	bus->devs[bus->dev_count++] = dev;
}

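/*
 * Illustrative sketch (added, not from the original source): an emulated
 * device attaches itself to a bus roughly like
 *
 *	my_dev->dev.in_range = my_dev_in_range;
 *	my_dev->dev.read     = my_dev_read;
 *	my_dev->dev.write    = my_dev_write;
 *	kvm_io_bus_register_dev(&kvm->mmio_bus, &my_dev->dev);
 *
 * after which MMIO/PIO dispatch reaches it via kvm_io_bus_find_dev().
 * The callback names above are illustrative; the authoritative set of
 * kvm_io_device hooks is whatever the kvm headers of this era define.
 */
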
static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};

static u64 stat_get(void *_offset)
{
	unsigned offset = (long)_offset;
	u64 total = 0;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = kvm->vcpus[i];
			if (vcpu)
				total += *(u32 *)((void *)vcpu + offset);
		}
	spin_unlock(&kvm_lock);
	return total;
}

DEFINE_SIMPLE_ATTRIBUTE(stat_fops, stat_get, NULL, "%llu\n");

static __init void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, debugfs_dir,
						(void *)(long)p->offset,
						&stat_fops);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};

struct page *bad_page;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_x86_ops->vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_x86_ops->vcpu_put(vcpu);
}

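/*
 * Added note: these hooks are the other half of the preempt notifier
 * registered in vcpu_load().  When the task holding a vcpu is scheduled
 * out, kvm_sched_out() saves the guest's per-cpu state; when it runs
 * again, possibly on another cpu, kvm_sched_in() re-loads it there.
 */
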
int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
		 struct module *module)
{
	int r;
	int cpu;

	if (kvm_x86_ops) {
		printk(KERN_ERR "kvm: already loaded the other module\n");
		return -EEXIST;
	}

	if (!ops->cpu_has_kvm_support()) {
		printk(KERN_ERR "kvm: no hardware support\n");
		return -EOPNOTSUPP;
	}
	if (ops->disabled_by_bios()) {
		printk(KERN_ERR "kvm: disabled by bios\n");
		return -EOPNOTSUPP;
	}

	kvm_x86_ops = ops;

	r = kvm_x86_ops->hardware_setup();
	if (r < 0)
		goto out;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_x86_ops->check_processor_compatibility,
				&r, 0, 1);
		if (r < 0)
			goto out_free_0;
	}

	on_each_cpu(hardware_enable, NULL, 0, 1);
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_1;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_2;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_3;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu), 0, 0);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_4;
	}

	kvm_chardev_ops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	kvm_mmu_set_nonpresent_ptes(0ull, 0ull);

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_4:
	sysdev_unregister(&kvm_sysdev);
out_free_3:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_2:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_1:
	on_each_cpu(hardware_disable, NULL, 0, 1);
out_free_0:
	kvm_x86_ops->hardware_unsetup();
out:
	kvm_x86_ops = NULL;
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init_x86);

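/*
 * Usage sketch (added commentary): an arch backend module (kvm-intel or
 * kvm-amd) registers itself from its own module_init, roughly:
 *
 *	static int __init vmx_init(void)
 *	{
 *		return kvm_init_x86(&vmx_x86_ops, sizeof(struct vcpu_vmx),
 *				    THIS_MODULE);
 *	}
 *
 * The ops- and vcpu-structure names above belong to the backend and are
 * shown for illustration; kvm_init_x86() itself only requires that
 * vcpu_size covers the backend's containing vcpu structure.
 */
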
void kvm_exit_x86(void)
{
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 0, 1);
	kvm_x86_ops->hardware_unsetup();
	kvm_x86_ops = NULL;
}
EXPORT_SYMBOL_GPL(kvm_exit_x86);

static __init int kvm_init(void)
{
	int r;

	r = kvm_mmu_module_init();
	if (r)
		goto out4;

	kvm_init_debug();

	kvm_arch_init();

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	return 0;

out:
	kvm_exit_debug();
	kvm_mmu_module_exit();
out4:
	return r;
}

static __exit void kvm_exit(void)
{
	kvm_exit_debug();
	__free_page(bad_page);
	kvm_mmu_module_exit();
}

module_init(kvm_init)
module_exit(kvm_exit)