KVM: Make coalesced mmio use a device per zone
include/linux/kvm_host.h
#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

#ifndef KVM_MMIO_SIZE
#define KVM_MMIO_SIZE 8
#endif
/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
#define KVM_REQ_CLOCK_UPDATE       8
#define KVM_REQ_KICK               9
#define KVM_REQ_DEACTIVATE_FPU    10
#define KVM_REQ_EVENT             11
#define KVM_REQ_APF_HALT          12
#define KVM_REQ_STEAL_UPDATE      13

#define KVM_USERSPACE_IRQ_SOURCE_ID 0

struct kvm;
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

/*
 * It would be nice to use something smarter than a linear search, TBD...
 * Thankfully we don't expect many devices to register (famous last
 * words :), so until then it will suffice.  At least it's abstracted
 * so we can change it in one place.
 */
struct kvm_io_bus {
        int dev_count;
#define NR_IOBUS_DEVS 300
        struct kvm_io_device *devs[NR_IOBUS_DEVS];
};

enum kvm_bus {
        KVM_MMIO_BUS,
        KVM_PIO_BUS,
        KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                     int len, const void *val);
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
                    void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                            struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                              struct kvm_io_device *dev);

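/*
 * Editor's sketch (not part of the original header): a device is
 * typically attached to a bus roughly as below.  kvm_iodevice_init()
 * and the ops layout come from virt/kvm/iodev.h; "mydev"/"my_ops" are
 * illustrative names.
 *
 *	static const struct kvm_io_device_ops my_ops = {
 *		.read  = my_read,   // int (*)(dev, gpa_t addr, int len, void *val)
 *		.write = my_write,
 *	};
 *
 *	kvm_iodevice_init(&mydev->dev, &my_ops);
 *	mutex_lock(&kvm->slots_lock);
 *	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &mydev->dev);
 *	mutex_unlock(&kvm->slots_lock);
 *
 * kvm_io_bus_write()/kvm_io_bus_read() then walk devs[] linearly until
 * some device claims the (addr, len) access.
 */
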
#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
        struct work_struct work;
        struct list_head link;
        struct list_head queue;
        struct kvm_vcpu *vcpu;
        struct mm_struct *mm;
        gva_t gva;
        unsigned long addr;
        struct kvm_arch_async_pf arch;
        struct page *page;
        bool done;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
                       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

enum {
        OUTSIDE_GUEST_MODE,
        IN_GUEST_MODE,
        EXITING_GUEST_MODE
};

struct kvm_vcpu {
        struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
        struct preempt_notifier preempt_notifier;
#endif
        int cpu;
        int vcpu_id;
        int srcu_idx;
        int mode;
        unsigned long requests;
        unsigned long guest_debug;

        struct mutex mutex;
        struct kvm_run *run;

        int fpu_active;
        int guest_fpu_loaded, guest_xcr0_loaded;
        wait_queue_head_t wq;
        struct pid *pid;
        int sigset_active;
        sigset_t sigset;
        struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
        int mmio_needed;
        int mmio_read_completed;
        int mmio_is_write;
        int mmio_size;
        int mmio_index;
        unsigned char mmio_data[KVM_MMIO_SIZE];
        gpa_t mmio_phys_addr;
#endif

#ifdef CONFIG_KVM_ASYNC_PF
        struct {
                u32 queued;
                struct list_head queue;
                struct list_head done;
                spinlock_t lock;
        } async_pf;
#endif

        struct kvm_vcpu_arch arch;
};

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
        return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

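/*
 * Editor's sketch: a typical kicker uses the cmpxchg above to force a
 * vmexit only when the vcpu really is in guest mode (assumed usage,
 * modeled on kvm_vcpu_kick()):
 *
 *	if (kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE)
 *		smp_send_reschedule(vcpu->cpu);	// IPI forces the vmexit
 */
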
/*
 * Some of the bitops functions do not support arbitrarily long bitmaps,
 * so this limit must be chosen to stay within their range.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_lpage_info {
        unsigned long rmap_pde;
        int write_count;
};

struct kvm_memory_slot {
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long flags;
        unsigned long *rmap;
        unsigned long *dirty_bitmap;
        unsigned long *dirty_bitmap_head;
        struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
        unsigned long userspace_addr;
        int user_alloc;
        int id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
        return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}

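/*
 * Editor's note: the dirty bitmap is one bit per page, rounded up to a
 * whole number of longs.  E.g. with BITS_PER_LONG == 64, a slot with
 * npages == 100 aligns up to 128 bits, so the bitmap occupies 16 bytes.
 */
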
struct kvm_kernel_irq_routing_entry {
        u32 gsi;
        u32 type;
        int (*set)(struct kvm_kernel_irq_routing_entry *e,
                   struct kvm *kvm, int irq_source_id, int level);
        union {
                struct {
                        unsigned irqchip;
                        unsigned pin;
                } irqchip;
                struct msi_msg msi;
        };
        struct hlist_node link;
};

#ifdef __KVM_HAVE_IOAPIC

struct kvm_irq_routing_table {
        int chip[KVM_NR_IRQCHIPS][KVM_IOAPIC_NUM_PINS];
        struct kvm_kernel_irq_routing_entry *rt_entries;
        u32 nr_rt_entries;
        /*
         * Array indexed by gsi.  Each entry contains a list of the irq
         * chips the gsi is connected to.
         */
        struct hlist_head map[0];
};

#else

struct kvm_irq_routing_table {};

#endif

struct kvm_memslots {
        int nmemslots;
        u64 generation;
        struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
                                        KVM_PRIVATE_MEM_SLOTS];
};

struct kvm {
        spinlock_t mmu_lock;
        struct mutex slots_lock;
        struct mm_struct *mm; /* userspace tied to this vm */
        struct kvm_memslots *memslots;
        struct srcu_struct srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
        u32 bsp_vcpu_id;
        struct kvm_vcpu *bsp_vcpu;
#endif
        struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
        atomic_t online_vcpus;
        int last_boosted_vcpu;
        struct list_head vm_list;
        struct mutex lock;
        struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
        struct {
                spinlock_t lock;
                struct list_head items;
        } irqfds;
        struct list_head ioeventfds;
#endif
        struct kvm_vm_stat stat;
        struct kvm_arch arch;
        atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
        spinlock_t ring_lock;
        struct list_head coalesced_zones;
#endif

        struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
        /*
         * Update side is protected by irq_lock and,
         * if configured, irqfds.lock.
         */
        struct kvm_irq_routing_table __rcu *irq_routing;
        struct hlist_head mask_notifier_list;
        struct hlist_head irq_ack_notifier_list;
#endif

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
        struct mmu_notifier mmu_notifier;
        unsigned long mmu_notifier_seq;
        long mmu_notifier_count;
#endif
        long tlbs_dirty;
};

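/*
 * Editor's sketch: with the per-zone scheme this commit introduces,
 * userspace registers each coalesced MMIO zone via the
 * KVM_REGISTER_COALESCED_MMIO vm ioctl; each zone is tracked on
 * coalesced_zones above and backed by its own device on KVM_MMIO_BUS
 * (one reason NR_IOBUS_DEVS is sized generously at 300).  The ioctl and
 * struct are from <linux/kvm.h>; the address/size values are examples:
 *
 *	struct kvm_coalesced_mmio_zone zone = {
 *		.addr = 0xfeb00000,	// example guest-physical base
 *		.size = 0x1000,
 *	};
 *	ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
 */
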
/* The guest did something we don't support. */
#define pr_unimpl(vcpu, fmt, ...)                                        \
 do {                                                                    \
        if (printk_ratelimit())                                          \
                printk(KERN_ERR "kvm: %i: cpu%i " fmt,                   \
                       current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__); \
 } while (0)

#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
        smp_rmb();
        return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
        for (idx = 0; \
             idx < atomic_read(&kvm->online_vcpus) && \
             (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
             idx++)

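/*
 * Editor's sketch: typical iteration over the online vcpus, e.g. to
 * post a request to each of them (assumed usage):
 *
 *	int i;
 *	struct kvm_vcpu *vcpu;
 *
 *	kvm_for_each_vcpu(i, vcpu, kvm)
 *		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 */
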
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
             struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
        return rcu_dereference_check(kvm->memslots,
                        srcu_read_lock_held(&kvm->srcu)
                        || lockdep_is_held(&kvm->slots_lock));
}

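/*
 * Editor's sketch: readers access the memslots under SRCU; the
 * rcu_dereference_check() above encodes exactly the two legal contexts:
 *
 *	idx = srcu_read_lock(&kvm->srcu);
 *	slots = kvm_memslots(kvm);
 *	// ... look up gfns in slots->memslots[] ...
 *	srcu_read_unlock(&kvm->srcu, idx);
 *
 * Writers instead hold kvm->slots_lock while installing a new memslots
 * array.
 */
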
#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }

extern struct page *bad_page;
extern struct page *fault_page;

extern pfn_t bad_pfn;
extern pfn_t fault_pfn;

int is_error_page(struct page *page);
int is_error_pfn(pfn_t pfn);
int is_hwpoison_pfn(pfn_t pfn);
int is_fault_pfn(pfn_t pfn);
int is_noslot_pfn(pfn_t pfn);
int is_invalid_pfn(pfn_t pfn);
int kvm_is_error_hva(unsigned long addr);
int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem,
                          int user_alloc);
int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem,
                            int user_alloc);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc);
void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   struct kvm_memory_slot old,
                                   int user_alloc);
void kvm_disable_largepages(void);
void kvm_arch_flush_shadow(struct kvm *kvm);

int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
                            int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr);
pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
                       bool write_fault, bool *writable);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
                      bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
                         struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_release_pfn_dirty(pfn_t);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
                          unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                          void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                           void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                              gpa_t gpa);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
                             gfn_t gfn);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
                      struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc);
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
int kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);

void kvm_free_physmem(struct kvm *kvm);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
        return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
        kfree(kvm);
}
#endif

int kvm_arch_init_vm(struct kvm *kvm);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_free_all_assigned_devices(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

int kvm_is_mmio_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
        struct hlist_node link;
        unsigned gsi;
        void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

struct kvm_assigned_dev_kernel {
        struct kvm_irq_ack_notifier ack_notifier;
        struct list_head list;
        int assigned_dev_id;
        int host_segnr;
        int host_busnr;
        int host_devfn;
        unsigned int entries_nr;
        int host_irq;
        bool host_irq_disabled;
        struct msix_entry *host_msix_entries;
        int guest_irq;
        struct msix_entry *guest_msix_entries;
        unsigned long irq_requested_type;
        int irq_source_id;
        int flags;
        struct pci_dev *dev;
        struct kvm *kvm;
        spinlock_t intx_lock;
        char irq_name[32];
        struct pci_saved_state *pci_saved_state;
};

struct kvm_irq_mask_notifier {
        void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
        int irq;
        struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
                                    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
                                      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
                             bool mask);

#ifdef __KVM_HAVE_IOAPIC
void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
                                   union kvm_ioapic_redirect_entry *entry,
                                   unsigned long *deliver_bitmask);
#endif
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
                int irq_source_id, int level);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
                                     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

/* For vcpu->arch.iommu_flags */
#define KVM_IOMMU_CACHE_COHERENCY       0x1

#ifdef CONFIG_IOMMU_API
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
                      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *assigned_dev);
#else /* CONFIG_IOMMU_API */
static inline int kvm_iommu_map_pages(struct kvm *kvm,
                                      struct kvm_memory_slot *slot)
{
        return 0;
}

static inline int kvm_iommu_map_guest(struct kvm *kvm)
{
        return -ENODEV;
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
        return 0;
}

static inline int kvm_assign_device(struct kvm *kvm,
                                    struct kvm_assigned_dev_kernel *assigned_dev)
{
        return 0;
}

static inline int kvm_deassign_device(struct kvm *kvm,
                                      struct kvm_assigned_dev_kernel *assigned_dev)
{
        return 0;
}
#endif /* CONFIG_IOMMU_API */

static inline void kvm_guest_enter(void)
{
        BUG_ON(preemptible());
        account_system_vtime(current);
        current->flags |= PF_VCPU;
        /* KVM does not hold any references to rcu protected data when it
         * switches CPU into a guest mode.  In fact switching to a guest mode
         * is very similar to exiting to userspace from rcu point of view.  In
         * addition CPU may stay in a guest mode for quite a long time (up to
         * one time slice).  Let's treat guest mode as quiescent state, just
         * like we do with user-mode execution.
         */
        rcu_virt_note_context_switch(smp_processor_id());
}

static inline void kvm_guest_exit(void)
{
        account_system_vtime(current);
        current->flags &= ~PF_VCPU;
}

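/*
 * Editor's sketch: arch vcpu loops bracket the actual guest entry with
 * these helpers, entering with interrupts disabled (assumed shape,
 * modeled on the x86 vcpu_enter_guest() path):
 *
 *	local_irq_disable();
 *	kvm_guest_enter();	// must not be preemptible, see BUG_ON above
 *	// ... hardware guest entry, run guest, vmexit ...
 *	local_irq_enable();
 *	kvm_guest_exit();	// accounts the time spent in the guest
 */
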
static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
        return gfn_to_memslot(kvm, gfn)->id;
}

static inline unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
                                               gfn_t gfn)
{
        return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
        return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
        return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
        return (hpa_t)pfn << PAGE_SHIFT;
}

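/*
 * Editor's note: the helpers above are pure shifts and offsets.  With
 * PAGE_SHIFT == 12, gfn_to_gpa(0x1234) == 0x1234000 and
 * gpa_to_gfn(0x1234fff) == 0x1234; a slot with base_gfn 0x1000 and
 * userspace_addr 0x7f0000000000 maps gfn 0x1001 to hva 0x7f0000001000.
 */
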
static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
        set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
        KVM_STAT_VM,
        KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
        const char *name;
        int offset;
        enum kvm_stat_kind kind;
        struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
{
        if (unlikely(vcpu->kvm->mmu_notifier_count))
                return 1;
        /*
         * Both reads happen under the mmu_lock and both values are
         * modified under mmu_lock, so there's no need for smp_rmb()
         * here in between; otherwise mmu_notifier_count would have to
         * be read before mmu_notifier_seq, see the
         * mmu_notifier_invalidate_range_end() write side.
         */
        if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
                return 1;
        return 0;
}
#endif

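/*
 * Editor's sketch of the intended usage, as on the x86 page fault path:
 * sample the sequence count before translating the gfn, then revalidate
 * under mmu_lock before installing the mapping:
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(vcpu, mmu_seq))
 *		goto out_unlock;	// hva range changed; retry the fault
 *	// ... install the mapping ...
 *	spin_unlock(&kvm->mmu_lock);
 */
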
#ifdef CONFIG_HAVE_KVM_IRQCHIP

#define KVM_MAX_IRQ_ROUTES 1024

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
                        const struct kvm_irq_routing_entry *entries,
                        unsigned nr,
                        unsigned flags);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm,
                                          struct kvm_irq_routing_table *irq_rt)
{
        rcu_assign_pointer(kvm->irq_routing, irq_rt);
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}
#endif

#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT

long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                  unsigned long arg);

#else

static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                                unsigned long arg)
{
        return -ENOTTY;
}

#endif

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
        set_bit(req, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
        if (test_bit(req, &vcpu->requests)) {
                clear_bit(req, &vcpu->requests);
                return true;
        } else {
                return false;
        }
}

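/*
 * Editor's sketch: the producer/consumer pairing for vcpu->requests
 * (assumed usage, modeled on the x86 vcpu run loop; the handler name
 * is illustrative):
 *
 *	// producer, any context:
 *	kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *
 *	// consumer, vcpu thread, before re-entering the guest:
 *	if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu))
 *		update_guest_clock(vcpu);	// hypothetical handler
 */
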
#endif /* __KVM_HOST_H */