Commit | Line | Data |
---|---|---|
edf88417 AK |
1 | #ifndef __KVM_HOST_H |
2 | #define __KVM_HOST_H | |
6aa8b732 AK |
3 | |
4 | /* | |
5 | * This work is licensed under the terms of the GNU GPL, version 2. See | |
6 | * the COPYING file in the top-level directory. | |
7 | */ | |
8 | ||
9 | #include <linux/types.h> | |
e56a7a28 | 10 | #include <linux/hardirq.h> |
6aa8b732 AK |
11 | #include <linux/list.h> |
12 | #include <linux/mutex.h> | |
13 | #include <linux/spinlock.h> | |
06ff0d37 MR |
14 | #include <linux/signal.h> |
15 | #include <linux/sched.h> | |
6aa8b732 | 16 | #include <linux/mm.h> |
15ad7146 | 17 | #include <linux/preempt.h> |
d4c9ff2d | 18 | #include <linux/marker.h> |
0937c48d | 19 | #include <linux/msi.h> |
e8edc6e0 | 20 | #include <asm/signal.h> |
6aa8b732 | 21 | |
6aa8b732 | 22 | #include <linux/kvm.h> |
102d8325 | 23 | #include <linux/kvm_para.h> |
6aa8b732 | 24 | |
edf88417 | 25 | #include <linux/kvm_types.h> |
d77a39d9 | 26 | |
edf88417 | 27 | #include <asm/kvm_host.h> |
d657a98e | 28 | |
d9e368d6 AK |
29 | /* |
30 | * vcpu->requests bit members | |
31 | */ | |
3176bc3e | 32 | #define KVM_REQ_TLB_FLUSH 0 |
2f52d58c | 33 | #define KVM_REQ_MIGRATE_TIMER 1 |
b209749f | 34 | #define KVM_REQ_REPORT_TPR_ACCESS 2 |
2e53d63a | 35 | #define KVM_REQ_MMU_RELOAD 3 |
71c4dfaf | 36 | #define KVM_REQ_TRIPLE_FAULT 4 |
06e05645 | 37 | #define KVM_REQ_PENDING_TIMER 5 |
d7690175 | 38 | #define KVM_REQ_UNHALT 6 |
4731d4c7 | 39 | #define KVM_REQ_MMU_SYNC 7 |
c8076604 | 40 | #define KVM_REQ_KVMCLOCK_UPDATE 8 |
6aa8b732 | 41 | |
5550af4d SY |
42 | #define KVM_USERSPACE_IRQ_SOURCE_ID 0 |
43 | ||
6aa8b732 | 44 | struct kvm_vcpu; |
c16f862d | 45 | extern struct kmem_cache *kvm_vcpu_cache; |
6aa8b732 | 46 | |
2eeb2e94 GH |
47 | /* |
48 | * It would be nice to use something smarter than a linear search, TBD... | |
49 | * Thankfully we dont expect many devices to register (famous last words :), | |
50 | * so until then it will suffice. At least its abstracted so we can change | |
51 | * in one place. | |
52 | */ | |
53 | struct kvm_io_bus { | |
54 | int dev_count; | |
55 | #define NR_IOBUS_DEVS 6 | |
56 | struct kvm_io_device *devs[NR_IOBUS_DEVS]; | |
57 | }; | |
58 | ||
59 | void kvm_io_bus_init(struct kvm_io_bus *bus); | |
60 | void kvm_io_bus_destroy(struct kvm_io_bus *bus); | |
92760499 LV |
61 | struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, |
62 | gpa_t addr, int len, int is_write); | |
2eeb2e94 GH |
63 | void kvm_io_bus_register_dev(struct kvm_io_bus *bus, |
64 | struct kvm_io_device *dev); | |
65 | ||
d17fbbf7 ZX |
66 | struct kvm_vcpu { |
67 | struct kvm *kvm; | |
31bb117e | 68 | #ifdef CONFIG_PREEMPT_NOTIFIERS |
d17fbbf7 | 69 | struct preempt_notifier preempt_notifier; |
31bb117e | 70 | #endif |
d17fbbf7 ZX |
71 | int vcpu_id; |
72 | struct mutex mutex; | |
73 | int cpu; | |
74 | struct kvm_run *run; | |
75 | int guest_mode; | |
76 | unsigned long requests; | |
d0bfb940 | 77 | unsigned long guest_debug; |
d17fbbf7 ZX |
78 | int fpu_active; |
79 | int guest_fpu_loaded; | |
80 | wait_queue_head_t wq; | |
81 | int sigset_active; | |
82 | sigset_t sigset; | |
83 | struct kvm_vcpu_stat stat; | |
84 | ||
34c16eec | 85 | #ifdef CONFIG_HAS_IOMEM |
d17fbbf7 ZX |
86 | int mmio_needed; |
87 | int mmio_read_completed; | |
88 | int mmio_is_write; | |
89 | int mmio_size; | |
90 | unsigned char mmio_data[8]; | |
6aa8b732 | 91 | gpa_t mmio_phys_addr; |
34c16eec | 92 | #endif |
1165f5fe | 93 | |
d657a98e ZX |
94 | struct kvm_vcpu_arch arch; |
95 | }; | |
96 | ||
6aa8b732 AK |
97 | struct kvm_memory_slot { |
98 | gfn_t base_gfn; | |
99 | unsigned long npages; | |
100 | unsigned long flags; | |
290fc38d | 101 | unsigned long *rmap; |
6aa8b732 | 102 | unsigned long *dirty_bitmap; |
05da4558 MT |
103 | struct { |
104 | unsigned long rmap_pde; | |
105 | int write_count; | |
106 | } *lpage_info; | |
8a7ae055 | 107 | unsigned long userspace_addr; |
80b14b5b | 108 | int user_alloc; |
6aa8b732 AK |
109 | }; |
110 | ||
399ec807 AK |
111 | struct kvm_kernel_irq_routing_entry { |
112 | u32 gsi; | |
4925663a | 113 | int (*set)(struct kvm_kernel_irq_routing_entry *e, |
399ec807 AK |
114 | struct kvm *kvm, int level); |
115 | union { | |
116 | struct { | |
117 | unsigned irqchip; | |
118 | unsigned pin; | |
119 | } irqchip; | |
79950e10 | 120 | struct msi_msg msi; |
399ec807 AK |
121 | }; |
122 | struct list_head link; | |
123 | }; | |
124 | ||
6aa8b732 | 125 | struct kvm { |
aaee2c94 MT |
126 | struct mutex lock; /* protects the vcpus array and APIC accesses */ |
127 | spinlock_t mmu_lock; | |
72dc67a6 | 128 | struct rw_semaphore slots_lock; |
6d4e4c4f | 129 | struct mm_struct *mm; /* userspace tied to this vm */ |
6aa8b732 | 130 | int nmemslots; |
e0d62c7f IE |
131 | struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS + |
132 | KVM_PRIVATE_MEM_SLOTS]; | |
fb3f0f51 | 133 | struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; |
133de902 | 134 | struct list_head vm_list; |
2eeb2e94 | 135 | struct kvm_io_bus mmio_bus; |
74906345 | 136 | struct kvm_io_bus pio_bus; |
ba1389b7 | 137 | struct kvm_vm_stat stat; |
d69fb81f | 138 | struct kvm_arch arch; |
d39f13b0 | 139 | atomic_t users_count; |
5f94c174 LV |
140 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET |
141 | struct kvm_coalesced_mmio_dev *coalesced_mmio_dev; | |
142 | struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; | |
143 | #endif | |
e930bffe | 144 | |
75858a84 | 145 | #ifdef CONFIG_HAVE_KVM_IRQCHIP |
399ec807 | 146 | struct list_head irq_routing; /* of kvm_kernel_irq_routing_entry */ |
75858a84 AK |
147 | struct hlist_head mask_notifier_list; |
148 | #endif | |
149 | ||
e930bffe AA |
150 | #ifdef KVM_ARCH_WANT_MMU_NOTIFIER |
151 | struct mmu_notifier mmu_notifier; | |
152 | unsigned long mmu_notifier_seq; | |
153 | long mmu_notifier_count; | |
154 | #endif | |
6aa8b732 AK |
155 | }; |
156 | ||
f0242478 RR |
157 | /* The guest did something we don't support. */ |
158 | #define pr_unimpl(vcpu, fmt, ...) \ | |
159 | do { \ | |
160 | if (printk_ratelimit()) \ | |
161 | printk(KERN_ERR "kvm: %i: cpu%i " fmt, \ | |
162 | current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__); \ | |
d77c26fc | 163 | } while (0) |
f0242478 | 164 | |
6aa8b732 AK |
165 | #define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt) |
166 | #define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt) | |
167 | ||
fb3f0f51 RR |
168 | int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id); |
169 | void kvm_vcpu_uninit(struct kvm_vcpu *vcpu); | |
170 | ||
313a3dc7 CO |
171 | void vcpu_load(struct kvm_vcpu *vcpu); |
172 | void vcpu_put(struct kvm_vcpu *vcpu); | |
173 | ||
f8c16bba | 174 | int kvm_init(void *opaque, unsigned int vcpu_size, |
c16f862d | 175 | struct module *module); |
cb498ea2 | 176 | void kvm_exit(void); |
6aa8b732 | 177 | |
d39f13b0 IE |
178 | void kvm_get_kvm(struct kvm *kvm); |
179 | void kvm_put_kvm(struct kvm *kvm); | |
180 | ||
6aa8b732 AK |
181 | #define HPA_MSB ((sizeof(hpa_t) * 8) - 1) |
182 | #define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB) | |
183 | static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; } | |
039576c0 | 184 | struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva); |
6aa8b732 | 185 | |
cea7bb21 | 186 | extern struct page *bad_page; |
35149e21 | 187 | extern pfn_t bad_pfn; |
6aa8b732 | 188 | |
cea7bb21 | 189 | int is_error_page(struct page *page); |
35149e21 | 190 | int is_error_pfn(pfn_t pfn); |
f9d46eb0 | 191 | int kvm_is_error_hva(unsigned long addr); |
210c7c4d IE |
192 | int kvm_set_memory_region(struct kvm *kvm, |
193 | struct kvm_userspace_memory_region *mem, | |
194 | int user_alloc); | |
f78e0e2e SY |
195 | int __kvm_set_memory_region(struct kvm *kvm, |
196 | struct kvm_userspace_memory_region *mem, | |
197 | int user_alloc); | |
0de10343 ZX |
198 | int kvm_arch_set_memory_region(struct kvm *kvm, |
199 | struct kvm_userspace_memory_region *mem, | |
200 | struct kvm_memory_slot old, | |
201 | int user_alloc); | |
34d4cb8f | 202 | void kvm_arch_flush_shadow(struct kvm *kvm); |
290fc38d | 203 | gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn); |
954bbbc2 | 204 | struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); |
05da4558 | 205 | unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); |
b4231d61 IE |
206 | void kvm_release_page_clean(struct page *page); |
207 | void kvm_release_page_dirty(struct page *page); | |
35149e21 AL |
208 | void kvm_set_page_dirty(struct page *page); |
209 | void kvm_set_page_accessed(struct page *page); | |
210 | ||
211 | pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); | |
212 | void kvm_release_pfn_dirty(pfn_t); | |
213 | void kvm_release_pfn_clean(pfn_t pfn); | |
214 | void kvm_set_pfn_dirty(pfn_t pfn); | |
215 | void kvm_set_pfn_accessed(pfn_t pfn); | |
216 | void kvm_get_pfn(pfn_t pfn); | |
217 | ||
195aefde IE |
218 | int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, |
219 | int len); | |
7ec54588 MT |
220 | int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, |
221 | unsigned long len); | |
195aefde IE |
222 | int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); |
223 | int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, | |
224 | int offset, int len); | |
225 | int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, | |
226 | unsigned long len); | |
227 | int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); | |
228 | int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); | |
6aa8b732 | 229 | struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); |
e0d62c7f | 230 | int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn); |
6aa8b732 AK |
231 | void mark_page_dirty(struct kvm *kvm, gfn_t gfn); |
232 | ||
8776e519 | 233 | void kvm_vcpu_block(struct kvm_vcpu *vcpu); |
6aa8b732 | 234 | void kvm_resched(struct kvm_vcpu *vcpu); |
7702fd1f AK |
235 | void kvm_load_guest_fpu(struct kvm_vcpu *vcpu); |
236 | void kvm_put_guest_fpu(struct kvm_vcpu *vcpu); | |
d9e368d6 | 237 | void kvm_flush_remote_tlbs(struct kvm *kvm); |
2e53d63a | 238 | void kvm_reload_remote_mmus(struct kvm *kvm); |
6aa8b732 | 239 | |
043405e1 CO |
240 | long kvm_arch_dev_ioctl(struct file *filp, |
241 | unsigned int ioctl, unsigned long arg); | |
313a3dc7 CO |
242 | long kvm_arch_vcpu_ioctl(struct file *filp, |
243 | unsigned int ioctl, unsigned long arg); | |
244 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu); | |
245 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu); | |
018d00d2 ZX |
246 | |
247 | int kvm_dev_ioctl_check_extension(long ext); | |
248 | ||
5bb064dc ZX |
249 | int kvm_get_dirty_log(struct kvm *kvm, |
250 | struct kvm_dirty_log *log, int *is_dirty); | |
251 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, | |
252 | struct kvm_dirty_log *log); | |
253 | ||
1fe779f8 CO |
254 | int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, |
255 | struct | |
256 | kvm_userspace_memory_region *mem, | |
257 | int user_alloc); | |
258 | long kvm_arch_vm_ioctl(struct file *filp, | |
259 | unsigned int ioctl, unsigned long arg); | |
313a3dc7 | 260 | |
d0752060 HB |
261 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); |
262 | int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); | |
263 | ||
8b006791 ZX |
264 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, |
265 | struct kvm_translation *tr); | |
266 | ||
b6c7a5dc HB |
267 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs); |
268 | int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs); | |
269 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, | |
270 | struct kvm_sregs *sregs); | |
271 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | |
272 | struct kvm_sregs *sregs); | |
62d9f0db MT |
273 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, |
274 | struct kvm_mp_state *mp_state); | |
275 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, | |
276 | struct kvm_mp_state *mp_state); | |
d0bfb940 JK |
277 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, |
278 | struct kvm_guest_debug *dbg); | |
b6c7a5dc HB |
279 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run); |
280 | ||
f8c16bba ZX |
281 | int kvm_arch_init(void *opaque); |
282 | void kvm_arch_exit(void); | |
043405e1 | 283 | |
e9b11c17 ZX |
284 | int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu); |
285 | void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu); | |
286 | ||
287 | void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu); | |
288 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu); | |
289 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu); | |
290 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id); | |
26e5215f | 291 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu); |
d40ccc62 | 292 | void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); |
e9b11c17 ZX |
293 | |
294 | int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu); | |
295 | void kvm_arch_hardware_enable(void *garbage); | |
296 | void kvm_arch_hardware_disable(void *garbage); | |
297 | int kvm_arch_hardware_setup(void); | |
298 | void kvm_arch_hardware_unsetup(void); | |
299 | void kvm_arch_check_processor_compat(void *rtn); | |
1d737c8a | 300 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); |
e9b11c17 | 301 | |
d19a9cd2 ZX |
302 | void kvm_free_physmem(struct kvm *kvm); |
303 | ||
304 | struct kvm *kvm_arch_create_vm(void); | |
305 | void kvm_arch_destroy_vm(struct kvm *kvm); | |
8a98f664 | 306 | void kvm_free_all_assigned_devices(struct kvm *kvm); |
ad8ba2cd | 307 | void kvm_arch_sync_events(struct kvm *kvm); |
e9b11c17 | 308 | |
682c59a3 ZX |
309 | int kvm_cpu_get_interrupt(struct kvm_vcpu *v); |
310 | int kvm_cpu_has_interrupt(struct kvm_vcpu *v); | |
3d80840d | 311 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); |
5736199a | 312 | void kvm_vcpu_kick(struct kvm_vcpu *vcpu); |
682c59a3 | 313 | |
c77fb9dc XZ |
314 | int kvm_is_mmio_pfn(pfn_t pfn); |
315 | ||
62c476c7 BAY |
316 | struct kvm_irq_ack_notifier { |
317 | struct hlist_node link; | |
318 | unsigned gsi; | |
319 | void (*irq_acked)(struct kvm_irq_ack_notifier *kian); | |
320 | }; | |
321 | ||
c1e01514 SY |
322 | struct kvm_guest_msix_entry { |
323 | u32 vector; | |
324 | u16 entry; | |
325 | u16 flags; | |
326 | }; | |
327 | ||
62c476c7 BAY |
328 | struct kvm_assigned_dev_kernel { |
329 | struct kvm_irq_ack_notifier ack_notifier; | |
330 | struct work_struct interrupt_work; | |
331 | struct list_head list; | |
332 | int assigned_dev_id; | |
333 | int host_busnr; | |
334 | int host_devfn; | |
c1e01514 | 335 | unsigned int entries_nr; |
62c476c7 | 336 | int host_irq; |
defaf158 | 337 | bool host_irq_disabled; |
c1e01514 | 338 | struct msix_entry *host_msix_entries; |
62c476c7 | 339 | int guest_irq; |
c1e01514 | 340 | struct kvm_guest_msix_entry *guest_msix_entries; |
4f906c19 | 341 | #define KVM_ASSIGNED_DEV_GUEST_INTX (1 << 0) |
0937c48d | 342 | #define KVM_ASSIGNED_DEV_GUEST_MSI (1 << 1) |
4f906c19 | 343 | #define KVM_ASSIGNED_DEV_HOST_INTX (1 << 8) |
0937c48d | 344 | #define KVM_ASSIGNED_DEV_HOST_MSI (1 << 9) |
c1e01514 | 345 | #define KVM_ASSIGNED_DEV_MSIX ((1 << 2) | (1 << 10)) |
4f906c19 | 346 | unsigned long irq_requested_type; |
5550af4d | 347 | int irq_source_id; |
b653574a | 348 | int flags; |
62c476c7 BAY |
349 | struct pci_dev *dev; |
350 | struct kvm *kvm; | |
351 | }; | |
75858a84 AK |
352 | |
353 | struct kvm_irq_mask_notifier { | |
354 | void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked); | |
355 | int irq; | |
356 | struct hlist_node link; | |
357 | }; | |
358 | ||
359 | void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq, | |
360 | struct kvm_irq_mask_notifier *kimn); | |
361 | void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq, | |
362 | struct kvm_irq_mask_notifier *kimn); | |
363 | void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask); | |
364 | ||
116191b6 SY |
365 | void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic, |
366 | union kvm_ioapic_redirect_entry *entry, | |
367 | unsigned long *deliver_bitmask); | |
4925663a | 368 | int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level); |
44882eed | 369 | void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin); |
3de42dc0 XZ |
370 | void kvm_register_irq_ack_notifier(struct kvm *kvm, |
371 | struct kvm_irq_ack_notifier *kian); | |
e19e30ef | 372 | void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian); |
5550af4d SY |
373 | int kvm_request_irq_source_id(struct kvm *kvm); |
374 | void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); | |
62c476c7 | 375 | |
19de40a8 | 376 | #ifdef CONFIG_IOMMU_API |
62c476c7 BAY |
377 | int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn, |
378 | unsigned long npages); | |
260782bc | 379 | int kvm_iommu_map_guest(struct kvm *kvm); |
62c476c7 | 380 | int kvm_iommu_unmap_guest(struct kvm *kvm); |
260782bc WH |
381 | int kvm_assign_device(struct kvm *kvm, |
382 | struct kvm_assigned_dev_kernel *assigned_dev); | |
0a920356 WH |
383 | int kvm_deassign_device(struct kvm *kvm, |
384 | struct kvm_assigned_dev_kernel *assigned_dev); | |
19de40a8 | 385 | #else /* CONFIG_IOMMU_API */ |
62c476c7 BAY |
386 | static inline int kvm_iommu_map_pages(struct kvm *kvm, |
387 | gfn_t base_gfn, | |
388 | unsigned long npages) | |
389 | { | |
390 | return 0; | |
391 | } | |
392 | ||
260782bc | 393 | static inline int kvm_iommu_map_guest(struct kvm *kvm) |
62c476c7 BAY |
394 | { |
395 | return -ENODEV; | |
396 | } | |
397 | ||
398 | static inline int kvm_iommu_unmap_guest(struct kvm *kvm) | |
399 | { | |
400 | return 0; | |
401 | } | |
260782bc WH |
402 | |
403 | static inline int kvm_assign_device(struct kvm *kvm, | |
404 | struct kvm_assigned_dev_kernel *assigned_dev) | |
405 | { | |
406 | return 0; | |
407 | } | |
0a920356 WH |
408 | |
409 | static inline int kvm_deassign_device(struct kvm *kvm, | |
410 | struct kvm_assigned_dev_kernel *assigned_dev) | |
411 | { | |
412 | return 0; | |
413 | } | |
19de40a8 | 414 | #endif /* CONFIG_IOMMU_API */ |
62c476c7 | 415 | |
d172fcd3 LV |
416 | static inline void kvm_guest_enter(void) |
417 | { | |
e56a7a28 | 418 | account_system_vtime(current); |
d172fcd3 LV |
419 | current->flags |= PF_VCPU; |
420 | } | |
421 | ||
422 | static inline void kvm_guest_exit(void) | |
423 | { | |
e56a7a28 | 424 | account_system_vtime(current); |
d172fcd3 LV |
425 | current->flags &= ~PF_VCPU; |
426 | } | |
427 | ||
6aa8b732 AK |
428 | static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot) |
429 | { | |
430 | return slot - kvm->memslots; | |
431 | } | |
432 | ||
1755fbcc AK |
433 | static inline gpa_t gfn_to_gpa(gfn_t gfn) |
434 | { | |
435 | return (gpa_t)gfn << PAGE_SHIFT; | |
436 | } | |
6aa8b732 | 437 | |
62c476c7 BAY |
438 | static inline hpa_t pfn_to_hpa(pfn_t pfn) |
439 | { | |
440 | return (hpa_t)pfn << PAGE_SHIFT; | |
441 | } | |
442 | ||
2f599714 | 443 | static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu) |
2f52d58c AK |
444 | { |
445 | set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests); | |
446 | } | |
447 | ||
ba1389b7 AK |
448 | enum kvm_stat_kind { |
449 | KVM_STAT_VM, | |
450 | KVM_STAT_VCPU, | |
451 | }; | |
452 | ||
417bc304 HB |
453 | struct kvm_stats_debugfs_item { |
454 | const char *name; | |
455 | int offset; | |
ba1389b7 | 456 | enum kvm_stat_kind kind; |
417bc304 HB |
457 | struct dentry *dentry; |
458 | }; | |
459 | extern struct kvm_stats_debugfs_item debugfs_entries[]; | |
76f7c879 | 460 | extern struct dentry *kvm_debugfs_dir; |
d4c9ff2d | 461 | |
d98e6346 HB |
462 | #define KVMTRACE_5D(evt, vcpu, d1, d2, d3, d4, d5, name) \ |
463 | trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ | |
464 | vcpu, 5, d1, d2, d3, d4, d5) | |
465 | #define KVMTRACE_4D(evt, vcpu, d1, d2, d3, d4, name) \ | |
466 | trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ | |
467 | vcpu, 4, d1, d2, d3, d4, 0) | |
468 | #define KVMTRACE_3D(evt, vcpu, d1, d2, d3, name) \ | |
469 | trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ | |
470 | vcpu, 3, d1, d2, d3, 0, 0) | |
471 | #define KVMTRACE_2D(evt, vcpu, d1, d2, name) \ | |
472 | trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ | |
473 | vcpu, 2, d1, d2, 0, 0, 0) | |
474 | #define KVMTRACE_1D(evt, vcpu, d1, name) \ | |
475 | trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ | |
476 | vcpu, 1, d1, 0, 0, 0, 0) | |
477 | #define KVMTRACE_0D(evt, vcpu, name) \ | |
478 | trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \ | |
479 | vcpu, 0, 0, 0, 0, 0, 0) | |
480 | ||
d4c9ff2d FEL |
481 | #ifdef CONFIG_KVM_TRACE |
482 | int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg); | |
483 | void kvm_trace_cleanup(void); | |
484 | #else | |
485 | static inline | |
486 | int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg) | |
487 | { | |
488 | return -EINVAL; | |
489 | } | |
490 | #define kvm_trace_cleanup() ((void)0) | |
491 | #endif | |
417bc304 | 492 | |
e930bffe AA |
493 | #ifdef KVM_ARCH_WANT_MMU_NOTIFIER |
494 | static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq) | |
495 | { | |
496 | if (unlikely(vcpu->kvm->mmu_notifier_count)) | |
497 | return 1; | |
498 | /* | |
499 | * Both reads happen under the mmu_lock and both values are | |
500 | * modified under mmu_lock, so there's no need of smb_rmb() | |
501 | * here in between, otherwise mmu_notifier_count should be | |
502 | * read before mmu_notifier_seq, see | |
503 | * mmu_notifier_invalidate_range_end write side. | |
504 | */ | |
505 | if (vcpu->kvm->mmu_notifier_seq != mmu_seq) | |
506 | return 1; | |
507 | return 0; | |
508 | } | |
509 | #endif | |
510 | ||
399ec807 AK |
511 | #ifdef CONFIG_HAVE_KVM_IRQCHIP |
512 | ||
513 | #define KVM_MAX_IRQ_ROUTES 1024 | |
514 | ||
515 | int kvm_setup_default_irq_routing(struct kvm *kvm); | |
516 | int kvm_set_irq_routing(struct kvm *kvm, | |
517 | const struct kvm_irq_routing_entry *entries, | |
518 | unsigned nr, | |
519 | unsigned flags); | |
520 | void kvm_free_irq_routing(struct kvm *kvm); | |
521 | ||
522 | #else | |
523 | ||
524 | static inline void kvm_free_irq_routing(struct kvm *kvm) {} | |
525 | ||
526 | #endif | |
527 | ||
6aa8b732 | 528 | #endif |