KVM: Future-proof argument-less ioctls
drivers/kvm/kvm.h

#ifndef __KVM_H
#define __KVM_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>

#include "vmx.h"
#include <linux/kvm.h>
#include <linux/kvm_para.h>

#define CR0_PE_MASK (1ULL << 0)
#define CR0_TS_MASK (1ULL << 3)
#define CR0_NE_MASK (1ULL << 5)
#define CR0_WP_MASK (1ULL << 16)
#define CR0_NW_MASK (1ULL << 29)
#define CR0_CD_MASK (1ULL << 30)
#define CR0_PG_MASK (1ULL << 31)

#define CR3_WPT_MASK (1ULL << 3)
#define CR3_PCD_MASK (1ULL << 4)

#define CR3_RESEVED_BITS 0x07ULL
#define CR3_L_MODE_RESEVED_BITS (~((1ULL << 40) - 1) | 0x0fe7ULL)
#define CR3_FLAGS_MASK ((1ULL << 5) - 1)

#define CR4_VME_MASK (1ULL << 0)
#define CR4_PSE_MASK (1ULL << 4)
#define CR4_PAE_MASK (1ULL << 5)
#define CR4_PGE_MASK (1ULL << 7)
#define CR4_VMXE_MASK (1ULL << 13)

#define KVM_GUEST_CR0_MASK \
        (CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK \
         | CR0_NW_MASK | CR0_CD_MASK)
#define KVM_VM_CR0_ALWAYS_ON \
        (CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK)
#define KVM_GUEST_CR4_MASK \
        (CR4_PSE_MASK | CR4_PAE_MASK | CR4_PGE_MASK | CR4_VMXE_MASK | CR4_VME_MASK)
#define KVM_PMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK | CR4_VME_MASK)

#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)

#define KVM_MAX_VCPUS 1
#define KVM_MEMORY_SLOTS 4
#define KVM_NUM_MMU_PAGES 256
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 40

#define FX_IMAGE_SIZE 512
#define FX_IMAGE_ALIGN 16
#define FX_BUF_SIZE (2 * FX_IMAGE_SIZE + FX_IMAGE_ALIGN)

#define DE_VECTOR 0
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14

#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03

#define IOPL_SHIFT 12

/*
 * Address types:
 *
 *  gva - guest virtual address
 *  gpa - guest physical address
 *  gfn - guest frame number
 *  hva - host virtual address
 *  hpa - host physical address
 *  hfn - host frame number
 */

typedef unsigned long gva_t;
typedef u64           gpa_t;
typedef unsigned long gfn_t;

typedef unsigned long hva_t;
typedef u64           hpa_t;
typedef unsigned long hfn_t;

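/*
 * Editor's note (illustrative sketch, not part of the original header):
 * a frame number is just the corresponding address with the page offset
 * shifted out, so hypothetical conversion helpers would look like this.
 */
static inline gfn_t example_gpa_to_gfn(gpa_t gpa)
{
        return (gfn_t)(gpa >> PAGE_SHIFT);      /* drop the page offset */
}

static inline gpa_t example_gfn_to_gpa(gfn_t gfn)
{
        return (gpa_t)gfn << PAGE_SHIFT;        /* back to a byte address */
}
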
#define NR_PTE_CHAIN_ENTRIES 5

struct kvm_pte_chain {
        u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
        struct hlist_node link;
};

/*
 * kvm_mmu_page_role, below, is defined as:
 *
 *   bits 0:3 - total guest paging levels (2-4, or zero for real mode)
 *   bits 4:7 - page table level for this shadow (1-4)
 *   bits 8:9 - page table quadrant for 2-level guests
 *   bit  16  - "metaphysical" - gfn is not a real page (huge page/real mode)
 */
union kvm_mmu_page_role {
        unsigned word;
        struct {
                unsigned glevels : 4;
                unsigned level : 4;
                unsigned quadrant : 2;
                unsigned pad_for_nice_hex_output : 6;
                unsigned metaphysical : 1;
        };
};

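/*
 * Editor's sketch (an assumption, not part of the original header): the
 * role word and the gfn together key the shadow-page hash, so comparing
 * two roles reduces to a single word compare.
 */
static inline int example_role_eq(union kvm_mmu_page_role a,
                                  union kvm_mmu_page_role b)
{
        return a.word == b.word;        /* compares every bitfield at once */
}
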
struct kvm_mmu_page {
        struct list_head link;
        struct hlist_node hash_link;

        /*
         * The following two entries are used to key the shadow page in the
         * hash table.
         */
        gfn_t gfn;
        union kvm_mmu_page_role role;

        hpa_t page_hpa;
        unsigned long slot_bitmap; /* One bit set per slot which has memory
                                    * in this shadow page.
                                    */
        int global;              /* Set if all ptes in this page are global */
        int multimapped;         /* More than one parent_pte? */
        int root_count;          /* Currently serving as active root */
        union {
                u64 *parent_pte;               /* !multimapped */
                struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
        };
};

struct vmcs {
        u32 revision_id;
        u32 abort;
        char data[0];
};

#define vmx_msr_entry kvm_msr_entry

struct kvm_vcpu;

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit). The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
        void (*new_cr3)(struct kvm_vcpu *vcpu);
        int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
        void (*free)(struct kvm_vcpu *vcpu);
        gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
        hpa_t root_hpa;
        int root_level;
        int shadow_root_level;

        u64 *pae_root;
};

#define KVM_NR_MEM_OBJS 20

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
        int nobjs;
        void *objects[KVM_NR_MEM_OBJS];
};

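/*
 * Editor's sketch of how such a cache is consumed (a hypothetical helper;
 * the real one lives in the mmu code): the cache is topped up before the
 * fault path runs, so taking an object can never fail.
 */
static inline void *example_mmu_cache_take(struct kvm_mmu_memory_cache *mc)
{
        BUG_ON(!mc->nobjs);              /* refilled before any page fault */
        return mc->objects[--mc->nobjs];
}
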
struct kvm_guest_debug {
        int enabled;
        unsigned long bp[4];
        int singlestep;
};

enum {
        VCPU_REGS_RAX = 0,
        VCPU_REGS_RCX = 1,
        VCPU_REGS_RDX = 2,
        VCPU_REGS_RBX = 3,
        VCPU_REGS_RSP = 4,
        VCPU_REGS_RBP = 5,
        VCPU_REGS_RSI = 6,
        VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
        VCPU_REGS_R8 = 8,
        VCPU_REGS_R9 = 9,
        VCPU_REGS_R10 = 10,
        VCPU_REGS_R11 = 11,
        VCPU_REGS_R12 = 12,
        VCPU_REGS_R13 = 13,
        VCPU_REGS_R14 = 14,
        VCPU_REGS_R15 = 15,
#endif
        NR_VCPU_REGS
};

enum {
        VCPU_SREG_CS,
        VCPU_SREG_DS,
        VCPU_SREG_ES,
        VCPU_SREG_FS,
        VCPU_SREG_GS,
        VCPU_SREG_SS,
        VCPU_SREG_TR,
        VCPU_SREG_LDTR,
};

struct kvm_vcpu {
        struct kvm *kvm;
        union {
                struct vmcs *vmcs;
                struct vcpu_svm *svm;
        };
        struct mutex mutex;
        int cpu;
        int launched;
        struct kvm_run *run;
        int interrupt_window_open;
        unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
#define NR_IRQ_WORDS KVM_IRQ_BITMAP_SIZE(unsigned long)
        unsigned long irq_pending[NR_IRQ_WORDS];
        unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
        unsigned long rip;                /* needs vcpu_load_rsp_rip() */

        unsigned long cr0;
        unsigned long cr2;
        unsigned long cr3;
        gpa_t para_state_gpa;
        struct page *para_state_page;
        gpa_t hypercall_gpa;
        unsigned long cr4;
        unsigned long cr8;
        u64 pdptrs[4]; /* pae */
        u64 shadow_efer;
        u64 apic_base;
        u64 ia32_misc_enable_msr;
        int nmsrs;
        struct vmx_msr_entry *guest_msrs;
        struct vmx_msr_entry *host_msrs;

        struct list_head free_pages;
        struct kvm_mmu_page page_header_buf[KVM_NUM_MMU_PAGES];
        struct kvm_mmu mmu;

        struct kvm_mmu_memory_cache mmu_pte_chain_cache;
        struct kvm_mmu_memory_cache mmu_rmap_desc_cache;

        gfn_t last_pt_write_gfn;
        int last_pt_write_count;

        struct kvm_guest_debug guest_debug;

        char fx_buf[FX_BUF_SIZE];
        char *host_fx_image;
        char *guest_fx_image;

        int mmio_needed;
        int mmio_read_completed;
        int mmio_is_write;
        int mmio_size;
        unsigned char mmio_data[8];
        gpa_t mmio_phys_addr;
        int pio_pending;

        int sigset_active;
        sigset_t sigset;

        struct {
                int active;
                u8 save_iopl;
                struct kvm_save_segment {
                        u16 selector;
                        unsigned long base;
                        u32 limit;
                        u32 ar;
                } tr, es, ds, fs, gs;
        } rmode;

        int cpuid_nent;
        struct kvm_cpuid_entry cpuid_entries[KVM_MAX_CPUID_ENTRIES];
};

struct kvm_memory_slot {
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long flags;
        struct page **phys_mem;
        unsigned long *dirty_bitmap;
};

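/*
 * Editor's illustration (hypothetical helper, not in the original header):
 * a gfn belongs to a slot when it falls inside [base_gfn, base_gfn + npages).
 */
static inline int example_gfn_in_slot(struct kvm_memory_slot *slot, gfn_t gfn)
{
        return gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages;
}
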
struct kvm {
        spinlock_t lock; /* protects everything except vcpus */
        int nmemslots;
        struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS];
        struct list_head active_mmu_pages;
        int n_free_mmu_pages;
        /*
         * Hash table of struct kvm_mmu_page.
         */
        struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
        struct kvm_vcpu vcpus[KVM_MAX_VCPUS];
        int memory_config_version;
        int busy;
        unsigned long rmap_overflow;
        struct list_head vm_list;
        struct file *filp;
};

struct kvm_stat {
        u32 pf_fixed;
        u32 pf_guest;
        u32 tlb_flush;
        u32 invlpg;

        u32 exits;
        u32 io_exits;
        u32 mmio_exits;
        u32 signal_exits;
        u32 irq_window_exits;
        u32 halt_exits;
        u32 request_irq_exits;
        u32 irq_exits;
};

struct descriptor_table {
        u16 limit;
        unsigned long base;
} __attribute__((packed));

struct kvm_arch_ops {
        int (*cpu_has_kvm_support)(void);     /* __init */
        int (*disabled_by_bios)(void);        /* __init */
        void (*hardware_enable)(void *dummy); /* __init */
        void (*hardware_disable)(void *dummy);
        int (*hardware_setup)(void);          /* __init */
        void (*hardware_unsetup)(void);       /* __exit */

        int (*vcpu_create)(struct kvm_vcpu *vcpu);
        void (*vcpu_free)(struct kvm_vcpu *vcpu);

        void (*vcpu_load)(struct kvm_vcpu *vcpu);
        void (*vcpu_put)(struct kvm_vcpu *vcpu);
        void (*vcpu_decache)(struct kvm_vcpu *vcpu);

        int (*set_guest_debug)(struct kvm_vcpu *vcpu,
                               struct kvm_debug_guest *dbg);
        int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
        int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
        u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
        void (*get_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        void (*set_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
        void (*decache_cr0_cr4_guest_bits)(struct kvm_vcpu *vcpu);
        void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
        void (*set_cr0_no_modeswitch)(struct kvm_vcpu *vcpu,
                                      unsigned long cr0);
        void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
        void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
        void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
        void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
        void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
        void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
        void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
        unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
        void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
                       int *exception);
        void (*cache_regs)(struct kvm_vcpu *vcpu);
        void (*decache_regs)(struct kvm_vcpu *vcpu);
        unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
        void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);

        void (*invlpg)(struct kvm_vcpu *vcpu, gva_t addr);
        void (*tlb_flush)(struct kvm_vcpu *vcpu);
        void (*inject_page_fault)(struct kvm_vcpu *vcpu,
                                  unsigned long addr, u32 err_code);

        void (*inject_gp)(struct kvm_vcpu *vcpu, unsigned err_code);

        int (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
        int (*vcpu_setup)(struct kvm_vcpu *vcpu);
        void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
        void (*patch_hypercall)(struct kvm_vcpu *vcpu,
                                unsigned char *hypercall_addr);
};

extern struct kvm_stat kvm_stat;
extern struct kvm_arch_ops *kvm_arch_ops;

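/*
 * Editor's illustration (hypothetical wrapper): callers reach the active
 * backend (vmx.c or svm.c) through the global ops table above.
 */
static inline void example_flush_guest_tlb(struct kvm_vcpu *vcpu)
{
        kvm_arch_ops->tlb_flush(vcpu);  /* dispatches to VMX or SVM */
}
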
#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt ...) kvm_printf(vcpu->kvm, fmt)

int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module);
void kvm_exit_arch(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_setup(struct kvm_vcpu *vcpu);

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot);

hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa);
#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva);

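/*
 * Editor's sketch (hypothetical helper): translation failures are
 * signalled in-band by setting the top bit of the returned hpa_t, so a
 * caller checks the result before using it.
 */
static inline int example_translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa,
                                        hpa_t *hpa)
{
        *hpa = gpa_to_hpa(vcpu, gpa);
        return is_error_hpa(*hpa) ? -1 : 0;     /* 0 on success */
}
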
void kvm_emulator_want_group7_invlpg(void);

extern hpa_t bad_page_address;

static inline struct page *gfn_to_page(struct kvm_memory_slot *slot, gfn_t gfn)
{
        return slot->phys_mem[gfn - slot->base_gfn];
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

enum emulation_result {
        EMULATE_DONE,    /* no further processing */
        EMULATE_DO_MMIO, /* kvm_run filled with mmio request */
        EMULATE_FAIL,    /* can't emulate this instruction */
};

int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
                        unsigned long cr2, u16 error_code);

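/*
 * Editor's sketch of acting on the result (hypothetical; the real exit
 * handlers live in vmx.c and svm.c). Returning 1 resumes the guest,
 * returning 0 hands control to userspace.
 */
static inline int example_handle_emulation(struct kvm_vcpu *vcpu,
                                           struct kvm_run *run,
                                           unsigned long cr2, u16 error_code)
{
        switch (emulate_instruction(vcpu, run, cr2, error_code)) {
        case EMULATE_DONE:
                return 1;       /* instruction handled in the kernel */
        case EMULATE_DO_MMIO:
                return 0;       /* kvm_run now carries the mmio request */
        default:
                return -1;      /* EMULATE_FAIL: give up on this insn */
        }
}
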
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
                   unsigned long *rflags);

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
                     unsigned long *rflags);

struct x86_emulate_ctxt;

void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
int emulate_clts(struct kvm_vcpu *vcpu);
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
                    unsigned long *dest);
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
                    unsigned long value);

void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);

void fx_init(struct kvm_vcpu *vcpu);

void load_msrs(struct vmx_msr_entry *e, int n);
void save_msrs(struct vmx_msr_entry *e, int n);
void kvm_resched(struct kvm_vcpu *vcpu);

int kvm_read_guest(struct kvm_vcpu *vcpu,
                   gva_t addr,
                   unsigned long size,
                   void *dest);

int kvm_write_guest(struct kvm_vcpu *vcpu,
                    gva_t addr,
                    unsigned long size,
                    void *data);

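/*
 * Editor's sketch (hypothetical caller): pulling a guest-virtual value
 * out of guest memory through the accessor above.
 */
static inline int example_read_guest_u32(struct kvm_vcpu *vcpu, gva_t addr,
                                         u32 *val)
{
        return kvm_read_guest(vcpu, addr, sizeof(*val), val);
}
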
unsigned long segment_base(u16 selector);

void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);

int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run);

static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
                                     u32 error_code)
{
        if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
                kvm_mmu_free_some_pages(vcpu);
        return vcpu->mmu.page_fault(vcpu, gva, error_code);
}

static inline struct page *_gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
        return slot ? slot->phys_mem[gfn - slot->base_gfn] : NULL;
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        return vcpu->shadow_efer & EFER_LME;
#else
        return 0;
#endif
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
        return vcpu->cr4 & CR4_PAE_MASK;
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
        return vcpu->cr4 & CR4_PSE_MASK;
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
        return vcpu->cr0 & CR0_PG_MASK;
}

static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
{
        return slot - kvm->memslots;
}

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
        struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

        return (struct kvm_mmu_page *)page_private(page);
}

static inline u16 read_fs(void)
{
        u16 seg;
        asm ("mov %%fs, %0" : "=g"(seg));
        return seg;
}

static inline u16 read_gs(void)
{
        u16 seg;
        asm ("mov %%gs, %0" : "=g"(seg));
        return seg;
}

static inline u16 read_ldt(void)
{
        u16 ldt;
        asm ("sldt %0" : "=g"(ldt));
        return ldt;
}

static inline void load_fs(u16 sel)
{
        asm ("mov %0, %%fs" : : "rm"(sel));
}

static inline void load_gs(u16 sel)
{
        asm ("mov %0, %%gs" : : "rm"(sel));
}

#ifndef load_ldt
static inline void load_ldt(u16 sel)
{
        asm ("lldt %0" : : "rm"(sel));
}
#endif

static inline void get_idt(struct descriptor_table *table)
{
        asm ("sidt %0" : "=m"(*table));
}

static inline void get_gdt(struct descriptor_table *table)
{
        asm ("sgdt %0" : "=m"(*table));
}

static inline unsigned long read_tr_base(void)
{
        u16 tr;
        asm ("str %0" : "=g"(tr));
        return segment_base(tr);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
        u64 value;

        rdmsrl(msr, value);
        return value;
}
#endif

static inline void fx_save(void *image)
{
        asm ("fxsave (%0)" : : "r"(image));
}

static inline void fx_restore(void *image)
{
        asm ("fxrstor (%0)" : : "r"(image));
}

static inline void fpu_init(void)
{
        asm ("finit");
}

static inline u32 get_rdx_init_val(void)
{
        return 0x600; /* P6 family */
}

#define ASM_VMX_VMCLEAR_RAX     ".byte 0x66, 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMLAUNCH        ".byte 0x0f, 0x01, 0xc2"
#define ASM_VMX_VMRESUME        ".byte 0x0f, 0x01, 0xc3"
#define ASM_VMX_VMPTRLD_RAX     ".byte 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMREAD_RDX_RAX  ".byte 0x0f, 0x78, 0xd0"
#define ASM_VMX_VMWRITE_RAX_RDX ".byte 0x0f, 0x79, 0xd0"
#define ASM_VMX_VMWRITE_RSP_RDX ".byte 0x0f, 0x79, 0xd4"
#define ASM_VMX_VMXOFF          ".byte 0x0f, 0x01, 0xc4"
#define ASM_VMX_VMXON_RAX       ".byte 0xf3, 0x0f, 0xc7, 0x30"

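/*
 * Editor's illustration of the opcode macros above (a sketch modelled on
 * the VMREAD wrapper in vmx.c; the raw bytes exist for pre-VMX-aware
 * assemblers): reading a VMCS field into a register.
 */
static inline unsigned long example_vmcs_readl(unsigned long field)
{
        unsigned long value;

        asm volatile (ASM_VMX_VMREAD_RDX_RAX
                      : "=a"(value) : "d"(field) : "cc");
        return value;
}
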
#define MSR_IA32_TIME_STAMP_COUNTER 0x010

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)

#endif /* __KVM_H */