/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include "kvm_svm.h"
#include "x86_emulate.h"
#include "irq.h"
#include "svm.h"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>

#include <asm/desc.h>
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define DR7_GD_MASK (1 << 13)
#define DR6_BD_MASK (1 << 13)

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define KVM_EFER_LMA (1 << 10)
#define KVM_EFER_LME (1 << 8)

#define SVM_FEATURE_NPT  (1 << 0)
#define SVM_FEATURE_LBRV (1 << 1)
#define SVM_FEATURE_SVML (1 << 2)
static void kvm_reput_irq(struct vcpu_svm *svm);

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}
unsigned long iopm_base;
unsigned long msrpm_base;
struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1 : 8, type : 5, dpl : 2, p : 1;
	unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));
struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
static uint32_t svm_features;
struct svm_init_data {
	int cpu;
	int r;
};
static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

#define MAX_INST_SIZE 15
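/*
 * Layout note (derived from set_msr_interception() below): the MSR
 * permission map dedicates two bits to every MSR -- bit 0 intercepts
 * reads, bit 1 intercepts writes -- so each 2048-byte range covers
 * 2048 * 8 / 2 = 8192 MSRs.  For example, MSR 0xc0000082 falls in the
 * second range, at bit offset (8192 + 0x82) * 2 within the map.
 */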
static inline u32 svm_has(u32 feat)
{
	return svm_features & feat;
}
static inline u8 pop_irq(struct kvm_vcpu *vcpu)
{
	int word_index = __ffs(vcpu->irq_summary);
	int bit_index = __ffs(vcpu->irq_pending[word_index]);
	int irq = word_index * BITS_PER_LONG + bit_index;

	clear_bit(bit_index, &vcpu->irq_pending[word_index]);
	if (!vcpu->irq_pending[word_index])
		clear_bit(word_index, &vcpu->irq_summary);
	return irq;
}
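/*
 * irq_pending is a bitmap of pending interrupt vectors and irq_summary
 * keeps one bit per irq_pending word, so pop_irq() finds the lowest
 * pending vector with two __ffs() lookups instead of scanning the whole
 * bitmap; push_irq() maintains both levels.
 */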
static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
{
	set_bit(irq, vcpu->irq_pending);
	set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
}
static inline void clgi(void)
{
	asm volatile (SVM_CLGI);
}

static inline void stgi(void)
{
	asm volatile (SVM_STGI);
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (SVM_INVLPGA :: "a"(addr), "c"(asid));
}
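/*
 * CLGI and STGI clear and set the global interrupt flag (GIF).  While GIF
 * is clear the processor holds physical interrupts, NMIs and SMIs pending,
 * so the host can switch world state around VMRUN without being preempted
 * mid-switch.
 */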
static inline unsigned long kvm_read_cr2(void)
{
	unsigned long cr2;

	asm volatile ("mov %%cr2, %0" : "=r" (cr2));
	return cr2;
}

static inline void kvm_write_cr2(unsigned long val)
{
	asm volatile ("mov %0, %%cr2" :: "r" (val));
}

static inline unsigned long read_dr6(void)
{
	unsigned long dr6;

	asm volatile ("mov %%dr6, %0" : "=r" (dr6));
	return dr6;
}

static inline void write_dr6(unsigned long val)
{
	asm volatile ("mov %0, %%dr6" :: "r" (val));
}

static inline unsigned long read_dr7(void)
{
	unsigned long dr7;

	asm volatile ("mov %%dr7, %0" : "=r" (dr7));
	return dr7;
}

static inline void write_dr7(unsigned long val)
{
	asm volatile ("mov %0, %%dr7" :: "r" (val));
}
static inline void force_new_asid(struct kvm_vcpu *vcpu)
{
	to_svm(vcpu)->asid_generation--;
}

static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}
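/*
 * Decrementing asid_generation guarantees a mismatch with the per-cpu
 * svm_data->asid_generation, so pre_svm_run() will call new_asid() and
 * the guest gets a fresh ASID -- effectively a TLB flush for all of its
 * translations.
 */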
static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (!(efer & KVM_EFER_LMA))
		efer &= ~KVM_EFER_LME;

	to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
	vcpu->shadow_efer = efer;
}
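/*
 * EFER.SVME must be set in the VMCB copy of EFER or VMRUN will fail, so it
 * is OR'd in unconditionally; the value the guest believes it wrote is
 * kept separately in vcpu->shadow_efer.
 */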
static void svm_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
				       SVM_EVTINJ_VALID_ERR |
				       SVM_EVTINJ_TYPE_EXEPT |
				       GP_VECTOR;
	svm->vmcb->control.event_inj_err = error_code;
}
static void inject_ud(struct kvm_vcpu *vcpu)
{
	to_svm(vcpu)->vmcb->control.event_inj = SVM_EVTINJ_VALID |
						SVM_EVTINJ_TYPE_EXEPT |
						UD_VECTOR;
}
static int is_page_fault(uint32_t info)
{
	info &= SVM_EVTINJ_VEC_MASK | SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (PF_VECTOR | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT);
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!svm->next_rip) {
		printk(KERN_DEBUG "%s: NOP\n", __FUNCTION__);
		return;
	}
	if (svm->next_rip - svm->vmcb->save.rip > MAX_INST_SIZE)
		printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n",
		       __FUNCTION__, svm->vmcb->save.rip, svm->next_rip);

	vcpu->rip = svm->vmcb->save.rip = svm->next_rip;
	svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;

	vcpu->interrupt_window_open = 1;
}
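/*
 * The exit handlers below compute svm->next_rip by hand (rip plus the
 * known length of the intercepted instruction) before calling this;
 * advancing rip this way also drops any pending interrupt shadow.
 */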
static int has_svm(void)
{
	uint32_t eax, ebx, ecx, edx;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
		printk(KERN_INFO "has_svm: not amd\n");
		return 0;
	}

	cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
	if (eax < SVM_CPUID_FUNC) {
		printk(KERN_INFO "has_svm: can't execute cpuid_8000000a\n");
		return 0;
	}

	cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
	if (!(ecx & (1 << SVM_CPUID_FEATURE_SHIFT))) {
		printk(KERN_DEBUG "has_svm: svm not available\n");
		return 0;
	}
	return 1;
}
static void svm_hardware_disable(void *garbage)
{
	struct svm_cpu_data *svm_data
		= per_cpu(svm_data, raw_smp_processor_id());

	if (svm_data) {
		uint64_t efer;

		wrmsrl(MSR_VM_HSAVE_PA, 0);
		rdmsrl(MSR_EFER, efer);
		wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
		per_cpu(svm_data, raw_smp_processor_id()) = NULL;
		__free_page(svm_data->save_area);
		kfree(svm_data);
	}
}
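/*
 * MSR_VM_HSAVE_PA tells the processor where to save host state across
 * VMRUN; each CPU gets its own save_area page, allocated in
 * svm_cpu_init() and registered here on enable (and cleared on disable).
 */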
static void svm_hardware_enable(void *garbage)
{
	struct svm_cpu_data *svm_data;
	uint64_t efer;
	struct desc_ptr gdt_descr;
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	if (!has_svm()) {
		printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
		return;
	}
	svm_data = per_cpu(svm_data, me);

	if (!svm_data) {
		printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n",
		       me);
		return;
	}

	svm_data->asid_generation = 1;
	svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	svm_data->next_asid = svm_data->max_asid + 1;
	svm_features = cpuid_edx(SVM_CPUID_FUNC);

	asm volatile ("sgdt %0" : "=m"(gdt_descr));
	gdt = (struct desc_struct *)gdt_descr.address;
	svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	rdmsrl(MSR_EFER, efer);
	wrmsrl(MSR_EFER, efer | MSR_EFER_SVME_MASK);

	wrmsrl(MSR_VM_HSAVE_PA,
	       page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
}
static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *svm_data;
	int r;

	svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!svm_data)
		return -ENOMEM;
	svm_data->cpu = cpu;
	svm_data->save_area = alloc_page(GFP_KERNEL);
	r = -ENOMEM;
	if (!svm_data->save_area)
		goto err_1;

	per_cpu(svm_data, cpu) = svm_data;

	return 0;

err_1:
	kfree(svm_data);
	return r;
}
static void set_msr_interception(u32 *msrpm, unsigned msr,
				 int read, int write)
{
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr >= msrpm_ranges[i] &&
		    msr < msrpm_ranges[i] + MSRS_IN_RANGE) {
			u32 msr_offset = (i * MSRS_IN_RANGE + msr -
					  msrpm_ranges[i]) * 2;

			u32 *base = msrpm + (msr_offset / 32);
			u32 msr_shift = msr_offset % 32;
			u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
			*base = (*base & ~(0x3 << msr_shift)) |
				(mask << msr_shift);
			return;
		}
	}
	BUG();
}
static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	struct page *msrpm_pages;
	void *iopm_va, *msrpm_va;
	int r;

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);

	r = -ENOMEM;
	if (!msrpm_pages)
		goto err_1;

	msrpm_va = page_address(msrpm_pages);
	memset(msrpm_va, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
	msrpm_base = page_to_pfn(msrpm_pages) << PAGE_SHIFT;

#ifdef CONFIG_X86_64
	set_msr_interception(msrpm_va, MSR_GS_BASE, 1, 1);
	set_msr_interception(msrpm_va, MSR_FS_BASE, 1, 1);
	set_msr_interception(msrpm_va, MSR_KERNEL_GS_BASE, 1, 1);
	set_msr_interception(msrpm_va, MSR_LSTAR, 1, 1);
	set_msr_interception(msrpm_va, MSR_CSTAR, 1, 1);
	set_msr_interception(msrpm_va, MSR_SYSCALL_MASK, 1, 1);
#endif
	set_msr_interception(msrpm_va, MSR_K6_STAR, 1, 1);
	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_CS, 1, 1);
	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_ESP, 1, 1);
	set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_EIP, 1, 1);

	for_each_online_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err_2;
	}
	return 0;

err_2:
	__free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
	msrpm_base = 0;
err_1:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}
static __exit void svm_hardware_unsetup(void)
{
	__free_pages(pfn_to_page(msrpm_base >> PAGE_SHIFT), MSRPM_ALLOC_ORDER);
	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = msrpm_base = 0;
}
static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}
static void init_vmcb(struct vmcb *vmcb)
{
	struct vmcb_control_area *control = &vmcb->control;
	struct vmcb_save_area *save = &vmcb->save;

	control->intercept_cr_read = INTERCEPT_CR0_MASK |
				     INTERCEPT_CR3_MASK |
				     INTERCEPT_CR4_MASK;

	control->intercept_cr_write = INTERCEPT_CR0_MASK |
				      INTERCEPT_CR3_MASK |
				      INTERCEPT_CR4_MASK |
				      INTERCEPT_CR8_MASK;

	control->intercept_dr_read = INTERCEPT_DR0_MASK |
				     INTERCEPT_DR1_MASK |
				     INTERCEPT_DR2_MASK |
				     INTERCEPT_DR3_MASK;

	control->intercept_dr_write = INTERCEPT_DR0_MASK |
				      INTERCEPT_DR1_MASK |
				      INTERCEPT_DR2_MASK |
				      INTERCEPT_DR3_MASK |
				      INTERCEPT_DR5_MASK |
				      INTERCEPT_DR7_MASK;

	control->intercept_exceptions = (1 << PF_VECTOR) |
					(1 << UD_VECTOR);

	control->intercept = (1ULL << INTERCEPT_INTR) |
			     (1ULL << INTERCEPT_NMI) |
			     (1ULL << INTERCEPT_SMI) |
		/*
		 * selective cr0 intercept bug?
		 *	0:   0f 22 d8                mov    %eax,%cr3
		 *	3:   0f 20 c0                mov    %cr0,%eax
		 *	6:   0d 00 00 00 80          or     $0x80000000,%eax
		 *	b:   0f 22 c0                mov    %eax,%cr0
		 * set cr3 ->interception
		 * get cr0 ->interception
		 * set cr0 -> no interception
		 */
		/*	(1ULL << INTERCEPT_SELECTIVE_CR0) | */
			     (1ULL << INTERCEPT_CPUID) |
			     (1ULL << INTERCEPT_INVD) |
			     (1ULL << INTERCEPT_HLT) |
			     (1ULL << INTERCEPT_INVLPGA) |
			     (1ULL << INTERCEPT_IOIO_PROT) |
			     (1ULL << INTERCEPT_MSR_PROT) |
			     (1ULL << INTERCEPT_TASK_SWITCH) |
			     (1ULL << INTERCEPT_SHUTDOWN) |
			     (1ULL << INTERCEPT_VMRUN) |
			     (1ULL << INTERCEPT_VMMCALL) |
			     (1ULL << INTERCEPT_VMLOAD) |
			     (1ULL << INTERCEPT_VMSAVE) |
			     (1ULL << INTERCEPT_STGI) |
			     (1ULL << INTERCEPT_CLGI) |
			     (1ULL << INTERCEPT_SKINIT) |
			     (1ULL << INTERCEPT_WBINVD) |
			     (1ULL << INTERCEPT_MONITOR) |
			     (1ULL << INTERCEPT_MWAIT);

	control->iopm_base_pa = iopm_base;
	control->msrpm_base_pa = msrpm_base;
	control->tsc_offset = 0;
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
			  SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;
	/*
	 * cs.base should really be 0xffff0000, but vmx can't handle that, so
	 * be consistent with it.
	 *
	 * Replace when we have real mode working for vmx.
	 */
	save->cs.base = 0xf0000;

	save->gdtr.limit = 0xffff;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	save->efer = MSR_EFER_SVME_MASK;
	save->dr6 = 0xffff0ff0;
	save->dr7 = 0x400;
	save->rflags = 2;
	save->rip = 0x0000fff0;

	/*
	 * cr0 val on cpu init should be 0x60000010, we enable cpu
	 * cache by default. the orderly way is to enable cache in bios.
	 */
	save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP;
	save->cr4 = X86_CR4_PAE;
}
static void svm_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	init_vmcb(svm->vmcb);

	if (vcpu->vcpu_id != 0) {
		svm->vmcb->save.rip = 0;
		svm->vmcb->save.cs.base = svm->vcpu.sipi_vector << 12;
		svm->vmcb->save.cs.selector = svm->vcpu.sipi_vector << 8;
	}
}
static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
{
	struct vcpu_svm *svm;
	struct page *page;
	int err;

	svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!svm) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(&svm->vcpu, kvm, id);
	if (err)
		goto free_svm;

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		err = -ENOMEM;
		goto uninit;
	}

	svm->vmcb = page_address(page);
	clear_page(svm->vmcb);
	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
	svm->asid_generation = 0;
	memset(svm->db_regs, 0, sizeof(svm->db_regs));
	init_vmcb(svm->vmcb);

	fx_init(&svm->vcpu);
	svm->vcpu.fpu_active = 1;
	svm->vcpu.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
	if (svm->vcpu.vcpu_id == 0)
		svm->vcpu.apic_base |= MSR_IA32_APICBASE_BSP;

	return &svm->vcpu;

uninit:
	kvm_vcpu_uninit(&svm->vcpu);
free_svm:
	kmem_cache_free(kvm_vcpu_cache, svm);
out:
	return ERR_PTR(err);
}
static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	__free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, svm);
}
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	if (unlikely(cpu != vcpu->cpu)) {
		u64 tsc_this, delta;

		/*
		 * Make sure that the guest sees a monotonically
		 * increasing TSC.
		 */
		rdtscll(tsc_this);
		delta = vcpu->host_tsc - tsc_this;
		svm->vmcb->control.tsc_offset += delta;
		vcpu->cpu = cpu;
		kvm_migrate_apic_timer(vcpu);
	}

	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}
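/*
 * tsc_offset is added to the host TSC by hardware while the guest runs,
 * so folding (old_host_tsc - new_host_tsc) into it hides any backwards
 * jump between two hosts' counters from the guest.
 */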
static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);

	rdtscll(vcpu->host_tsc);
	kvm_put_guest_fpu(vcpu);
}
static void svm_vcpu_decache(struct kvm_vcpu *vcpu)
{
}
static void svm_cache_regs(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	vcpu->regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
	vcpu->regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
	vcpu->rip = svm->vmcb->save.rip;
}
static void svm_decache_regs(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.rax = vcpu->regs[VCPU_REGS_RAX];
	svm->vmcb->save.rsp = vcpu->regs[VCPU_REGS_RSP];
	svm->vmcb->save.rip = vcpu->rip;
}
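/*
 * Only rax, rsp and rip live in the VMCB save area; the other general
 * purpose registers stay in vcpu->regs and are switched by hand in
 * svm_vcpu_run(), so these two helpers only shuttle the VMCB-resident
 * trio.
 */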
static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
	return to_svm(vcpu)->vmcb->save.rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	to_svm(vcpu)->vmcb->save.rflags = rflags;
}
static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	switch (seg) {
	case VCPU_SREG_CS: return &save->cs;
	case VCPU_SREG_DS: return &save->ds;
	case VCPU_SREG_ES: return &save->es;
	case VCPU_SREG_FS: return &save->fs;
	case VCPU_SREG_GS: return &save->gs;
	case VCPU_SREG_SS: return &save->ss;
	case VCPU_SREG_TR: return &save->tr;
	case VCPU_SREG_LDTR: return &save->ldtr;
	}
	BUG();
	return NULL;
}
static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	return s->base;
}
static void svm_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	var->base = s->base;
	var->limit = s->limit;
	var->selector = s->selector;
	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
	var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
	var->unusable = !var->present;
}
static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->limit = svm->vmcb->save.idtr.limit;
	dt->base = svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.idtr.limit = dt->limit;
	svm->vmcb->save.idtr.base = dt->base;
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->limit = svm->vmcb->save.gdtr.limit;
	dt->base = svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.gdtr.limit = dt->limit;
	svm->vmcb->save.gdtr.base = dt->base;
}
static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}
static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	struct vcpu_svm *svm = to_svm(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->shadow_efer & KVM_EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
			vcpu->shadow_efer |= KVM_EFER_LMA;
			svm->vmcb->save.efer |= KVM_EFER_LMA | KVM_EFER_LME;
		}

		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
			vcpu->shadow_efer &= ~KVM_EFER_LMA;
			svm->vmcb->save.efer &= ~(KVM_EFER_LMA | KVM_EFER_LME);
		}
	}
#endif
	if ((vcpu->cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
		svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
		vcpu->fpu_active = 1;
	}

	vcpu->cr0 = cr0;
	cr0 |= X86_CR0_PG | X86_CR0_WP;
	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
	svm->vmcb->save.cr0 = cr0;
}
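/*
 * Paging and write protection are forced on in the real cr0 because the
 * guest always runs on KVM's shadow page tables here; CD/NW are cleared
 * so the guest cannot disable the cache underneath the host.  vcpu->cr0
 * keeps the value the guest believes it wrote.
 */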
static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	vcpu->cr4 = cr4;
	to_svm(vcpu)->vmcb->save.cr4 = cr4 | X86_CR4_PAE;
}
static void svm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	s->base = var->base;
	s->limit = var->limit;
	s->selector = var->selector;
	if (var->unusable)
		s->attrib = 0;
	else {
		s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
		s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
		s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
		s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
		s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
		s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
		s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
		s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
	}
	if (seg == VCPU_SREG_CS)
		svm->vmcb->save.cpl
			= (svm->vmcb->save.cs.attrib
			   >> SVM_SELECTOR_DPL_SHIFT) & 3;
}

/* FIXME:

	svm(vcpu)->vmcb->control.int_ctl &= ~V_TPR_MASK;
	svm(vcpu)->vmcb->control.int_ctl |= (sregs->cr8 & V_TPR_MASK);

*/
static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
	return -EOPNOTSUPP;
}
static int svm_get_irq(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 exit_int_info = svm->vmcb->control.exit_int_info;

	if (is_external_interrupt(exit_int_info))
		return exit_int_info & SVM_EVTINJ_VEC_MASK;
	return -1;
}
static void load_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}

static void save_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}
static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
{
	if (svm_data->next_asid > svm_data->max_asid) {
		++svm_data->asid_generation;
		svm_data->next_asid = 1;
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}

	svm->vcpu.cpu = svm_data->cpu;
	svm->asid_generation = svm_data->asid_generation;
	svm->vmcb->control.asid = svm_data->next_asid++;
}
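/*
 * ASIDs are handed out sequentially; when they run out, the per-cpu
 * generation counter is bumped and a full TLB flush is requested, after
 * which numbering restarts at 1 (ASID 0 belongs to the host).
 */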
static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
{
	return to_svm(vcpu)->db_regs[dr];
}
static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	*exception = 0;

	if (svm->vmcb->save.dr7 & DR7_GD_MASK) {
		svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
		svm->vmcb->save.dr6 |= DR6_BD_MASK;
		*exception = DB_VECTOR;
		return;
	}

	switch (dr) {
	case 0 ... 3:
		svm->db_regs[dr] = value;
		return;
	case 4 ... 5:
		if (vcpu->cr4 & X86_CR4_DE) {
			*exception = UD_VECTOR;
			return;
		}
	case 7: {
		if (value & ~((1ULL << 32) - 1)) {
			*exception = GP_VECTOR;
			return;
		}
		svm->vmcb->save.dr7 = value;
		return;
	}
	default:
		printk(KERN_DEBUG "%s: unexpected dr %u\n",
		       __FUNCTION__, dr);
		*exception = UD_VECTOR;
		return;
	}
}
static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 exit_int_info = svm->vmcb->control.exit_int_info;
	struct kvm *kvm = svm->vcpu.kvm;
	u64 fault_address;
	u32 error_code;
	enum emulation_result er;
	int r;

	if (!irqchip_in_kernel(kvm) &&
	    is_external_interrupt(exit_int_info))
		push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);

	mutex_lock(&kvm->lock);

	fault_address = svm->vmcb->control.exit_info_2;
	error_code = svm->vmcb->control.exit_info_1;
	r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
	if (r < 0) {
		mutex_unlock(&kvm->lock);
		return r;
	}
	if (!r) {
		mutex_unlock(&kvm->lock);
		return 1;
	}
	er = emulate_instruction(&svm->vcpu, kvm_run, fault_address,
				 error_code, 0);
	mutex_unlock(&kvm->lock);

	switch (er) {
	case EMULATE_DONE:
		return 1;
	case EMULATE_DO_MMIO:
		++svm->vcpu.stat.mmio_exits;
		return 0;
	case EMULATE_FAIL:
		kvm_report_emulation_failure(&svm->vcpu, "pagetable");
		break;
	default:
		BUG();
	}

	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
	return 0;
}
static int ud_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	int er;

	er = emulate_instruction(&svm->vcpu, kvm_run, 0, 0, 0);
	if (er != EMULATE_DONE)
		inject_ud(&svm->vcpu);

	return 1;
}
static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
	if (!(svm->vcpu.cr0 & X86_CR0_TS))
		svm->vmcb->save.cr0 &= ~X86_CR0_TS;
	svm->vcpu.fpu_active = 1;

	return 1;
}
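/*
 * Lazy FPU switching: while the guest's FPU state is not loaded, TS is
 * kept set and #NM is intercepted (see svm_set_cr3()); on the first #NM
 * the intercept is dropped, TS is cleared unless the guest set it itself,
 * and fpu_active marks the state for kvm_put_guest_fpu().
 */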
static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	/*
	 * VMCB is undefined after a SHUTDOWN intercept
	 * so reinitialize it.
	 */
	clear_page(svm->vmcb);
	init_vmcb(svm->vmcb);

	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}
static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
	int size, down, in, string, rep;
	unsigned port;

	++svm->vcpu.stat.io_exits;

	svm->next_rip = svm->vmcb->control.exit_info_2;

	string = (io_info & SVM_IOIO_STR_MASK) != 0;

	if (string) {
		if (emulate_instruction(&svm->vcpu,
					kvm_run, 0, 0, 0) == EMULATE_DO_MMIO)
			return 0;
		return 1;
	}

	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
	port = io_info >> 16;
	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
	rep = (io_info & SVM_IOIO_REP_MASK) != 0;
	down = (svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;

	return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port);
}
static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	return 1;
}
static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = svm->vmcb->save.rip + 1;
	skip_emulated_instruction(&svm->vcpu);
	return kvm_emulate_halt(&svm->vcpu);
}
static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = svm->vmcb->save.rip + 3;
	skip_emulated_instruction(&svm->vcpu);
	kvm_emulate_hypercall(&svm->vcpu);
	return 1;
}
static int invalid_op_interception(struct vcpu_svm *svm,
				   struct kvm_run *kvm_run)
{
	inject_ud(&svm->vcpu);
	return 1;
}
static int task_switch_interception(struct vcpu_svm *svm,
				    struct kvm_run *kvm_run)
{
	pr_unimpl(&svm->vcpu, "%s: task switch is unsupported\n", __FUNCTION__);
	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
	return 0;
}
static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = svm->vmcb->save.rip + 2;
	kvm_emulate_cpuid(&svm->vcpu);
	return 1;
}
static int emulate_on_interception(struct vcpu_svm *svm,
				   struct kvm_run *kvm_run)
{
	if (emulate_instruction(&svm->vcpu, NULL, 0, 0, 0) != EMULATE_DONE)
		pr_unimpl(&svm->vcpu, "%s: failed\n", __FUNCTION__);
	return 1;
}
static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (ecx) {
	case MSR_IA32_TIME_STAMP_COUNTER: {
		u64 tsc;

		rdtscll(tsc);
		*data = svm->vmcb->control.tsc_offset + tsc;
		break;
	}
	case MSR_K6_STAR:
		*data = svm->vmcb->save.star;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		*data = svm->vmcb->save.lstar;
		break;
	case MSR_CSTAR:
		*data = svm->vmcb->save.cstar;
		break;
	case MSR_KERNEL_GS_BASE:
		*data = svm->vmcb->save.kernel_gs_base;
		break;
	case MSR_SYSCALL_MASK:
		*data = svm->vmcb->save.sfmask;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		*data = svm->vmcb->save.sysenter_cs;
		break;
	case MSR_IA32_SYSENTER_EIP:
		*data = svm->vmcb->save.sysenter_eip;
		break;
	case MSR_IA32_SYSENTER_ESP:
		*data = svm->vmcb->save.sysenter_esp;
		break;
	default:
		return kvm_get_msr_common(vcpu, ecx, data);
	}
	return 0;
}
static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX];
	u64 data;

	if (svm_get_msr(&svm->vcpu, ecx, &data))
		svm_inject_gp(&svm->vcpu, 0);
	else {
		svm->vmcb->save.rax = data & 0xffffffff;
		svm->vcpu.regs[VCPU_REGS_RDX] = data >> 32;
		svm->next_rip = svm->vmcb->save.rip + 2;
		skip_emulated_instruction(&svm->vcpu);
	}
	return 1;
}
static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (ecx) {
	case MSR_IA32_TIME_STAMP_COUNTER: {
		u64 tsc;

		rdtscll(tsc);
		svm->vmcb->control.tsc_offset = data - tsc;
		break;
	}
	case MSR_K6_STAR:
		svm->vmcb->save.star = data;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		svm->vmcb->save.lstar = data;
		break;
	case MSR_CSTAR:
		svm->vmcb->save.cstar = data;
		break;
	case MSR_KERNEL_GS_BASE:
		svm->vmcb->save.kernel_gs_base = data;
		break;
	case MSR_SYSCALL_MASK:
		svm->vmcb->save.sfmask = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		svm->vmcb->save.sysenter_cs = data;
		break;
	case MSR_IA32_SYSENTER_EIP:
		svm->vmcb->save.sysenter_eip = data;
		break;
	case MSR_IA32_SYSENTER_ESP:
		svm->vmcb->save.sysenter_esp = data;
		break;
	default:
		return kvm_set_msr_common(vcpu, ecx, data);
	}
	return 0;
}
static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX];
	u64 data = (svm->vmcb->save.rax & -1u)
		| ((u64)(svm->vcpu.regs[VCPU_REGS_RDX] & -1u) << 32);
	svm->next_rip = svm->vmcb->save.rip + 2;
	if (svm_set_msr(&svm->vcpu, ecx, data))
		svm_inject_gp(&svm->vcpu, 0);
	else
		skip_emulated_instruction(&svm->vcpu);
	return 1;
}
static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	if (svm->vmcb->control.exit_info_1)
		return wrmsr_interception(svm, kvm_run);
	else
		return rdmsr_interception(svm, kvm_run);
}
static int interrupt_window_interception(struct vcpu_svm *svm,
					 struct kvm_run *kvm_run)
{
	svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
	/*
	 * If the user space waits to inject interrupts, exit as soon as
	 * possible
	 */
	if (kvm_run->request_interrupt_window &&
	    !svm->vcpu.irq_summary) {
		++svm->vcpu.stat.irq_window_exits;
		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		return 0;
	}

	return 1;
}
static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
				  struct kvm_run *kvm_run) = {
	[SVM_EXIT_READ_CR0]			= emulate_on_interception,
	[SVM_EXIT_READ_CR3]			= emulate_on_interception,
	[SVM_EXIT_READ_CR4]			= emulate_on_interception,
	[SVM_EXIT_WRITE_CR0]			= emulate_on_interception,
	[SVM_EXIT_WRITE_CR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_CR4]			= emulate_on_interception,
	[SVM_EXIT_READ_DR0]			= emulate_on_interception,
	[SVM_EXIT_READ_DR1]			= emulate_on_interception,
	[SVM_EXIT_READ_DR2]			= emulate_on_interception,
	[SVM_EXIT_READ_DR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR0]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR1]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR2]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR3]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR5]			= emulate_on_interception,
	[SVM_EXIT_WRITE_DR7]			= emulate_on_interception,
	[SVM_EXIT_EXCP_BASE + UD_VECTOR]	= ud_interception,
	[SVM_EXIT_EXCP_BASE + PF_VECTOR]	= pf_interception,
	[SVM_EXIT_EXCP_BASE + NM_VECTOR]	= nm_interception,
	[SVM_EXIT_INTR]				= nop_on_interception,
	[SVM_EXIT_NMI]				= nop_on_interception,
	[SVM_EXIT_SMI]				= nop_on_interception,
	[SVM_EXIT_INIT]				= nop_on_interception,
	[SVM_EXIT_VINTR]			= interrupt_window_interception,
	/* [SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception, */
	[SVM_EXIT_CPUID]			= cpuid_interception,
	[SVM_EXIT_INVD]				= emulate_on_interception,
	[SVM_EXIT_HLT]				= halt_interception,
	[SVM_EXIT_INVLPG]			= emulate_on_interception,
	[SVM_EXIT_INVLPGA]			= invalid_op_interception,
	[SVM_EXIT_IOIO]				= io_interception,
	[SVM_EXIT_MSR]				= msr_interception,
	[SVM_EXIT_TASK_SWITCH]			= task_switch_interception,
	[SVM_EXIT_SHUTDOWN]			= shutdown_interception,
	[SVM_EXIT_VMRUN]			= invalid_op_interception,
	[SVM_EXIT_VMMCALL]			= vmmcall_interception,
	[SVM_EXIT_VMLOAD]			= invalid_op_interception,
	[SVM_EXIT_VMSAVE]			= invalid_op_interception,
	[SVM_EXIT_STGI]				= invalid_op_interception,
	[SVM_EXIT_CLGI]				= invalid_op_interception,
	[SVM_EXIT_SKINIT]			= invalid_op_interception,
	[SVM_EXIT_WBINVD]			= emulate_on_interception,
	[SVM_EXIT_MONITOR]			= invalid_op_interception,
	[SVM_EXIT_MWAIT]			= invalid_op_interception,
};
static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 exit_code = svm->vmcb->control.exit_code;

	kvm_reput_irq(svm);

	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		kvm_run->fail_entry.hardware_entry_failure_reason
			= svm->vmcb->control.exit_code;
		return 0;
	}

	if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
	    exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR)
		printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
		       "exit_code 0x%x\n",
		       __FUNCTION__, svm->vmcb->control.exit_int_info,
		       exit_code);

	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
	    || svm_exit_handlers[exit_code] == 0) {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = exit_code;
		return 0;
	}

	return svm_exit_handlers[exit_code](svm, kvm_run);
}
static void reload_tss(struct kvm_vcpu *vcpu)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
	svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
	load_TR_desc();
}
static void pre_svm_run(struct vcpu_svm *svm)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);

	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
	if (svm->vcpu.cpu != cpu ||
	    svm->asid_generation != svm_data->asid_generation)
		new_asid(svm, svm_data);
}
static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
{
	struct vmcb_control_area *control;

	control = &svm->vmcb->control;
	control->int_vector = irq;
	control->int_ctl &= ~V_INTR_PRIO_MASK;
	control->int_ctl |= V_IRQ_MASK |
		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
}

static void svm_set_irq(struct kvm_vcpu *vcpu, int irq)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm_inject_irq(svm, irq);
}
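/*
 * This uses the virtual interrupt (V_IRQ) mechanism rather than event
 * injection; the priority field is pinned at 0xf (the highest) so the
 * virtual interrupt is never masked by the guest's V_TPR.
 */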
static void svm_intr_assist(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	int intr_vector = -1;

	kvm_inject_pending_timer_irqs(vcpu);
	if ((vmcb->control.exit_int_info & SVM_EVTINJ_VALID) &&
	    ((vmcb->control.exit_int_info & SVM_EVTINJ_TYPE_MASK) == 0)) {
		intr_vector = vmcb->control.exit_int_info &
			      SVM_EVTINJ_VEC_MASK;
		vmcb->control.exit_int_info = 0;
		svm_inject_irq(svm, intr_vector);
		return;
	}

	if (vmcb->control.int_ctl & V_IRQ_MASK)
		return;

	if (!kvm_cpu_has_interrupt(vcpu))
		return;

	if (!(vmcb->save.rflags & X86_EFLAGS_IF) ||
	    (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
	    (vmcb->control.event_inj & SVM_EVTINJ_VALID)) {
		/* unable to deliver irq, set pending irq */
		vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR);
		svm_inject_irq(svm, 0x0);
		return;
	}
	/* Okay, we can deliver the interrupt: grab it and update PIC state. */
	intr_vector = kvm_cpu_get_interrupt(vcpu);
	svm_inject_irq(svm, intr_vector);
	kvm_timer_intr_post(vcpu, intr_vector);
}
static void kvm_reput_irq(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;

	if ((control->int_ctl & V_IRQ_MASK)
	    && !irqchip_in_kernel(svm->vcpu.kvm)) {
		control->int_ctl &= ~V_IRQ_MASK;
		push_irq(&svm->vcpu, control->int_vector);
	}

	svm->vcpu.interrupt_window_open =
		!(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
}
static void svm_do_inject_vector(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	int word_index = __ffs(vcpu->irq_summary);
	int bit_index = __ffs(vcpu->irq_pending[word_index]);
	int irq = word_index * BITS_PER_LONG + bit_index;

	clear_bit(bit_index, &vcpu->irq_pending[word_index]);
	if (!vcpu->irq_pending[word_index])
		clear_bit(word_index, &vcpu->irq_summary);
	svm_inject_irq(svm, irq);
}
static void do_interrupt_requests(struct kvm_vcpu *vcpu,
				  struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;

	svm->vcpu.interrupt_window_open =
		(!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
		 (svm->vmcb->save.rflags & X86_EFLAGS_IF));

	if (svm->vcpu.interrupt_window_open && svm->vcpu.irq_summary)
		/*
		 * If interrupts enabled, and not blocked by sti or mov ss. Good.
		 */
		svm_do_inject_vector(svm);

	/*
	 * Interrupts blocked.  Wait for unblock.
	 */
	if (!svm->vcpu.interrupt_window_open &&
	    (svm->vcpu.irq_summary || kvm_run->request_interrupt_window))
		control->intercept |= 1ULL << INTERCEPT_VINTR;
	else
		control->intercept &= ~(1ULL << INTERCEPT_VINTR);
}
static void save_db_regs(unsigned long *db_regs)
{
	asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
	asm volatile ("mov %%dr1, %0" : "=r"(db_regs[1]));
	asm volatile ("mov %%dr2, %0" : "=r"(db_regs[2]));
	asm volatile ("mov %%dr3, %0" : "=r"(db_regs[3]));
}

static void load_db_regs(unsigned long *db_regs)
{
	asm volatile ("mov %0, %%dr0" : : "r"(db_regs[0]));
	asm volatile ("mov %0, %%dr1" : : "r"(db_regs[1]));
	asm volatile ("mov %0, %%dr2" : : "r"(db_regs[2]));
	asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
}
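/*
 * dr0-dr3 must be context-switched by hand around VMRUN: the VMCB save
 * area only holds dr6 and dr7, so svm_vcpu_run() swaps these four via
 * save_db_regs()/load_db_regs() whenever guest dr7 has breakpoints armed.
 */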
static void svm_flush_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}

static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
}
static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u16 fs_selector;
	u16 gs_selector;
	u16 ldt_selector;

	pre_svm_run(svm);

	save_host_msrs(vcpu);
	fs_selector = read_fs();
	gs_selector = read_gs();
	ldt_selector = read_ldt();
	svm->host_cr2 = kvm_read_cr2();
	svm->host_dr6 = read_dr6();
	svm->host_dr7 = read_dr7();
	svm->vmcb->save.cr2 = vcpu->cr2;

	if (svm->vmcb->save.dr7 & 0xff) {
		write_dr7(0);
		save_db_regs(svm->host_db_regs);
		load_db_regs(svm->db_regs);
	}

	clgi();

	local_irq_enable();

	asm volatile (
#ifdef CONFIG_X86_64
		"push %%rbx; push %%rcx; push %%rdx;"
		"push %%rsi; push %%rdi; push %%rbp;"
		"push %%r8;  push %%r9;  push %%r10; push %%r11;"
		"push %%r12; push %%r13; push %%r14; push %%r15;"
#else
		"push %%ebx; push %%ecx; push %%edx;"
		"push %%esi; push %%edi; push %%ebp;"
#endif

#ifdef CONFIG_X86_64
		"mov %c[rbx](%[svm]), %%rbx \n\t"
		"mov %c[rcx](%[svm]), %%rcx \n\t"
		"mov %c[rdx](%[svm]), %%rdx \n\t"
		"mov %c[rsi](%[svm]), %%rsi \n\t"
		"mov %c[rdi](%[svm]), %%rdi \n\t"
		"mov %c[rbp](%[svm]), %%rbp \n\t"
		"mov %c[r8](%[svm]), %%r8 \n\t"
		"mov %c[r9](%[svm]), %%r9 \n\t"
		"mov %c[r10](%[svm]), %%r10 \n\t"
		"mov %c[r11](%[svm]), %%r11 \n\t"
		"mov %c[r12](%[svm]), %%r12 \n\t"
		"mov %c[r13](%[svm]), %%r13 \n\t"
		"mov %c[r14](%[svm]), %%r14 \n\t"
		"mov %c[r15](%[svm]), %%r15 \n\t"
#else
		"mov %c[rbx](%[svm]), %%ebx \n\t"
		"mov %c[rcx](%[svm]), %%ecx \n\t"
		"mov %c[rdx](%[svm]), %%edx \n\t"
		"mov %c[rsi](%[svm]), %%esi \n\t"
		"mov %c[rdi](%[svm]), %%edi \n\t"
		"mov %c[rbp](%[svm]), %%ebp \n\t"
#endif

#ifdef CONFIG_X86_64
		/* Enter guest mode */
		"push %%rax \n\t"
		"mov %c[vmcb](%[svm]), %%rax \n\t"
		SVM_VMLOAD "\n\t"
		SVM_VMRUN "\n\t"
		SVM_VMSAVE "\n\t"
		"pop %%rax \n\t"
#else
		/* Enter guest mode */
		"push %%eax \n\t"
		"mov %c[vmcb](%[svm]), %%eax \n\t"
		SVM_VMLOAD "\n\t"
		SVM_VMRUN "\n\t"
		SVM_VMSAVE "\n\t"
		"pop %%eax \n\t"
#endif

		/* Save guest registers, load host registers */
#ifdef CONFIG_X86_64
		"mov %%rbx, %c[rbx](%[svm]) \n\t"
		"mov %%rcx, %c[rcx](%[svm]) \n\t"
		"mov %%rdx, %c[rdx](%[svm]) \n\t"
		"mov %%rsi, %c[rsi](%[svm]) \n\t"
		"mov %%rdi, %c[rdi](%[svm]) \n\t"
		"mov %%rbp, %c[rbp](%[svm]) \n\t"
		"mov %%r8, %c[r8](%[svm]) \n\t"
		"mov %%r9, %c[r9](%[svm]) \n\t"
		"mov %%r10, %c[r10](%[svm]) \n\t"
		"mov %%r11, %c[r11](%[svm]) \n\t"
		"mov %%r12, %c[r12](%[svm]) \n\t"
		"mov %%r13, %c[r13](%[svm]) \n\t"
		"mov %%r14, %c[r14](%[svm]) \n\t"
		"mov %%r15, %c[r15](%[svm]) \n\t"

		"pop %%r15; pop %%r14; pop %%r13; pop %%r12;"
		"pop %%r11; pop %%r10; pop %%r9; pop %%r8;"
		"pop %%rbp; pop %%rdi; pop %%rsi;"
		"pop %%rdx; pop %%rcx; pop %%rbx; \n\t"
#else
		"mov %%ebx, %c[rbx](%[svm]) \n\t"
		"mov %%ecx, %c[rcx](%[svm]) \n\t"
		"mov %%edx, %c[rdx](%[svm]) \n\t"
		"mov %%esi, %c[rsi](%[svm]) \n\t"
		"mov %%edi, %c[rdi](%[svm]) \n\t"
		"mov %%ebp, %c[rbp](%[svm]) \n\t"

		"pop %%ebp; pop %%edi; pop %%esi;"
		"pop %%edx; pop %%ecx; pop %%ebx; \n\t"
#endif
		: : [svm]"a"(svm),
		    [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
		    [rbx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RBX])),
		    [rcx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RCX])),
		    [rdx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RDX])),
		    [rsi]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RSI])),
		    [rdi]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RDI])),
		    [rbp]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RBP]))
#ifdef CONFIG_X86_64
		    , [r8]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R8])),
		    [r9]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R9])),
		    [r10]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R10])),
		    [r11]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R11])),
		    [r12]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R12])),
		    [r13]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R13])),
		    [r14]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R14])),
		    [r15]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R15]))
#endif
		: "cc", "memory");

	if ((svm->vmcb->save.dr7 & 0xff))
		load_db_regs(svm->host_db_regs);

	vcpu->cr2 = svm->vmcb->save.cr2;

	write_dr6(svm->host_dr6);
	write_dr7(svm->host_dr7);
	kvm_write_cr2(svm->host_cr2);

	load_fs(fs_selector);
	load_gs(gs_selector);
	load_ldt(ldt_selector);
	load_host_msrs(vcpu);

	reload_tss(vcpu);

	local_irq_disable();

	stgi();

	svm->next_rip = 0;
}
static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.cr3 = root;
	force_new_asid(vcpu);

	if (vcpu->fpu_active) {
		svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
		svm->vmcb->save.cr0 |= X86_CR0_TS;
		vcpu->fpu_active = 0;
	}
}
static void svm_inject_page_fault(struct kvm_vcpu *vcpu,
				  unsigned long addr,
				  u32 err_code)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	uint32_t exit_int_info = svm->vmcb->control.exit_int_info;

	++vcpu->stat.pf_guest;

	if (is_page_fault(exit_int_info)) {
		/* a page fault while delivering a page fault: double fault */
		svm->vmcb->control.event_inj_err = 0;
		svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
					       SVM_EVTINJ_VALID_ERR |
					       SVM_EVTINJ_TYPE_EXEPT |
					       DF_VECTOR;
		return;
	}
	svm->vmcb->save.cr2 = addr;
	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID |
				       SVM_EVTINJ_VALID_ERR |
				       SVM_EVTINJ_TYPE_EXEPT |
				       PF_VECTOR;
	svm->vmcb->control.event_inj_err = err_code;
}
static int is_disabled(void)
{
	u64 vm_cr;

	rdmsrl(MSR_VM_CR, vm_cr);
	if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
		return 1;

	return 0;
}
static void
svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
	/*
	 * Patch in the VMMCALL instruction:
	 */
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xd9;	/* vmmcall = 0f 01 d9 */
}
static void svm_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}
static struct kvm_x86_ops svm_x86_ops = {
	.cpu_has_kvm_support = has_svm,
	.disabled_by_bios = is_disabled,
	.hardware_setup = svm_hardware_setup,
	.hardware_unsetup = svm_hardware_unsetup,
	.check_processor_compatibility = svm_check_processor_compat,
	.hardware_enable = svm_hardware_enable,
	.hardware_disable = svm_hardware_disable,

	.vcpu_create = svm_create_vcpu,
	.vcpu_free = svm_free_vcpu,
	.vcpu_reset = svm_vcpu_reset,

	.prepare_guest_switch = svm_prepare_guest_switch,
	.vcpu_load = svm_vcpu_load,
	.vcpu_put = svm_vcpu_put,
	.vcpu_decache = svm_vcpu_decache,

	.set_guest_debug = svm_guest_debug,
	.get_msr = svm_get_msr,
	.set_msr = svm_set_msr,
	.get_segment_base = svm_get_segment_base,
	.get_segment = svm_get_segment,
	.set_segment = svm_set_segment,
	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
	.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
	.set_cr0 = svm_set_cr0,
	.set_cr3 = svm_set_cr3,
	.set_cr4 = svm_set_cr4,
	.set_efer = svm_set_efer,
	.get_idt = svm_get_idt,
	.set_idt = svm_set_idt,
	.get_gdt = svm_get_gdt,
	.set_gdt = svm_set_gdt,
	.get_dr = svm_get_dr,
	.set_dr = svm_set_dr,
	.cache_regs = svm_cache_regs,
	.decache_regs = svm_decache_regs,
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,

	.tlb_flush = svm_flush_tlb,
	.inject_page_fault = svm_inject_page_fault,

	.inject_gp = svm_inject_gp,

	.run = svm_vcpu_run,
	.handle_exit = handle_exit,
	.skip_emulated_instruction = skip_emulated_instruction,
	.patch_hypercall = svm_patch_hypercall,
	.get_irq = svm_get_irq,
	.set_irq = svm_set_irq,
	.inject_pending_irq = svm_intr_assist,
	.inject_pending_vectors = do_interrupt_requests,
};
static int __init svm_init(void)
{
	return kvm_init_x86(&svm_x86_ops, sizeof(struct vcpu_svm),
			    THIS_MODULE);
}

static void __exit svm_exit(void)
{
	kvm_exit_x86();
}

module_init(svm_init)
module_exit(svm_exit)