/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "irq.h"
#include "mmu.h"
#include "cpuid.h"

#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/moduleparam.h>
#include <linux/mod_devicetable.h>
#include <linux/ftrace_event.h>
#include <linux/slab.h>
#include <linux/tboot.h>
#include "kvm_cache_regs.h"
#include "x86.h"

#include <asm/io.h>
#include <asm/desc.h>
#include <asm/vmx.h>
#include <asm/virtext.h>
#include <asm/mce.h>
#include <asm/i387.h>
#include <asm/xcr.h>
#include <asm/perf_event.h>
#include <asm/kexec.h>

#include "trace.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)
#define __ex_clear(x, reg) \
	____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static const struct x86_cpu_id vmx_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_VMX),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);

static bool __read_mostly enable_vpid = 1;
module_param_named(vpid, enable_vpid, bool, 0444);

static bool __read_mostly flexpriority_enabled = 1;
module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);

static bool __read_mostly enable_ept = 1;
module_param_named(ept, enable_ept, bool, S_IRUGO);

static bool __read_mostly enable_unrestricted_guest = 1;
module_param_named(unrestricted_guest,
			enable_unrestricted_guest, bool, S_IRUGO);

static bool __read_mostly enable_ept_ad_bits = 1;
module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);

static bool __read_mostly emulate_invalid_guest_state = true;
module_param(emulate_invalid_guest_state, bool, S_IRUGO);

static bool __read_mostly vmm_exclusive = 1;
module_param(vmm_exclusive, bool, S_IRUGO);

static bool __read_mostly fasteoi = 1;
module_param(fasteoi, bool, S_IRUGO);

static bool __read_mostly enable_apicv_reg_vid;

/*
 * If nested=1, nested virtualization is supported, i.e., guests may use
 * VMX and be a hypervisor for its own guests. If nested=0, guests may not
 * use VMX instructions.
 */
static bool __read_mostly nested = 0;
module_param(nested, bool, S_IRUGO);

#define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE)
#define KVM_VM_CR0_ALWAYS_ON						\
	(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
#define KVM_CR4_GUEST_OWNED_BITS				      \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR      \
	 | X86_CR4_OSXMMEXCPT)

#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)

#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))

/*
 * These 2 parameters are used to config the controls for Pause-Loop Exiting:
 * ple_gap:    upper bound on the amount of time between two successive
 *             executions of PAUSE in a loop. Also indicate if ple enabled.
 *             According to test, this time is usually smaller than 128 cycles.
 * ple_window: upper bound on the amount of time a guest is allowed to execute
 *             in a PAUSE loop. Tests indicate that most spinlocks are held for
 *             less than 2^12 cycles
 * Time is measured based on a counter that runs at the same rate as the TSC,
 * refer SDM volume 3b section 21.6.13 & 22.1.3.
 */
#define KVM_VMX_DEFAULT_PLE_GAP    128
#define KVM_VMX_DEFAULT_PLE_WINDOW 4096
static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP;
module_param(ple_gap, int, S_IRUGO);

static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
module_param(ple_window, int, S_IRUGO);

extern const ulong vmx_return;

#define NR_AUTOLOAD_MSRS 8
#define VMCS02_POOL_SIZE 1

struct vmcs {
	u32 revision_id;
	u32 abort;
	char data[0];
};

/*
 * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
 * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
 * loaded on this CPU (so we can clear them if the CPU goes down).
 */
struct loaded_vmcs {
	struct vmcs *vmcs;
	int cpu;
	int launched;
	struct list_head loaded_vmcss_on_cpu_link;
};

struct shared_msr_entry {
	unsigned index;
	u64 data;
	u64 mask;
};

/*
 * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a
 * single nested guest (L2), hence the name vmcs12. Any VMX implementation has
 * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is
 * stored in guest memory specified by VMPTRLD, but is opaque to the guest,
 * which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
 * More than one of these structures may exist, if L1 runs multiple L2 guests.
 * nested_vmx_run() will use the data here to build a vmcs02: a VMCS for the
 * underlying hardware which will be used to run L2.
 * This structure is packed to ensure that its layout is identical across
 * machines (necessary for live migration).
 * If there are changes in this struct, VMCS12_REVISION must be changed.
 */
typedef u64 natural_width;
struct __packed vmcs12 {
	/* According to the Intel spec, a VMCS region must start with the
	 * following two fields. Then follow implementation-specific data.
	 */
	u32 revision_id;
	u32 abort;

	u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
	u32 padding[7]; /* room for future expansion */

	u64 io_bitmap_a;
	u64 io_bitmap_b;
	u64 msr_bitmap;
	u64 vm_exit_msr_store_addr;
	u64 vm_exit_msr_load_addr;
	u64 vm_entry_msr_load_addr;
	u64 tsc_offset;
	u64 virtual_apic_page_addr;
	u64 apic_access_addr;
	u64 ept_pointer;
	u64 guest_physical_address;
	u64 vmcs_link_pointer;
	u64 guest_ia32_debugctl;
	u64 guest_ia32_pat;
	u64 guest_ia32_efer;
	u64 guest_ia32_perf_global_ctrl;
	u64 guest_pdptr0;
	u64 guest_pdptr1;
	u64 guest_pdptr2;
	u64 guest_pdptr3;
	u64 host_ia32_pat;
	u64 host_ia32_efer;
	u64 host_ia32_perf_global_ctrl;
	u64 padding64[8]; /* room for future expansion */
	/*
	 * To allow migration of L1 (complete with its L2 guests) between
	 * machines of different natural widths (32 or 64 bit), we cannot have
	 * unsigned long fields with no explicit size. We use u64 (aliased
	 * natural_width) instead. Luckily, x86 is little-endian.
	 */
	natural_width cr0_guest_host_mask;
	natural_width cr4_guest_host_mask;
	natural_width cr0_read_shadow;
	natural_width cr4_read_shadow;
	natural_width cr3_target_value0;
	natural_width cr3_target_value1;
	natural_width cr3_target_value2;
	natural_width cr3_target_value3;
	natural_width exit_qualification;
	natural_width guest_linear_address;
	natural_width guest_cr0;
	natural_width guest_cr3;
	natural_width guest_cr4;
	natural_width guest_es_base;
	natural_width guest_cs_base;
	natural_width guest_ss_base;
	natural_width guest_ds_base;
	natural_width guest_fs_base;
	natural_width guest_gs_base;
	natural_width guest_ldtr_base;
	natural_width guest_tr_base;
	natural_width guest_gdtr_base;
	natural_width guest_idtr_base;
	natural_width guest_dr7;
	natural_width guest_rsp;
	natural_width guest_rip;
	natural_width guest_rflags;
	natural_width guest_pending_dbg_exceptions;
	natural_width guest_sysenter_esp;
	natural_width guest_sysenter_eip;
	natural_width host_cr0;
	natural_width host_cr3;
	natural_width host_cr4;
	natural_width host_fs_base;
	natural_width host_gs_base;
	natural_width host_tr_base;
	natural_width host_gdtr_base;
	natural_width host_idtr_base;
	natural_width host_ia32_sysenter_esp;
	natural_width host_ia32_sysenter_eip;
	natural_width host_rsp;
	natural_width host_rip;
	natural_width paddingl[8]; /* room for future expansion */
	u32 pin_based_vm_exec_control;
	u32 cpu_based_vm_exec_control;
	u32 exception_bitmap;
	u32 page_fault_error_code_mask;
	u32 page_fault_error_code_match;
	u32 cr3_target_count;
	u32 vm_exit_controls;
	u32 vm_exit_msr_store_count;
	u32 vm_exit_msr_load_count;
	u32 vm_entry_controls;
	u32 vm_entry_msr_load_count;
	u32 vm_entry_intr_info_field;
	u32 vm_entry_exception_error_code;
	u32 vm_entry_instruction_len;
	u32 tpr_threshold;
	u32 secondary_vm_exec_control;
	u32 vm_instruction_error;
	u32 vm_exit_reason;
	u32 vm_exit_intr_info;
	u32 vm_exit_intr_error_code;
	u32 idt_vectoring_info_field;
	u32 idt_vectoring_error_code;
	u32 vm_exit_instruction_len;
	u32 vmx_instruction_info;
	u32 guest_es_limit;
	u32 guest_cs_limit;
	u32 guest_ss_limit;
	u32 guest_ds_limit;
	u32 guest_fs_limit;
	u32 guest_gs_limit;
	u32 guest_ldtr_limit;
	u32 guest_tr_limit;
	u32 guest_gdtr_limit;
	u32 guest_idtr_limit;
	u32 guest_es_ar_bytes;
	u32 guest_cs_ar_bytes;
	u32 guest_ss_ar_bytes;
	u32 guest_ds_ar_bytes;
	u32 guest_fs_ar_bytes;
	u32 guest_gs_ar_bytes;
	u32 guest_ldtr_ar_bytes;
	u32 guest_tr_ar_bytes;
	u32 guest_interruptibility_info;
	u32 guest_activity_state;
	u32 guest_sysenter_cs;
	u32 host_ia32_sysenter_cs;
	u32 padding32[8]; /* room for future expansion */
	u16 virtual_processor_id;
	u16 guest_es_selector;
	u16 guest_cs_selector;
	u16 guest_ss_selector;
	u16 guest_ds_selector;
	u16 guest_fs_selector;
	u16 guest_gs_selector;
	u16 guest_ldtr_selector;
	u16 guest_tr_selector;
	u16 host_es_selector;
	u16 host_cs_selector;
	u16 host_ss_selector;
	u16 host_ds_selector;
	u16 host_fs_selector;
	u16 host_gs_selector;
	u16 host_tr_selector;
};

/*
 * VMCS12_REVISION is an arbitrary id that should be changed if the content or
 * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
 * VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
 */
#define VMCS12_REVISION 0x11e57ed0

/*
 * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
 * and any VMCS region. Although only sizeof(struct vmcs12) are used by the
 * current implementation, 4K are reserved to avoid future complications.
 */
#define VMCS12_SIZE 0x1000

/* Used to remember the last vmcs02 used for some recently used vmcs12s */
struct vmcs02_list {
	struct list_head list;
	gpa_t vmptr;
	struct loaded_vmcs vmcs02;
};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
	/* Has the level1 guest done vmxon? */
	bool vmxon;

	/* The guest-physical address of the current VMCS L1 keeps for L2 */
	gpa_t current_vmptr;
	/* The host-usable pointer to the above */
	struct page *current_vmcs12_page;
	struct vmcs12 *current_vmcs12;

	/* vmcs02_list cache of VMCSs recently used to run L2 guests */
	struct list_head vmcs02_pool;
	int vmcs02_num;
	u64 vmcs01_tsc_offset;
	/* L2 must run next, and mustn't decide to exit to L1. */
	bool nested_run_pending;
	/*
	 * Guest pages referred to in vmcs02 with host-physical pointers, so
	 * we must keep them pinned while L2 runs.
	 */
	struct page *apic_access_page;
};

struct vcpu_vmx {
	struct kvm_vcpu       vcpu;
	unsigned long         host_rsp;
	u8                    fail;
	u8                    cpl;
	bool                  nmi_known_unmasked;
	u32                   exit_intr_info;
	u32                   idt_vectoring_info;
	ulong                 rflags;
	struct shared_msr_entry *guest_msrs;
	int                   nmsrs;
	int                   save_nmsrs;
#ifdef CONFIG_X86_64
	u64                   msr_host_kernel_gs_base;
	u64                   msr_guest_kernel_gs_base;
#endif
	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS.
	 */
	struct loaded_vmcs    vmcs01;
	struct loaded_vmcs   *loaded_vmcs;
	bool                  __launched; /* temporary, used in vmx_vcpu_run */
	struct msr_autoload {
		unsigned nr;
		struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
		struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
	} msr_autoload;
	struct {
		int           loaded;
		u16           fs_sel, gs_sel, ldt_sel;
#ifdef CONFIG_X86_64
		u16           ds_sel, es_sel;
#endif
		int           gs_ldt_reload_needed;
		int           fs_reload_needed;
	} host_state;
	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_segment segs[8];
	} rmode;
	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} seg[8];
	} segment_cache;
	int vpid;
	bool emulation_required;

	/* Support for vnmi-less CPUs */
	int soft_vnmi_blocked;
	ktime_t entry_time;
	s64 vnmi_blocked_time;
	u32 exit_reason;

	bool rdtscp_enabled;

	/* Support for a guest hypervisor (nested VMX) */
	struct nested_vmx nested;
};

enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

#define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
#define FIELD(number, name)	[number] = VMCS12_OFFSET(name)
#define FIELD64(number, name)	[number] = VMCS12_OFFSET(name), \
				[number##_HIGH] = VMCS12_OFFSET(name)+4

static const unsigned short vmcs_field_to_offset_table[] = {
	FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
	FIELD(GUEST_ES_SELECTOR, guest_es_selector),
	FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
	FIELD(GUEST_SS_SELECTOR, guest_ss_selector),
	FIELD(GUEST_DS_SELECTOR, guest_ds_selector),
	FIELD(GUEST_FS_SELECTOR, guest_fs_selector),
	FIELD(GUEST_GS_SELECTOR, guest_gs_selector),
	FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
	FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
	FIELD(HOST_ES_SELECTOR, host_es_selector),
	FIELD(HOST_CS_SELECTOR, host_cs_selector),
	FIELD(HOST_SS_SELECTOR, host_ss_selector),
	FIELD(HOST_DS_SELECTOR, host_ds_selector),
	FIELD(HOST_FS_SELECTOR, host_fs_selector),
	FIELD(HOST_GS_SELECTOR, host_gs_selector),
	FIELD(HOST_TR_SELECTOR, host_tr_selector),
	FIELD64(IO_BITMAP_A, io_bitmap_a),
	FIELD64(IO_BITMAP_B, io_bitmap_b),
	FIELD64(MSR_BITMAP, msr_bitmap),
	FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr),
	FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr),
	FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
	FIELD64(TSC_OFFSET, tsc_offset),
	FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
	FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
	FIELD64(EPT_POINTER, ept_pointer),
	FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
	FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
	FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
	FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
	FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
	FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl),
	FIELD64(GUEST_PDPTR0, guest_pdptr0),
	FIELD64(GUEST_PDPTR1, guest_pdptr1),
	FIELD64(GUEST_PDPTR2, guest_pdptr2),
	FIELD64(GUEST_PDPTR3, guest_pdptr3),
	FIELD64(HOST_IA32_PAT, host_ia32_pat),
	FIELD64(HOST_IA32_EFER, host_ia32_efer),
	FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
	FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control),
	FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control),
	FIELD(EXCEPTION_BITMAP, exception_bitmap),
	FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask),
	FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match),
	FIELD(CR3_TARGET_COUNT, cr3_target_count),
	FIELD(VM_EXIT_CONTROLS, vm_exit_controls),
	FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count),
	FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count),
	FIELD(VM_ENTRY_CONTROLS, vm_entry_controls),
	FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count),
	FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field),
	FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code),
	FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len),
	FIELD(TPR_THRESHOLD, tpr_threshold),
	FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control),
	FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error),
	FIELD(VM_EXIT_REASON, vm_exit_reason),
	FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info),
	FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code),
	FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field),
	FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code),
	FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len),
	FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info),
	FIELD(GUEST_ES_LIMIT, guest_es_limit),
	FIELD(GUEST_CS_LIMIT, guest_cs_limit),
	FIELD(GUEST_SS_LIMIT, guest_ss_limit),
	FIELD(GUEST_DS_LIMIT, guest_ds_limit),
	FIELD(GUEST_FS_LIMIT, guest_fs_limit),
	FIELD(GUEST_GS_LIMIT, guest_gs_limit),
	FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit),
	FIELD(GUEST_TR_LIMIT, guest_tr_limit),
	FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit),
	FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit),
	FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes),
	FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes),
	FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes),
	FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes),
	FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes),
	FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes),
	FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes),
	FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes),
	FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info),
	FIELD(GUEST_ACTIVITY_STATE, guest_activity_state),
	FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs),
	FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs),
	FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask),
	FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask),
	FIELD(CR0_READ_SHADOW, cr0_read_shadow),
	FIELD(CR4_READ_SHADOW, cr4_read_shadow),
	FIELD(CR3_TARGET_VALUE0, cr3_target_value0),
	FIELD(CR3_TARGET_VALUE1, cr3_target_value1),
	FIELD(CR3_TARGET_VALUE2, cr3_target_value2),
	FIELD(CR3_TARGET_VALUE3, cr3_target_value3),
	FIELD(EXIT_QUALIFICATION, exit_qualification),
	FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address),
	FIELD(GUEST_CR0, guest_cr0),
	FIELD(GUEST_CR3, guest_cr3),
	FIELD(GUEST_CR4, guest_cr4),
	FIELD(GUEST_ES_BASE, guest_es_base),
	FIELD(GUEST_CS_BASE, guest_cs_base),
	FIELD(GUEST_SS_BASE, guest_ss_base),
	FIELD(GUEST_DS_BASE, guest_ds_base),
	FIELD(GUEST_FS_BASE, guest_fs_base),
	FIELD(GUEST_GS_BASE, guest_gs_base),
	FIELD(GUEST_LDTR_BASE, guest_ldtr_base),
	FIELD(GUEST_TR_BASE, guest_tr_base),
	FIELD(GUEST_GDTR_BASE, guest_gdtr_base),
	FIELD(GUEST_IDTR_BASE, guest_idtr_base),
	FIELD(GUEST_DR7, guest_dr7),
	FIELD(GUEST_RSP, guest_rsp),
	FIELD(GUEST_RIP, guest_rip),
	FIELD(GUEST_RFLAGS, guest_rflags),
	FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions),
	FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp),
	FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip),
	FIELD(HOST_CR0, host_cr0),
	FIELD(HOST_CR3, host_cr3),
	FIELD(HOST_CR4, host_cr4),
	FIELD(HOST_FS_BASE, host_fs_base),
	FIELD(HOST_GS_BASE, host_gs_base),
	FIELD(HOST_TR_BASE, host_tr_base),
	FIELD(HOST_GDTR_BASE, host_gdtr_base),
	FIELD(HOST_IDTR_BASE, host_idtr_base),
	FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp),
	FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip),
	FIELD(HOST_RSP, host_rsp),
	FIELD(HOST_RIP, host_rip),
};
static const int max_vmcs_field = ARRAY_SIZE(vmcs_field_to_offset_table);

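/*
 * Descriptive note (derived from the table and macros above): FIELD64
 * entries add a second mapping for the field's _HIGH half at offset + 4,
 * and encodings that are not in the table resolve to -1 below.
 */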
static inline short vmcs_field_to_offset(unsigned long field)
{
	if (field >= max_vmcs_field || vmcs_field_to_offset_table[field] == 0)
		return -1;
	return vmcs_field_to_offset_table[field];
}

static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.current_vmcs12;
}

static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr)
{
	struct page *page = gfn_to_page(vcpu->kvm, addr >> PAGE_SHIFT);
	if (is_error_page(page))
		return NULL;

	return page;
}

static void nested_release_page(struct page *page)
{
	kvm_release_page_dirty(page);
}

static void nested_release_page_clean(struct page *page)
{
	kvm_release_page_clean(page);
}

static u64 construct_eptp(unsigned long root_hpa);
static void kvm_cpu_vmxon(u64 addr);
static void kvm_cpu_vmxoff(void);
static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
static void vmx_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
static void vmx_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
static bool guest_state_valid(struct kvm_vcpu *vcpu);
static u32 vmx_segment_access_rights(struct kvm_segment *var);

static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
/*
 * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is needed
 * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
 */
static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
static DEFINE_PER_CPU(struct desc_ptr, host_gdt);

static unsigned long *vmx_io_bitmap_a;
static unsigned long *vmx_io_bitmap_b;
static unsigned long *vmx_msr_bitmap_legacy;
static unsigned long *vmx_msr_bitmap_longmode;
static unsigned long *vmx_msr_bitmap_legacy_x2apic;
static unsigned long *vmx_msr_bitmap_longmode_x2apic;

static bool cpu_has_load_ia32_efer;
static bool cpu_has_load_perf_global_ctrl;

static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
static DEFINE_SPINLOCK(vmx_vpid_lock);

static struct vmcs_config {
	int size;
	int order;
	u32 revision_id;
	u32 pin_based_exec_ctrl;
	u32 cpu_based_exec_ctrl;
	u32 cpu_based_2nd_exec_ctrl;
	u32 vmexit_ctrl;
	u32 vmentry_ctrl;
} vmcs_config;

static struct vmx_capability {
	u32 ept;
	u32 vpid;
} vmx_capability;

#define VMX_SEGMENT_FIELD(seg)					\
	[VCPU_SREG_##seg] = {					\
		.selector = GUEST_##seg##_SELECTOR,		\
		.base = GUEST_##seg##_BASE,			\
		.limit = GUEST_##seg##_LIMIT,			\
		.ar_bytes = GUEST_##seg##_AR_BYTES,		\
	}

static const struct kvm_vmx_segment_field {
	unsigned selector;
	unsigned base;
	unsigned limit;
	unsigned ar_bytes;
} kvm_vmx_segment_fields[] = {
	VMX_SEGMENT_FIELD(CS),
	VMX_SEGMENT_FIELD(DS),
	VMX_SEGMENT_FIELD(ES),
	VMX_SEGMENT_FIELD(FS),
	VMX_SEGMENT_FIELD(GS),
	VMX_SEGMENT_FIELD(SS),
	VMX_SEGMENT_FIELD(TR),
	VMX_SEGMENT_FIELD(LDTR),
};

static u64 host_efer;

static void ept_save_pdptrs(struct kvm_vcpu *vcpu);

/*
 * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
 * away by decrementing the array size.
 */
static const u32 vmx_msr_index[] = {
#ifdef CONFIG_X86_64
	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
#endif
	MSR_EFER, MSR_TSC_AUX, MSR_STAR,
};
#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)

static inline bool is_page_fault(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
}

static inline bool is_no_device(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
}

static inline bool is_invalid_opcode(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
}

static inline bool is_external_interrupt(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}

static inline bool is_machine_check(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
}

static inline bool cpu_has_vmx_msr_bitmap(void)
{
	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
}

static inline bool cpu_has_vmx_tpr_shadow(void)
{
	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
}

static inline bool vm_need_tpr_shadow(struct kvm *kvm)
{
	return (cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm));
}

static inline bool cpu_has_secondary_exec_ctrls(void)
{
	return vmcs_config.cpu_based_exec_ctrl &
		CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
}

static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
}

static inline bool cpu_has_vmx_virtualize_x2apic_mode(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
}

static inline bool cpu_has_vmx_apic_register_virt(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_APIC_REGISTER_VIRT;
}

static inline bool cpu_has_vmx_virtual_intr_delivery(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
}

static inline bool cpu_has_vmx_flexpriority(void)
{
	return cpu_has_vmx_tpr_shadow() &&
		cpu_has_vmx_virtualize_apic_accesses();
}

static inline bool cpu_has_vmx_ept_execute_only(void)
{
	return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
}

static inline bool cpu_has_vmx_eptp_uncacheable(void)
{
	return vmx_capability.ept & VMX_EPTP_UC_BIT;
}

static inline bool cpu_has_vmx_eptp_writeback(void)
{
	return vmx_capability.ept & VMX_EPTP_WB_BIT;
}

static inline bool cpu_has_vmx_ept_2m_page(void)
{
	return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
}

static inline bool cpu_has_vmx_ept_1g_page(void)
{
	return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
}

static inline bool cpu_has_vmx_ept_4levels(void)
{
	return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT;
}

static inline bool cpu_has_vmx_ept_ad_bits(void)
{
	return vmx_capability.ept & VMX_EPT_AD_BIT;
}

static inline bool cpu_has_vmx_invept_context(void)
{
	return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
}

static inline bool cpu_has_vmx_invept_global(void)
{
	return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
}

static inline bool cpu_has_vmx_invvpid_single(void)
{
	return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
}

static inline bool cpu_has_vmx_invvpid_global(void)
{
	return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
}

static inline bool cpu_has_vmx_ept(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_ENABLE_EPT;
}

static inline bool cpu_has_vmx_unrestricted_guest(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_UNRESTRICTED_GUEST;
}

static inline bool cpu_has_vmx_ple(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_PAUSE_LOOP_EXITING;
}

static inline bool vm_need_virtualize_apic_accesses(struct kvm *kvm)
{
	return flexpriority_enabled && irqchip_in_kernel(kvm);
}

static inline bool cpu_has_vmx_vpid(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_ENABLE_VPID;
}

static inline bool cpu_has_vmx_rdtscp(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_RDTSCP;
}

static inline bool cpu_has_vmx_invpcid(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_ENABLE_INVPCID;
}

static inline bool cpu_has_virtual_nmis(void)
{
	return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
}

static inline bool cpu_has_vmx_wbinvd_exit(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_WBINVD_EXITING;
}

static inline bool report_flexpriority(void)
{
	return flexpriority_enabled;
}

static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
{
	return vmcs12->cpu_based_vm_exec_control & bit;
}

static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
{
	return (vmcs12->cpu_based_vm_exec_control &
			CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
		(vmcs12->secondary_vm_exec_control & bit);
}

static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12,
	struct kvm_vcpu *vcpu)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
}

static inline bool is_exception(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
		== (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK);
}

static void nested_vmx_vmexit(struct kvm_vcpu *vcpu);
static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
			struct vmcs12 *vmcs12,
			u32 reason, unsigned long qualification);

static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	for (i = 0; i < vmx->nmsrs; ++i)
		if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
			return i;
	return -1;
}

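/*
 * Descriptive note (derived from the wrappers below): the on-stack operand
 * structs match the descriptors the INVVPID/INVEPT instructions take, and
 * the "ja 1f; ud2" sequence turns a failed invalidation (CF or ZF set)
 * into an immediate invalid-opcode trap.
 */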
static inline void __invvpid(int ext, u16 vpid, gva_t gva)
{
	struct {
		u64 vpid : 16;
		u64 rsvd : 48;
		u64 gva;
	} operand = { vpid, 0, gva };

	asm volatile (__ex(ASM_VMX_INVVPID)
		  /* CF==1 or ZF==1 --> rc = -1 */
		  "; ja 1f ; ud2 ; 1:"
		  : : "a"(&operand), "c"(ext) : "cc", "memory");
}

static inline void __invept(int ext, u64 eptp, gpa_t gpa)
{
	struct {
		u64 eptp, gpa;
	} operand = {eptp, gpa};

	asm volatile (__ex(ASM_VMX_INVEPT)
			/* CF==1 or ZF==1 --> rc = -1 */
			"; ja 1f ; ud2 ; 1:\n"
			: : "a" (&operand), "c" (ext) : "cc", "memory");
}

static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	i = __find_msr_index(vmx, msr);
	if (i >= 0)
		return &vmx->guest_msrs[i];
	return NULL;
}

static void vmcs_clear(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);
	u8 error;

	asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
		      : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
		      : "cc", "memory");
	if (error)
		printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
		       vmcs, phys_addr);
}

static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
{
	vmcs_clear(loaded_vmcs->vmcs);
	loaded_vmcs->cpu = -1;
	loaded_vmcs->launched = 0;
}

static void vmcs_load(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);
	u8 error;

	asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
			: "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
			: "cc", "memory");
	if (error)
		printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
		       vmcs, phys_addr);
}

#ifdef CONFIG_KEXEC
/*
 * This bitmap is used to indicate whether the vmclear
 * operation is enabled on all cpus. All disabled by
 * default.
 */
static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;

static inline void crash_enable_local_vmclear(int cpu)
{
	cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
}

static inline void crash_disable_local_vmclear(int cpu)
{
	cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
}

static inline int crash_local_vmclear_enabled(int cpu)
{
	return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
}

static void crash_vmclear_local_loaded_vmcss(void)
{
	int cpu = raw_smp_processor_id();
	struct loaded_vmcs *v;

	if (!crash_local_vmclear_enabled(cpu))
		return;

	list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
			    loaded_vmcss_on_cpu_link)
		vmcs_clear(v->vmcs);
}
#else
static inline void crash_enable_local_vmclear(int cpu) { }
static inline void crash_disable_local_vmclear(int cpu) { }
#endif /* CONFIG_KEXEC */

static void __loaded_vmcs_clear(void *arg)
{
	struct loaded_vmcs *loaded_vmcs = arg;
	int cpu = raw_smp_processor_id();

	if (loaded_vmcs->cpu != cpu)
		return; /* vcpu migration can race with cpu offline */
	if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
		per_cpu(current_vmcs, cpu) = NULL;
	crash_disable_local_vmclear(cpu);
	list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);

	/*
	 * we should ensure updating loaded_vmcs->loaded_vmcss_on_cpu_link
	 * is before setting loaded_vmcs->vcpu to -1 which is done in
	 * loaded_vmcs_init. Otherwise, other cpu can see vcpu = -1 first
	 * and then add the vmcs into percpu list before it is deleted.
	 */
	smp_wmb();

	loaded_vmcs_init(loaded_vmcs);
	crash_enable_local_vmclear(cpu);
}

static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
{
	int cpu = loaded_vmcs->cpu;

	if (cpu != -1)
		smp_call_function_single(cpu,
			 __loaded_vmcs_clear, loaded_vmcs, 1);
}

static inline void vpid_sync_vcpu_single(struct vcpu_vmx *vmx)
{
	if (vmx->vpid == 0)
		return;

	if (cpu_has_vmx_invvpid_single())
		__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
}

static inline void vpid_sync_vcpu_global(void)
{
	if (cpu_has_vmx_invvpid_global())
		__invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
}

static inline void vpid_sync_context(struct vcpu_vmx *vmx)
{
	if (cpu_has_vmx_invvpid_single())
		vpid_sync_vcpu_single(vmx);
	else
		vpid_sync_vcpu_global();
}

static inline void ept_sync_global(void)
{
	if (cpu_has_vmx_invept_global())
		__invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
}

static inline void ept_sync_context(u64 eptp)
{
	if (enable_ept) {
		if (cpu_has_vmx_invept_context())
			__invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
		else
			ept_sync_global();
	}
}

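/*
 * Descriptive note (derived from the accessors below): vmcs_readl() and
 * vmcs_writel() wrap VMREAD/VMWRITE on the current VMCS; the 16/32/64-bit
 * variants narrow or split the value as needed, with 64-bit fields taking
 * two writes on 32-bit hosts.
 */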
static __always_inline unsigned long vmcs_readl(unsigned long field)
{
	unsigned long value;

	asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0")
		      : "=a"(value) : "d"(field) : "cc");
	return value;
}

static __always_inline u16 vmcs_read16(unsigned long field)
{
	return vmcs_readl(field);
}

static __always_inline u32 vmcs_read32(unsigned long field)
{
	return vmcs_readl(field);
}

static __always_inline u64 vmcs_read64(unsigned long field)
{
#ifdef CONFIG_X86_64
	return vmcs_readl(field);
#else
	return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
#endif
}

static noinline void vmwrite_error(unsigned long field, unsigned long value)
{
	printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
	       field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
	dump_stack();
}

static void vmcs_writel(unsigned long field, unsigned long value)
{
	u8 error;

	asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
		       : "=q"(error) : "a"(value), "d"(field) : "cc");
	if (unlikely(error))
		vmwrite_error(field, value);
}

static void vmcs_write16(unsigned long field, u16 value)
{
	vmcs_writel(field, value);
}

static void vmcs_write32(unsigned long field, u32 value)
{
	vmcs_writel(field, value);
}

static void vmcs_write64(unsigned long field, u64 value)
{
	vmcs_writel(field, value);
#ifndef CONFIG_X86_64
	asm volatile ("");
	vmcs_writel(field+1, value >> 32);
#endif
}

static void vmcs_clear_bits(unsigned long field, u32 mask)
{
	vmcs_writel(field, vmcs_readl(field) & ~mask);
}

static void vmcs_set_bits(unsigned long field, u32 mask)
{
	vmcs_writel(field, vmcs_readl(field) | mask);
}

static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
{
	vmx->segment_cache.bitmask = 0;
}

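/*
 * Descriptive note (derived from the code below): the segment cache avoids
 * repeated VMREADs. Each (segment, field) pair has a bit in
 * segment_cache.bitmask; vmx_segment_cache_test_set() marks the pair valid
 * and tells the caller whether the cached value can be reused or must be
 * (re)read from the VMCS.
 */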
static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
				       unsigned field)
{
	bool ret;
	u32 mask = 1 << (seg * SEG_FIELD_NR + field);

	if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) {
		vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS);
		vmx->segment_cache.bitmask = 0;
	}
	ret = vmx->segment_cache.bitmask & mask;
	vmx->segment_cache.bitmask |= mask;
	return ret;
}

static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
{
	u16 *p = &vmx->segment_cache.seg[seg].selector;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
		*p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
	return *p;
}

static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
{
	ulong *p = &vmx->segment_cache.seg[seg].base;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
		*p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
	return *p;
}

static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
{
	u32 *p = &vmx->segment_cache.seg[seg].limit;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
		*p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
	return *p;
}

static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
{
	u32 *p = &vmx->segment_cache.seg[seg].ar;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
		*p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
	return *p;
}

static void update_exception_bitmap(struct kvm_vcpu *vcpu)
{
	u32 eb;

	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
	     (1u << NM_VECTOR) | (1u << DB_VECTOR);
	if ((vcpu->guest_debug &
	     (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
	    (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
		eb |= 1u << BP_VECTOR;
	if (to_vmx(vcpu)->rmode.vm86_active)
		eb = ~0;
	if (enable_ept)
		eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
	if (vcpu->fpu_active)
		eb &= ~(1u << NM_VECTOR);

	/* When we are running a nested L2 guest and L1 specified for it a
	 * certain exception bitmap, we must trap the same exceptions and pass
	 * them to L1. When running L2, we will only handle the exceptions
	 * specified above if L1 did not want them.
	 */
	if (is_guest_mode(vcpu))
		eb |= get_vmcs12(vcpu)->exception_bitmap;

	vmcs_write32(EXCEPTION_BITMAP, eb);
}

static void clear_atomic_switch_msr_special(unsigned long entry,
		unsigned long exit)
{
	vmcs_clear_bits(VM_ENTRY_CONTROLS, entry);
	vmcs_clear_bits(VM_EXIT_CONTROLS, exit);
}

static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
{
	unsigned i;
	struct msr_autoload *m = &vmx->msr_autoload;

	switch (msr) {
	case MSR_EFER:
		if (cpu_has_load_ia32_efer) {
			clear_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
					VM_EXIT_LOAD_IA32_EFER);
			return;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (cpu_has_load_perf_global_ctrl) {
			clear_atomic_switch_msr_special(
					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
			return;
		}
		break;
	}

	for (i = 0; i < m->nr; ++i)
		if (m->guest[i].index == msr)
			break;

	if (i == m->nr)
		return;
	--m->nr;
	m->guest[i] = m->guest[m->nr];
	m->host[i] = m->host[m->nr];
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
}

static void add_atomic_switch_msr_special(unsigned long entry,
		unsigned long exit, unsigned long guest_val_vmcs,
		unsigned long host_val_vmcs, u64 guest_val, u64 host_val)
{
	vmcs_write64(guest_val_vmcs, guest_val);
	vmcs_write64(host_val_vmcs, host_val);
	vmcs_set_bits(VM_ENTRY_CONTROLS, entry);
	vmcs_set_bits(VM_EXIT_CONTROLS, exit);
}

static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
				  u64 guest_val, u64 host_val)
{
	unsigned i;
	struct msr_autoload *m = &vmx->msr_autoload;

	switch (msr) {
	case MSR_EFER:
		if (cpu_has_load_ia32_efer) {
			add_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
					VM_EXIT_LOAD_IA32_EFER,
					GUEST_IA32_EFER,
					HOST_IA32_EFER,
					guest_val, host_val);
			return;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (cpu_has_load_perf_global_ctrl) {
			add_atomic_switch_msr_special(
					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
					GUEST_IA32_PERF_GLOBAL_CTRL,
					HOST_IA32_PERF_GLOBAL_CTRL,
					guest_val, host_val);
			return;
		}
		break;
	}

	for (i = 0; i < m->nr; ++i)
		if (m->guest[i].index == msr)
			break;

	if (i == NR_AUTOLOAD_MSRS) {
		printk_once(KERN_WARNING "Not enough msr switch entries. "
				"Can't add msr %x\n", msr);
		return;
	} else if (i == m->nr) {
		++m->nr;
		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
	}

	m->guest[i].index = msr;
	m->guest[i].value = guest_val;
	m->host[i].index = msr;
	m->host[i].value = host_val;
}

static void reload_tss(void)
{
	/*
	 * VT restores TR but not its size.  Useless.
	 */
	struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
	struct desc_struct *descs;

	descs = (void *)gdt->address;
	descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
	load_TR_desc();
}

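/*
 * Descriptive note (derived from the function below): decide how EFER is
 * switched on VM entry/exit. Returns true when the value can go through the
 * shared-MSR machinery (with bits the guest does not own masked out), or
 * false after queueing an atomic EFER switch, e.g. when NX differs between
 * guest and host under EPT.
 */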
static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
{
	u64 guest_efer;
	u64 ignore_bits;

	guest_efer = vmx->vcpu.arch.efer;

	/*
	 * NX is emulated; LMA and LME handled by hardware; SCE meaningless
	 * outside long mode
	 */
	ignore_bits = EFER_NX | EFER_SCE;
#ifdef CONFIG_X86_64
	ignore_bits |= EFER_LMA | EFER_LME;
	/* SCE is meaningful only in long mode on Intel */
	if (guest_efer & EFER_LMA)
		ignore_bits &= ~(u64)EFER_SCE;
#endif
	guest_efer &= ~ignore_bits;
	guest_efer |= host_efer & ignore_bits;
	vmx->guest_msrs[efer_offset].data = guest_efer;
	vmx->guest_msrs[efer_offset].mask = ~ignore_bits;

	clear_atomic_switch_msr(vmx, MSR_EFER);
	/* On ept, can't emulate nx, and must switch nx atomically */
	if (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX)) {
		guest_efer = vmx->vcpu.arch.efer;
		if (!(guest_efer & EFER_LMA))
			guest_efer &= ~EFER_LME;
		add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer);
		return false;
	}

	return true;
}

static unsigned long segment_base(u16 selector)
{
	struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
	struct desc_struct *d;
	unsigned long table_base;
	unsigned long v;

	if (!(selector & ~3))
		return 0;

	table_base = gdt->address;

	if (selector & 4) {           /* from ldt */
		u16 ldt_selector = kvm_read_ldt();

		if (!(ldt_selector & ~3))
			return 0;

		table_base = segment_base(ldt_selector);
	}
	d = (struct desc_struct *)(table_base + (selector & ~7));
	v = get_desc_base(d);
#ifdef CONFIG_X86_64
	if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
#endif
	return v;
}

static inline unsigned long kvm_read_tr_base(void)
{
	u16 tr;
	asm("str %0" : "=g"(tr));
	return segment_base(tr);
}

static void vmx_save_host_state(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int i;

	if (vmx->host_state.loaded)
		return;

	vmx->host_state.loaded = 1;
	/*
	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
	 * allow segment selectors with cpl > 0 or ti == 1.
	 */
	vmx->host_state.ldt_sel = kvm_read_ldt();
	vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
	savesegment(fs, vmx->host_state.fs_sel);
	if (!(vmx->host_state.fs_sel & 7)) {
		vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
		vmx->host_state.fs_reload_needed = 0;
	} else {
		vmcs_write16(HOST_FS_SELECTOR, 0);
		vmx->host_state.fs_reload_needed = 1;
	}
	savesegment(gs, vmx->host_state.gs_sel);
	if (!(vmx->host_state.gs_sel & 7))
		vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
	else {
		vmcs_write16(HOST_GS_SELECTOR, 0);
		vmx->host_state.gs_ldt_reload_needed = 1;
	}

#ifdef CONFIG_X86_64
	savesegment(ds, vmx->host_state.ds_sel);
	savesegment(es, vmx->host_state.es_sel);
#endif

#ifdef CONFIG_X86_64
	vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
	vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
#else
	vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
	vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
#endif

#ifdef CONFIG_X86_64
	rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
	if (is_long_mode(&vmx->vcpu))
		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#endif
	for (i = 0; i < vmx->save_nmsrs; ++i)
		kvm_set_shared_msr(vmx->guest_msrs[i].index,
				   vmx->guest_msrs[i].data,
				   vmx->guest_msrs[i].mask);
}

static void __vmx_load_host_state(struct vcpu_vmx *vmx)
{
	if (!vmx->host_state.loaded)
		return;

	++vmx->vcpu.stat.host_state_reload;
	vmx->host_state.loaded = 0;
#ifdef CONFIG_X86_64
	if (is_long_mode(&vmx->vcpu))
		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#endif
	if (vmx->host_state.gs_ldt_reload_needed) {
		kvm_load_ldt(vmx->host_state.ldt_sel);
#ifdef CONFIG_X86_64
		load_gs_index(vmx->host_state.gs_sel);
#else
		loadsegment(gs, vmx->host_state.gs_sel);
#endif
	}
	if (vmx->host_state.fs_reload_needed)
		loadsegment(fs, vmx->host_state.fs_sel);
#ifdef CONFIG_X86_64
	if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) {
		loadsegment(ds, vmx->host_state.ds_sel);
		loadsegment(es, vmx->host_state.es_sel);
	}
#endif
	reload_tss();
#ifdef CONFIG_X86_64
	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
#endif
	/*
	 * If the FPU is not active (through the host task or
	 * the guest vcpu), then restore the cr0.TS bit.
	 */
	if (!user_has_fpu() && !vmx->vcpu.guest_fpu_loaded)
		stts();
	load_gdt(&__get_cpu_var(host_gdt));
}

static void vmx_load_host_state(struct vcpu_vmx *vmx)
{
	preempt_disable();
	__vmx_load_host_state(vmx);
	preempt_enable();
}
1570
6aa8b732
AK
1571/*
1572 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
1573 * vcpu mutex is already taken.
1574 */
15ad7146 1575static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
6aa8b732 1576{
a2fa3e9f 1577 struct vcpu_vmx *vmx = to_vmx(vcpu);
4610c9cc 1578 u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
6aa8b732 1579
4610c9cc
DX
1580 if (!vmm_exclusive)
1581 kvm_cpu_vmxon(phys_addr);
d462b819
NHE
1582 else if (vmx->loaded_vmcs->cpu != cpu)
1583 loaded_vmcs_clear(vmx->loaded_vmcs);
6aa8b732 1584
d462b819
NHE
1585 if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
1586 per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
1587 vmcs_load(vmx->loaded_vmcs->vmcs);
6aa8b732
AK
1588 }
1589
d462b819 1590 if (vmx->loaded_vmcs->cpu != cpu) {
d359192f 1591 struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
6aa8b732
AK
1592 unsigned long sysenter_esp;
1593
a8eeb04a 1594 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
92fe13be 1595 local_irq_disable();
8f536b76 1596 crash_disable_local_vmclear(cpu);
5a560f8b
XG
1597
1598 /*
1599 * Read loaded_vmcs->cpu should be before fetching
1600 * loaded_vmcs->loaded_vmcss_on_cpu_link.
1601 * See the comments in __loaded_vmcs_clear().
1602 */
1603 smp_rmb();
1604
d462b819
NHE
1605 list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
1606 &per_cpu(loaded_vmcss_on_cpu, cpu));
8f536b76 1607 crash_enable_local_vmclear(cpu);
92fe13be
DX
1608 local_irq_enable();
1609
6aa8b732
AK
1610 /*
1611 * Linux uses per-cpu TSS and GDT, so set these when switching
1612 * processors.
1613 */
d6e88aec 1614 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
d359192f 1615 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
6aa8b732
AK
1616
1617 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
1618 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
d462b819 1619 vmx->loaded_vmcs->cpu = cpu;
6aa8b732 1620 }
6aa8b732
AK
1621}
1622
1623static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
1624{
a9b21b62 1625 __vmx_load_host_state(to_vmx(vcpu));
4610c9cc 1626 if (!vmm_exclusive) {
d462b819
NHE
1627 __loaded_vmcs_clear(to_vmx(vcpu)->loaded_vmcs);
1628 vcpu->cpu = -1;
4610c9cc
DX
1629 kvm_cpu_vmxoff();
1630 }
6aa8b732
AK
1631}
1632
5fd86fcf
AK
1633static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
1634{
81231c69
AK
1635 ulong cr0;
1636
5fd86fcf
AK
1637 if (vcpu->fpu_active)
1638 return;
1639 vcpu->fpu_active = 1;
81231c69
AK
1640 cr0 = vmcs_readl(GUEST_CR0);
1641 cr0 &= ~(X86_CR0_TS | X86_CR0_MP);
1642 cr0 |= kvm_read_cr0_bits(vcpu, X86_CR0_TS | X86_CR0_MP);
1643 vmcs_writel(GUEST_CR0, cr0);
5fd86fcf 1644 update_exception_bitmap(vcpu);
edcafe3c 1645 vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
36cf24e0
NHE
1646 if (is_guest_mode(vcpu))
1647 vcpu->arch.cr0_guest_owned_bits &=
1648 ~get_vmcs12(vcpu)->cr0_guest_host_mask;
edcafe3c 1649 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
5fd86fcf
AK
1650}
1651
edcafe3c
AK
1652static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
1653
fe3ef05c
NHE
1654/*
1655 * Return the cr0 value that a nested guest would read. This is a combination
1656 * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
1657 * its hypervisor (cr0_read_shadow).
1658 */
1659static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
1660{
1661 return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
1662 (fields->cr0_read_shadow & fields->cr0_guest_host_mask);
1663}
1664static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
1665{
1666 return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
1667 (fields->cr4_read_shadow & fields->cr4_guest_host_mask);
1668}
1669
5fd86fcf
AK
1670static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
1671{
36cf24e0
NHE
1672 /* Note that there is no vcpu->fpu_active = 0 here. The caller must
1673 * set this *before* calling this function.
1674 */
edcafe3c 1675 vmx_decache_cr0_guest_bits(vcpu);
81231c69 1676 vmcs_set_bits(GUEST_CR0, X86_CR0_TS | X86_CR0_MP);
5fd86fcf 1677 update_exception_bitmap(vcpu);
edcafe3c
AK
1678 vcpu->arch.cr0_guest_owned_bits = 0;
1679 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
36cf24e0
NHE
1680 if (is_guest_mode(vcpu)) {
1681 /*
1682 * L1's specified read shadow might not contain the TS bit,
1683 * so now that we turned on shadowing of this bit, we need to
1684 * set this bit of the shadow. Like in nested_vmx_run we need
1685 * nested_read_cr0(vmcs12), but vmcs12->guest_cr0 is not yet
1686 * up-to-date here because we just decached cr0.TS (and we'll
1687 * only update vmcs12->guest_cr0 on nested exit).
1688 */
1689 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1690 vmcs12->guest_cr0 = (vmcs12->guest_cr0 & ~X86_CR0_TS) |
1691 (vcpu->arch.cr0 & X86_CR0_TS);
1692 vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
1693 } else
1694 vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
5fd86fcf
AK
1695}
1696
6aa8b732
AK
1697static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
1698{
78ac8b47 1699 unsigned long rflags, save_rflags;
345dcaa8 1700
6de12732
AK
1701 if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
1702 __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
1703 rflags = vmcs_readl(GUEST_RFLAGS);
1704 if (to_vmx(vcpu)->rmode.vm86_active) {
1705 rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
1706 save_rflags = to_vmx(vcpu)->rmode.save_rflags;
1707 rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
1708 }
1709 to_vmx(vcpu)->rflags = rflags;
78ac8b47 1710 }
6de12732 1711 return to_vmx(vcpu)->rflags;
6aa8b732
AK
1712}
1713
1714static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
1715{
6de12732
AK
1716 __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
1717 to_vmx(vcpu)->rflags = rflags;
78ac8b47
AK
1718 if (to_vmx(vcpu)->rmode.vm86_active) {
1719 to_vmx(vcpu)->rmode.save_rflags = rflags;
053de044 1720 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
78ac8b47 1721 }
6aa8b732
AK
1722 vmcs_writel(GUEST_RFLAGS, rflags);
1723}
1724
2809f5d2
GC
1725static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
1726{
1727 u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
1728 int ret = 0;
1729
1730 if (interruptibility & GUEST_INTR_STATE_STI)
48005f64 1731 ret |= KVM_X86_SHADOW_INT_STI;
2809f5d2 1732 if (interruptibility & GUEST_INTR_STATE_MOV_SS)
48005f64 1733 ret |= KVM_X86_SHADOW_INT_MOV_SS;
2809f5d2
GC
1734
1735 return ret & mask;
1736}
1737
1738static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
1739{
1740 u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
1741 u32 interruptibility = interruptibility_old;
1742
1743 interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
1744
48005f64 1745 if (mask & KVM_X86_SHADOW_INT_MOV_SS)
2809f5d2 1746 interruptibility |= GUEST_INTR_STATE_MOV_SS;
48005f64 1747 else if (mask & KVM_X86_SHADOW_INT_STI)
2809f5d2
GC
1748 interruptibility |= GUEST_INTR_STATE_STI;
1749
1750 if ((interruptibility != interruptibility_old))
1751 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
1752}
1753
6aa8b732
AK
1754static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
1755{
1756 unsigned long rip;
6aa8b732 1757
5fdbf976 1758 rip = kvm_rip_read(vcpu);
6aa8b732 1759 rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
5fdbf976 1760 kvm_rip_write(vcpu, rip);
6aa8b732 1761
2809f5d2
GC
1762 /* skipping an emulated instruction also counts */
1763 vmx_set_interrupt_shadow(vcpu, 0);
6aa8b732
AK
1764}
1765
0b6ac343
NHE
1766/*
1767 * KVM wants to inject page faults it received into the guest. In a nested
1768 * guest, this function checks whether they need to be injected into L1 or L2.
1769 * This function assumes it is called with the exit reason in vmcs02 being
1770 * a #PF exception (this is the only case in which KVM injects a #PF when L2
1771 * is running).
1772 */
1773static int nested_pf_handled(struct kvm_vcpu *vcpu)
1774{
1775 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1776
1777 /* TODO: also check PFEC_MATCH/MASK, not just EB.PF. */
95871901 1778 if (!(vmcs12->exception_bitmap & (1u << PF_VECTOR)))
0b6ac343
NHE
1779 return 0;
1780
1781 nested_vmx_vmexit(vcpu);
1782 return 1;
1783}
1784
298101da 1785static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
ce7ddec4
JR
1786 bool has_error_code, u32 error_code,
1787 bool reinject)
298101da 1788{
77ab6db0 1789 struct vcpu_vmx *vmx = to_vmx(vcpu);
8ab2d2e2 1790 u32 intr_info = nr | INTR_INFO_VALID_MASK;
77ab6db0 1791
0b6ac343
NHE
1792 if (nr == PF_VECTOR && is_guest_mode(vcpu) &&
1793 nested_pf_handled(vcpu))
1794 return;
1795
8ab2d2e2 1796 if (has_error_code) {
77ab6db0 1797 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
8ab2d2e2
JK
1798 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
1799 }
77ab6db0 1800
7ffd92c5 1801 if (vmx->rmode.vm86_active) {
71f9833b
SH
1802 int inc_eip = 0;
1803 if (kvm_exception_is_soft(nr))
1804 inc_eip = vcpu->arch.event_exit_inst_len;
1805 if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE)
a92601bb 1806 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
77ab6db0
JK
1807 return;
1808 }
1809
66fd3f7f
GN
1810 if (kvm_exception_is_soft(nr)) {
1811 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
1812 vmx->vcpu.arch.event_exit_inst_len);
8ab2d2e2
JK
1813 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
1814 } else
1815 intr_info |= INTR_TYPE_HARD_EXCEPTION;
1816
1817 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
298101da
AK
1818}
1819
4e47c7a6
SY
1820static bool vmx_rdtscp_supported(void)
1821{
1822 return cpu_has_vmx_rdtscp();
1823}
1824
ad756a16
MJ
1825static bool vmx_invpcid_supported(void)
1826{
1827 return cpu_has_vmx_invpcid() && enable_ept;
1828}
1829
a75beee6
ED
1830/*
1831 * Swap MSR entry in host/guest MSR entry array.
1832 */
8b9cf98c 1833static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
a75beee6 1834{
26bb0981 1835 struct shared_msr_entry tmp;
a2fa3e9f
GH
1836
1837 tmp = vmx->guest_msrs[to];
1838 vmx->guest_msrs[to] = vmx->guest_msrs[from];
1839 vmx->guest_msrs[from] = tmp;
a75beee6
ED
1840}
1841
8d14695f
YZ
1842static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
1843{
1844 unsigned long *msr_bitmap;
1845
1846 if (irqchip_in_kernel(vcpu->kvm) && apic_x2apic_mode(vcpu->arch.apic)) {
1847 if (is_long_mode(vcpu))
1848 msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
1849 else
1850 msr_bitmap = vmx_msr_bitmap_legacy_x2apic;
1851 } else {
1852 if (is_long_mode(vcpu))
1853 msr_bitmap = vmx_msr_bitmap_longmode;
1854 else
1855 msr_bitmap = vmx_msr_bitmap_legacy;
1856 }
1857
1858 vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
1859}
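/*
 * Descriptive note: the selection above is a 2x2 matrix - x2APIC mode
 * vs. xAPIC mode on one axis, long mode vs. legacy mode on the other -
 * with one pre-built MSR bitmap per combination.
 */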
1860
e38aea3e
AK
1861/*
1862 * Set up the vmcs to automatically save and restore system
1863 * msrs. Don't touch the 64-bit msrs if the guest is in legacy
1864 * mode, as fiddling with msrs is very expensive.
1865 */
8b9cf98c 1866static void setup_msrs(struct vcpu_vmx *vmx)
e38aea3e 1867{
26bb0981 1868 int save_nmsrs, index;
e38aea3e 1869
a75beee6
ED
1870 save_nmsrs = 0;
1871#ifdef CONFIG_X86_64
8b9cf98c 1872 if (is_long_mode(&vmx->vcpu)) {
8b9cf98c 1873 index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
a75beee6 1874 if (index >= 0)
8b9cf98c
RR
1875 move_msr_up(vmx, index, save_nmsrs++);
1876 index = __find_msr_index(vmx, MSR_LSTAR);
a75beee6 1877 if (index >= 0)
8b9cf98c
RR
1878 move_msr_up(vmx, index, save_nmsrs++);
1879 index = __find_msr_index(vmx, MSR_CSTAR);
a75beee6 1880 if (index >= 0)
8b9cf98c 1881 move_msr_up(vmx, index, save_nmsrs++);
4e47c7a6
SY
1882 index = __find_msr_index(vmx, MSR_TSC_AUX);
1883 if (index >= 0 && vmx->rdtscp_enabled)
1884 move_msr_up(vmx, index, save_nmsrs++);
a75beee6 1885 /*
8c06585d 1886 * MSR_STAR is only needed on long mode guests, and only
a75beee6
ED
1887 * if efer.sce is enabled.
1888 */
8c06585d 1889 index = __find_msr_index(vmx, MSR_STAR);
f6801dff 1890 if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
8b9cf98c 1891 move_msr_up(vmx, index, save_nmsrs++);
a75beee6
ED
1892 }
1893#endif
92c0d900
AK
1894 index = __find_msr_index(vmx, MSR_EFER);
1895 if (index >= 0 && update_transition_efer(vmx, index))
26bb0981 1896 move_msr_up(vmx, index, save_nmsrs++);
e38aea3e 1897
26bb0981 1898 vmx->save_nmsrs = save_nmsrs;
5897297b 1899
8d14695f
YZ
1900 if (cpu_has_vmx_msr_bitmap())
1901 vmx_set_msr_bitmap(&vmx->vcpu);
e38aea3e
AK
1902}
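/*
 * Sketch of the resulting layout (assuming a 64-bit guest with
 * EFER.SCE set and RDTSCP exposed): guest_msrs[0..save_nmsrs-1] ends
 * up holding SYSCALL_MASK, LSTAR, CSTAR, TSC_AUX, STAR and, when EFER
 * must be switched on transition, EFER - i.e. the MSRs swapped on
 * every guest/host switch - while the remaining entries stay at the
 * tail of the array.
 */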
1903
6aa8b732
AK
1904/*
1905 * reads and returns guest's timestamp counter "register"
1906 * guest_tsc = host_tsc + tsc_offset -- 21.3
1907 */
1908static u64 guest_read_tsc(void)
1909{
1910 u64 host_tsc, tsc_offset;
1911
1912 rdtscll(host_tsc);
1913 tsc_offset = vmcs_read64(TSC_OFFSET);
1914 return host_tsc + tsc_offset;
1915}
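/*
 * Illustrative numbers only: with host_tsc = 1000000 and a TSC_OFFSET
 * of -250000, the guest observes 750000; the offset is simply added to
 * the host TSC on every guest TSC read, as the comment above notes.
 */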
1916
d5c1785d
NHE
1917/*
1918 * Like guest_read_tsc, but always returns L1's notion of the timestamp
1919 * counter, even if a nested guest (L2) is currently running.
1920 */
886b470c 1921u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
d5c1785d 1922{
886b470c 1923 u64 tsc_offset;
d5c1785d 1924
d5c1785d
NHE
1925 tsc_offset = is_guest_mode(vcpu) ?
1926 to_vmx(vcpu)->nested.vmcs01_tsc_offset :
1927 vmcs_read64(TSC_OFFSET);
1928 return host_tsc + tsc_offset;
1929}
1930
4051b188 1931/*
cc578287
ZA
1932 * Engage any workarounds for mis-matched TSC rates. Currently limited to
1933 * software catchup for faster rates on slower CPUs.
4051b188 1934 */
cc578287 1935static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
4051b188 1936{
cc578287
ZA
1937 if (!scale)
1938 return;
1939
1940 if (user_tsc_khz > tsc_khz) {
1941 vcpu->arch.tsc_catchup = 1;
1942 vcpu->arch.tsc_always_catchup = 1;
1943 } else
1944 WARN(1, "user requested TSC rate below hardware speed\n");
4051b188
JR
1945}
1946
ba904635
WA
1947static u64 vmx_read_tsc_offset(struct kvm_vcpu *vcpu)
1948{
1949 return vmcs_read64(TSC_OFFSET);
1950}
1951
6aa8b732 1952/*
99e3e30a 1953 * writes 'offset' into guest's timestamp counter offset register
6aa8b732 1954 */
99e3e30a 1955static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
6aa8b732 1956{
27fc51b2 1957 if (is_guest_mode(vcpu)) {
7991825b 1958 /*
27fc51b2
NHE
1959 * We're here if L1 chose not to trap WRMSR to TSC. According
1960 * to the spec, this should set L1's TSC; the offset that L1
1961 * set for L2 remains unchanged, and still needs to be added
1962 * to the newly set TSC to get L2's TSC.
7991825b 1963 */
27fc51b2
NHE
1964 struct vmcs12 *vmcs12;
1965 to_vmx(vcpu)->nested.vmcs01_tsc_offset = offset;
1966 /* recalculate vmcs02.TSC_OFFSET: */
1967 vmcs12 = get_vmcs12(vcpu);
1968 vmcs_write64(TSC_OFFSET, offset +
1969 (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
1970 vmcs12->tsc_offset : 0));
1971 } else {
1972 vmcs_write64(TSC_OFFSET, offset);
1973 }
6aa8b732
AK
1974}
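/*
 * Illustrative relationship between the offsets handled above:
 *
 *	L1's TSC = host_tsc + vmcs01_tsc_offset
 *	L2's TSC = host_tsc + vmcs01_tsc_offset + vmcs12->tsc_offset
 *
 * so while L2 runs, a TSC write that should affect L1's TSC updates
 * vmcs01_tsc_offset, and vmcs02.TSC_OFFSET is recomputed as the new
 * offset plus vmcs12->tsc_offset (when L1 uses TSC offsetting).
 */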
1975
f1e2b260 1976static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host)
e48672fa
ZA
1977{
1978 u64 offset = vmcs_read64(TSC_OFFSET);
1979 vmcs_write64(TSC_OFFSET, offset + adjustment);
7991825b
NHE
1980 if (is_guest_mode(vcpu)) {
1981 /* Even when running L2, the adjustment needs to apply to L1 */
1982 to_vmx(vcpu)->nested.vmcs01_tsc_offset += adjustment;
1983 }
e48672fa
ZA
1984}
1985
857e4099
JR
1986static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
1987{
1988 return target_tsc - native_read_tsc();
1989}
1990
801d3424
NHE
1991static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
1992{
1993 struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0);
1994 return best && (best->ecx & (1 << (X86_FEATURE_VMX & 31)));
1995}
1996
1997/*
1998 * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
1999 * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
2000 * all guests if the "nested" module option is off, and can also be disabled
2001 * for a single guest by disabling its VMX cpuid bit.
2002 */
2003static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
2004{
2005 return nested && guest_cpuid_has_vmx(vcpu);
2006}
2007
b87a51ae
NHE
2008/*
2009 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
2010 * returned for the various VMX controls MSRs when nested VMX is enabled.
2011 * The same values should also be used to verify that vmcs12 control fields are
2012 * valid during nested entry from L1 to L2.
2013 * Each of these control msrs has a low and high 32-bit half: A low bit is on
2014 * if the corresponding bit in the (32-bit) control field *must* be on, and a
2015 * bit in the high half is on if the corresponding bit in the control field
2016 * may be on. See also vmx_control_verify().
2017 * TODO: allow these variables to be modified (downgraded) by module options
2018 * or other means.
2019 */
2020static u32 nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high;
2021static u32 nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high;
2022static u32 nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high;
2023static u32 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high;
2024static u32 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high;
2025static __init void nested_vmx_setup_ctls_msrs(void)
2026{
2027 /*
2028 * Note that as a general rule, the high half of the MSRs (bits in
2029 * the control fields which may be 1) should be initialized by the
2030 * intersection of the underlying hardware's MSR (i.e., features which
2031 * can be supported) and the list of features we want to expose -
2032 * because they are known to be properly supported in our code.
2033 * Also, usually, the low half of the MSRs (bits which must be 1) can
2034 * be set to 0, meaning that L1 may turn off any of these bits. The
2035 * reason is that if one of these bits is necessary, it will appear
2036 * in vmcs01, and prepare_vmcs02, which bitwise-or's the control
2037 * fields of vmcs01 and vmcs12, will turn these bits on - and
2038 * nested_vmx_exit_handled() will not pass related exits to L1.
2039 * These rules have exceptions below.
2040 */
2041
2042 /* pin-based controls */
2043 /*
2044 * According to the Intel spec, if bit 55 of VMX_BASIC is off (as it is
2045 * in our case), bits 1, 2 and 4 (i.e., 0x16) must be 1 in this MSR.
2046 */
2047 nested_vmx_pinbased_ctls_low = 0x16;
2048 nested_vmx_pinbased_ctls_high = 0x16 |
2049 PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING |
2050 PIN_BASED_VIRTUAL_NMIS;
2051
33fb20c3
JK
2052 /*
2053 * Exit controls
2054 * If bit 55 of VMX_BASIC is off, bits 0-8 and 10, 11, 13, 14, 16 and
2055 * 17 must be 1.
2056 */
2057 nested_vmx_exit_ctls_low = VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
b6f1250e 2058 /* Note that guest use of VM_EXIT_ACK_INTR_ON_EXIT is not supported. */
b87a51ae
NHE
2059#ifdef CONFIG_X86_64
2060 nested_vmx_exit_ctls_high = VM_EXIT_HOST_ADDR_SPACE_SIZE;
2061#else
2062 nested_vmx_exit_ctls_high = 0;
2063#endif
33fb20c3 2064 nested_vmx_exit_ctls_high |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
b87a51ae
NHE
2065
2066 /* entry controls */
2067 rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
2068 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high);
33fb20c3
JK
2069 /* If bit 55 of VMX_BASIC is off, bits 0-8 and 12 must be 1. */
2070 nested_vmx_entry_ctls_low = VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
b87a51ae
NHE
2071 nested_vmx_entry_ctls_high &=
2072 VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_IA32E_MODE;
33fb20c3 2073 nested_vmx_entry_ctls_high |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
b87a51ae
NHE
2074
2075 /* cpu-based controls */
2076 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
2077 nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high);
2078 nested_vmx_procbased_ctls_low = 0;
2079 nested_vmx_procbased_ctls_high &=
2080 CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_USE_TSC_OFFSETING |
2081 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
2082 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
2083 CPU_BASED_CR3_STORE_EXITING |
2084#ifdef CONFIG_X86_64
2085 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
2086#endif
2087 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
2088 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_EXITING |
dbcb4e79 2089 CPU_BASED_RDPMC_EXITING | CPU_BASED_RDTSC_EXITING |
d6851fbe 2090 CPU_BASED_PAUSE_EXITING |
b87a51ae
NHE
2091 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
2092 /*
2093 * We can allow some features even when not supported by the
2094 * hardware. For example, L1 can specify an MSR bitmap - and we
2095 * can use it to avoid exits to L1 - even when L0 runs L2
2096 * without MSR bitmaps.
2097 */
2098 nested_vmx_procbased_ctls_high |= CPU_BASED_USE_MSR_BITMAPS;
2099
2100 /* secondary cpu-based controls */
2101 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
2102 nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high);
2103 nested_vmx_secondary_ctls_low = 0;
2104 nested_vmx_secondary_ctls_high &=
d6851fbe
JK
2105 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2106 SECONDARY_EXEC_WBINVD_EXITING;
b87a51ae
NHE
2107}
2108
2109static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
2110{
2111 /*
2112 * Bits that are 0 in 'high' must be 0 in 'control'; bits that are 1 in 'low' must be 1.
2113 */
2114 return ((control & high) | low) == control;
2115}
2116
2117static inline u64 vmx_control_msr(u32 low, u32 high)
2118{
2119 return low | ((u64)high << 32);
2120}
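/*
 * Worked example for vmx_control_verify() (illustrative numbers only):
 * with low = 0x16 and high = 0x401e,
 *	control = 0x0016 passes:  (0x0016 & 0x401e) | 0x16 == 0x0016
 *	control = 0x8016 fails:   bit 15 is not allowed by 'high'
 *	control = 0x0006 fails:   bit 4, required by 'low', is clear
 */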
2121
2122/*
2123 * If we allow our guest to use VMX instructions (i.e., nested VMX), we should
2124 * also let it use VMX-specific MSRs.
2125 * vmx_get_vmx_msr() and vmx_set_vmx_msr() return 1 when we handled a
2126 * VMX-specific MSR, or 0 when we haven't (and the caller should handle it
2127 * like all other MSRs).
2128 */
2129static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
2130{
2131 if (!nested_vmx_allowed(vcpu) && msr_index >= MSR_IA32_VMX_BASIC &&
2132 msr_index <= MSR_IA32_VMX_TRUE_ENTRY_CTLS) {
2133 /*
2134 * According to the spec, processors which do not support VMX
2135 * should throw a #GP(0) when VMX capability MSRs are read.
2136 */
2137 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
2138 return 1;
2139 }
2140
2141 switch (msr_index) {
2142 case MSR_IA32_FEATURE_CONTROL:
2143 *pdata = 0;
2144 break;
2145 case MSR_IA32_VMX_BASIC:
2146 /*
2147 * This MSR reports some information about VMX support. We
2148 * should return information about the VMX we emulate for the
2149 * guest, and the VMCS structure we give it - not about the
2150 * VMX support of the underlying hardware.
2151 */
2152 *pdata = VMCS12_REVISION |
2153 ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
2154 (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
2155 break;
2156 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
2157 case MSR_IA32_VMX_PINBASED_CTLS:
2158 *pdata = vmx_control_msr(nested_vmx_pinbased_ctls_low,
2159 nested_vmx_pinbased_ctls_high);
2160 break;
2161 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
2162 case MSR_IA32_VMX_PROCBASED_CTLS:
2163 *pdata = vmx_control_msr(nested_vmx_procbased_ctls_low,
2164 nested_vmx_procbased_ctls_high);
2165 break;
2166 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
2167 case MSR_IA32_VMX_EXIT_CTLS:
2168 *pdata = vmx_control_msr(nested_vmx_exit_ctls_low,
2169 nested_vmx_exit_ctls_high);
2170 break;
2171 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
2172 case MSR_IA32_VMX_ENTRY_CTLS:
2173 *pdata = vmx_control_msr(nested_vmx_entry_ctls_low,
2174 nested_vmx_entry_ctls_high);
2175 break;
2176 case MSR_IA32_VMX_MISC:
2177 *pdata = 0;
2178 break;
2179 /*
2180 * These MSRs specify bits which the guest must keep fixed (on or off)
2181 * while L1 is in VMXON mode (in L1's root mode, or running an L2).
2182 * We picked the standard core2 setting.
2183 */
2184#define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
2185#define VMXON_CR4_ALWAYSON X86_CR4_VMXE
2186 case MSR_IA32_VMX_CR0_FIXED0:
2187 *pdata = VMXON_CR0_ALWAYSON;
2188 break;
2189 case MSR_IA32_VMX_CR0_FIXED1:
2190 *pdata = -1ULL;
2191 break;
2192 case MSR_IA32_VMX_CR4_FIXED0:
2193 *pdata = VMXON_CR4_ALWAYSON;
2194 break;
2195 case MSR_IA32_VMX_CR4_FIXED1:
2196 *pdata = -1ULL;
2197 break;
2198 case MSR_IA32_VMX_VMCS_ENUM:
2199 *pdata = 0x1f;
2200 break;
2201 case MSR_IA32_VMX_PROCBASED_CTLS2:
2202 *pdata = vmx_control_msr(nested_vmx_secondary_ctls_low,
2203 nested_vmx_secondary_ctls_high);
2204 break;
2205 case MSR_IA32_VMX_EPT_VPID_CAP:
2206 /* Currently, no nested ept or nested vpid */
2207 *pdata = 0;
2208 break;
2209 default:
2210 return 0;
2211 }
2212
2213 return 1;
2214}
2215
2216static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
2217{
2218 if (!nested_vmx_allowed(vcpu))
2219 return 0;
2220
2221 if (msr_index == MSR_IA32_FEATURE_CONTROL)
2222 /* TODO: the right thing. */
2223 return 1;
2224 /*
2225 * No need to treat VMX capability MSRs specially: If we don't handle
2226 * them, handle_wrmsr will #GP(0), which is correct (they are readonly)
2227 */
2228 return 0;
2229}
2230
6aa8b732
AK
2231/*
2232 * Reads an msr value (of 'msr_index') into 'pdata'.
2233 * Returns 0 on success, non-0 otherwise.
2234 * Assumes vcpu_load() was already called.
2235 */
2236static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
2237{
2238 u64 data;
26bb0981 2239 struct shared_msr_entry *msr;
6aa8b732
AK
2240
2241 if (!pdata) {
2242 printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
2243 return -EINVAL;
2244 }
2245
2246 switch (msr_index) {
05b3e0c2 2247#ifdef CONFIG_X86_64
6aa8b732
AK
2248 case MSR_FS_BASE:
2249 data = vmcs_readl(GUEST_FS_BASE);
2250 break;
2251 case MSR_GS_BASE:
2252 data = vmcs_readl(GUEST_GS_BASE);
2253 break;
44ea2b17
AK
2254 case MSR_KERNEL_GS_BASE:
2255 vmx_load_host_state(to_vmx(vcpu));
2256 data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
2257 break;
26bb0981 2258#endif
6aa8b732 2259 case MSR_EFER:
3bab1f5d 2260 return kvm_get_msr_common(vcpu, msr_index, pdata);
af24a4e4 2261 case MSR_IA32_TSC:
6aa8b732
AK
2262 data = guest_read_tsc();
2263 break;
2264 case MSR_IA32_SYSENTER_CS:
2265 data = vmcs_read32(GUEST_SYSENTER_CS);
2266 break;
2267 case MSR_IA32_SYSENTER_EIP:
f5b42c33 2268 data = vmcs_readl(GUEST_SYSENTER_EIP);
6aa8b732
AK
2269 break;
2270 case MSR_IA32_SYSENTER_ESP:
f5b42c33 2271 data = vmcs_readl(GUEST_SYSENTER_ESP);
6aa8b732 2272 break;
4e47c7a6
SY
2273 case MSR_TSC_AUX:
2274 if (!to_vmx(vcpu)->rdtscp_enabled)
2275 return 1;
2276 /* Otherwise falls through */
6aa8b732 2277 default:
b87a51ae
NHE
2278 if (vmx_get_vmx_msr(vcpu, msr_index, pdata))
2279 return 0;
8b9cf98c 2280 msr = find_msr_entry(to_vmx(vcpu), msr_index);
3bab1f5d
AK
2281 if (msr) {
2282 data = msr->data;
2283 break;
6aa8b732 2284 }
3bab1f5d 2285 return kvm_get_msr_common(vcpu, msr_index, pdata);
6aa8b732
AK
2286 }
2287
2288 *pdata = data;
2289 return 0;
2290}
2291
2292/*
2293 * Writes msr value into the appropriate "register".
2294 * Returns 0 on success, non-0 otherwise.
2295 * Assumes vcpu_load() was already called.
2296 */
8fe8ab46 2297static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
6aa8b732 2298{
a2fa3e9f 2299 struct vcpu_vmx *vmx = to_vmx(vcpu);
26bb0981 2300 struct shared_msr_entry *msr;
2cc51560 2301 int ret = 0;
8fe8ab46
WA
2302 u32 msr_index = msr_info->index;
2303 u64 data = msr_info->data;
2cc51560 2304
6aa8b732 2305 switch (msr_index) {
3bab1f5d 2306 case MSR_EFER:
8fe8ab46 2307 ret = kvm_set_msr_common(vcpu, msr_info);
2cc51560 2308 break;
16175a79 2309#ifdef CONFIG_X86_64
6aa8b732 2310 case MSR_FS_BASE:
2fb92db1 2311 vmx_segment_cache_clear(vmx);
6aa8b732
AK
2312 vmcs_writel(GUEST_FS_BASE, data);
2313 break;
2314 case MSR_GS_BASE:
2fb92db1 2315 vmx_segment_cache_clear(vmx);
6aa8b732
AK
2316 vmcs_writel(GUEST_GS_BASE, data);
2317 break;
44ea2b17
AK
2318 case MSR_KERNEL_GS_BASE:
2319 vmx_load_host_state(vmx);
2320 vmx->msr_guest_kernel_gs_base = data;
2321 break;
6aa8b732
AK
2322#endif
2323 case MSR_IA32_SYSENTER_CS:
2324 vmcs_write32(GUEST_SYSENTER_CS, data);
2325 break;
2326 case MSR_IA32_SYSENTER_EIP:
f5b42c33 2327 vmcs_writel(GUEST_SYSENTER_EIP, data);
6aa8b732
AK
2328 break;
2329 case MSR_IA32_SYSENTER_ESP:
f5b42c33 2330 vmcs_writel(GUEST_SYSENTER_ESP, data);
6aa8b732 2331 break;
af24a4e4 2332 case MSR_IA32_TSC:
8fe8ab46 2333 kvm_write_tsc(vcpu, msr_info);
6aa8b732 2334 break;
468d472f
SY
2335 case MSR_IA32_CR_PAT:
2336 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
2337 vmcs_write64(GUEST_IA32_PAT, data);
2338 vcpu->arch.pat = data;
2339 break;
2340 }
8fe8ab46 2341 ret = kvm_set_msr_common(vcpu, msr_info);
4e47c7a6 2342 break;
ba904635
WA
2343 case MSR_IA32_TSC_ADJUST:
2344 ret = kvm_set_msr_common(vcpu, msr_info);
4e47c7a6
SY
2345 break;
2346 case MSR_TSC_AUX:
2347 if (!vmx->rdtscp_enabled)
2348 return 1;
2349 /* Check reserved bit, higher 32 bits should be zero */
2350 if ((data >> 32) != 0)
2351 return 1;
2352 /* Otherwise falls through */
6aa8b732 2353 default:
b87a51ae
NHE
2354 if (vmx_set_vmx_msr(vcpu, msr_index, data))
2355 break;
8b9cf98c 2356 msr = find_msr_entry(vmx, msr_index);
3bab1f5d
AK
2357 if (msr) {
2358 msr->data = data;
2225fd56
AK
2359 if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
2360 preempt_disable();
9ee73970
AK
2361 kvm_set_shared_msr(msr->index, msr->data,
2362 msr->mask);
2225fd56
AK
2363 preempt_enable();
2364 }
3bab1f5d 2365 break;
6aa8b732 2366 }
8fe8ab46 2367 ret = kvm_set_msr_common(vcpu, msr_info);
6aa8b732
AK
2368 }
2369
2cc51560 2370 return ret;
6aa8b732
AK
2371}
2372
5fdbf976 2373static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
6aa8b732 2374{
5fdbf976
MT
2375 __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
2376 switch (reg) {
2377 case VCPU_REGS_RSP:
2378 vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
2379 break;
2380 case VCPU_REGS_RIP:
2381 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
2382 break;
6de4f3ad
AK
2383 case VCPU_EXREG_PDPTR:
2384 if (enable_ept)
2385 ept_save_pdptrs(vcpu);
2386 break;
5fdbf976
MT
2387 default:
2388 break;
2389 }
6aa8b732
AK
2390}
2391
6aa8b732
AK
2392static __init int cpu_has_kvm_support(void)
2393{
6210e37b 2394 return cpu_has_vmx();
6aa8b732
AK
2395}
2396
2397static __init int vmx_disabled_by_bios(void)
2398{
2399 u64 msr;
2400
2401 rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
cafd6659 2402 if (msr & FEATURE_CONTROL_LOCKED) {
23f3e991 2403 /* launched w/ TXT and VMX disabled */
cafd6659
SW
2404 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
2405 && tboot_enabled())
2406 return 1;
23f3e991 2407 /* launched w/o TXT and VMX only enabled w/ TXT */
cafd6659 2408 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
23f3e991 2409 && (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
f9335afe
SW
2410 && !tboot_enabled()) {
2411 printk(KERN_WARNING "kvm: disable TXT in the BIOS or "
23f3e991 2412 "activate TXT before enabling KVM\n");
cafd6659 2413 return 1;
f9335afe 2414 }
23f3e991
JC
2415 /* launched w/o TXT and VMX disabled */
2416 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
2417 && !tboot_enabled())
2418 return 1;
cafd6659
SW
2419 }
2420
2421 return 0;
6aa8b732
AK
2422}
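/*
 * Summary of the checks above: once FEATURE_CONTROL is locked, VMX is
 * treated as disabled by the BIOS unless the enable bit matching the
 * launch environment is set - FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX
 * for a tboot (TXT) launch, FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX
 * otherwise.
 */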
2423
7725b894
DX
2424static void kvm_cpu_vmxon(u64 addr)
2425{
2426 asm volatile (ASM_VMX_VMXON_RAX
2427 : : "a"(&addr), "m"(addr)
2428 : "memory", "cc");
2429}
2430
10474ae8 2431static int hardware_enable(void *garbage)
6aa8b732
AK
2432{
2433 int cpu = raw_smp_processor_id();
2434 u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
cafd6659 2435 u64 old, test_bits;
6aa8b732 2436
10474ae8
AG
2437 if (read_cr4() & X86_CR4_VMXE)
2438 return -EBUSY;
2439
d462b819 2440 INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
8f536b76
ZY
2441
2442 /*
2443 * Now we can enable the vmclear operation in kdump
2444 * since the loaded_vmcss_on_cpu list on this cpu
2445 * has been initialized.
2446 *
2447 * Though the cpu is not in VMX operation yet, there
2448 * is no problem in enabling the vmclear operation,
2449 * since the loaded_vmcss_on_cpu list is still empty.
2450 */
2451 crash_enable_local_vmclear(cpu);
2452
6aa8b732 2453 rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
cafd6659
SW
2454
2455 test_bits = FEATURE_CONTROL_LOCKED;
2456 test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
2457 if (tboot_enabled())
2458 test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX;
2459
2460 if ((old & test_bits) != test_bits) {
6aa8b732 2461 /* enable and lock */
cafd6659
SW
2462 wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
2463 }
66aee91a 2464 write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
10474ae8 2465
4610c9cc
DX
2466 if (vmm_exclusive) {
2467 kvm_cpu_vmxon(phys_addr);
2468 ept_sync_global();
2469 }
10474ae8 2470
3444d7da
AK
2471 store_gdt(&__get_cpu_var(host_gdt));
2472
10474ae8 2473 return 0;
6aa8b732
AK
2474}
2475
d462b819 2476static void vmclear_local_loaded_vmcss(void)
543e4243
AK
2477{
2478 int cpu = raw_smp_processor_id();
d462b819 2479 struct loaded_vmcs *v, *n;
543e4243 2480
d462b819
NHE
2481 list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
2482 loaded_vmcss_on_cpu_link)
2483 __loaded_vmcs_clear(v);
543e4243
AK
2484}
2485
710ff4a8
EH
2486
2487/* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot()
2488 * tricks.
2489 */
2490static void kvm_cpu_vmxoff(void)
6aa8b732 2491{
4ecac3fd 2492 asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
6aa8b732
AK
2493}
2494
710ff4a8
EH
2495static void hardware_disable(void *garbage)
2496{
4610c9cc 2497 if (vmm_exclusive) {
d462b819 2498 vmclear_local_loaded_vmcss();
4610c9cc
DX
2499 kvm_cpu_vmxoff();
2500 }
7725b894 2501 write_cr4(read_cr4() & ~X86_CR4_VMXE);
710ff4a8
EH
2502}
2503
1c3d14fe 2504static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
d77c26fc 2505 u32 msr, u32 *result)
1c3d14fe
YS
2506{
2507 u32 vmx_msr_low, vmx_msr_high;
2508 u32 ctl = ctl_min | ctl_opt;
2509
2510 rdmsr(msr, vmx_msr_low, vmx_msr_high);
2511
2512 ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
2513 ctl |= vmx_msr_low; /* bit == 1 in low word ==> must be one */
2514
2515 /* Ensure minimum (required) set of control bits are supported. */
2516 if (ctl_min & ~ctl)
002c7f7c 2517 return -EIO;
1c3d14fe
YS
2518
2519 *result = ctl;
2520 return 0;
2521}
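/*
 * Typical use, mirroring setup_vmcs_config() below (illustrative
 * sketch only): required bits go in 'min', optional ones in 'opt';
 * optional bits the CPU cannot set are silently dropped, while a
 * missing required bit makes the whole setup fail:
 *
 *	u32 pin_based;
 *	u32 min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
 *	u32 opt = PIN_BASED_VIRTUAL_NMIS;
 *	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
 *				&pin_based) < 0)
 *		return -EIO;
 */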
2522
110312c8
AK
2523static __init bool allow_1_setting(u32 msr, u32 ctl)
2524{
2525 u32 vmx_msr_low, vmx_msr_high;
2526
2527 rdmsr(msr, vmx_msr_low, vmx_msr_high);
2528 return vmx_msr_high & ctl;
2529}
2530
002c7f7c 2531static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
6aa8b732
AK
2532{
2533 u32 vmx_msr_low, vmx_msr_high;
d56f546d 2534 u32 min, opt, min2, opt2;
1c3d14fe
YS
2535 u32 _pin_based_exec_control = 0;
2536 u32 _cpu_based_exec_control = 0;
f78e0e2e 2537 u32 _cpu_based_2nd_exec_control = 0;
1c3d14fe
YS
2538 u32 _vmexit_control = 0;
2539 u32 _vmentry_control = 0;
2540
2541 min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
f08864b4 2542 opt = PIN_BASED_VIRTUAL_NMIS;
1c3d14fe
YS
2543 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
2544 &_pin_based_exec_control) < 0)
002c7f7c 2545 return -EIO;
1c3d14fe 2546
10166744 2547 min = CPU_BASED_HLT_EXITING |
1c3d14fe
YS
2548#ifdef CONFIG_X86_64
2549 CPU_BASED_CR8_LOAD_EXITING |
2550 CPU_BASED_CR8_STORE_EXITING |
2551#endif
d56f546d
SY
2552 CPU_BASED_CR3_LOAD_EXITING |
2553 CPU_BASED_CR3_STORE_EXITING |
1c3d14fe
YS
2554 CPU_BASED_USE_IO_BITMAPS |
2555 CPU_BASED_MOV_DR_EXITING |
a7052897 2556 CPU_BASED_USE_TSC_OFFSETING |
59708670
SY
2557 CPU_BASED_MWAIT_EXITING |
2558 CPU_BASED_MONITOR_EXITING |
fee84b07
AK
2559 CPU_BASED_INVLPG_EXITING |
2560 CPU_BASED_RDPMC_EXITING;
443381a8 2561
f78e0e2e 2562 opt = CPU_BASED_TPR_SHADOW |
25c5f225 2563 CPU_BASED_USE_MSR_BITMAPS |
f78e0e2e 2564 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
1c3d14fe
YS
2565 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
2566 &_cpu_based_exec_control) < 0)
002c7f7c 2567 return -EIO;
6e5d865c
YS
2568#ifdef CONFIG_X86_64
2569 if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
2570 _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
2571 ~CPU_BASED_CR8_STORE_EXITING;
2572#endif
f78e0e2e 2573 if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
d56f546d
SY
2574 min2 = 0;
2575 opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
8d14695f 2576 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2384d2b3 2577 SECONDARY_EXEC_WBINVD_EXITING |
d56f546d 2578 SECONDARY_EXEC_ENABLE_VPID |
3a624e29 2579 SECONDARY_EXEC_ENABLE_EPT |
4b8d54f9 2580 SECONDARY_EXEC_UNRESTRICTED_GUEST |
4e47c7a6 2581 SECONDARY_EXEC_PAUSE_LOOP_EXITING |
ad756a16 2582 SECONDARY_EXEC_RDTSCP |
83d4c286 2583 SECONDARY_EXEC_ENABLE_INVPCID |
c7c9c56c
YZ
2584 SECONDARY_EXEC_APIC_REGISTER_VIRT |
2585 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
d56f546d
SY
2586 if (adjust_vmx_controls(min2, opt2,
2587 MSR_IA32_VMX_PROCBASED_CTLS2,
f78e0e2e
SY
2588 &_cpu_based_2nd_exec_control) < 0)
2589 return -EIO;
2590 }
2591#ifndef CONFIG_X86_64
2592 if (!(_cpu_based_2nd_exec_control &
2593 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
2594 _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
2595#endif
83d4c286
YZ
2596
2597 if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
2598 _cpu_based_2nd_exec_control &= ~(
8d14695f 2599 SECONDARY_EXEC_APIC_REGISTER_VIRT |
c7c9c56c
YZ
2600 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2601 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
83d4c286 2602
d56f546d 2603 if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
a7052897
MT
2604 /* CR3 accesses and invlpg don't need to cause VM Exits when EPT
2605 enabled */
5fff7d27
GN
2606 _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
2607 CPU_BASED_CR3_STORE_EXITING |
2608 CPU_BASED_INVLPG_EXITING);
d56f546d
SY
2609 rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
2610 vmx_capability.ept, vmx_capability.vpid);
2611 }
1c3d14fe
YS
2612
2613 min = 0;
2614#ifdef CONFIG_X86_64
2615 min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
2616#endif
468d472f 2617 opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT;
1c3d14fe
YS
2618 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
2619 &_vmexit_control) < 0)
002c7f7c 2620 return -EIO;
1c3d14fe 2621
468d472f
SY
2622 min = 0;
2623 opt = VM_ENTRY_LOAD_IA32_PAT;
1c3d14fe
YS
2624 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
2625 &_vmentry_control) < 0)
002c7f7c 2626 return -EIO;
6aa8b732 2627
c68876fd 2628 rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
1c3d14fe
YS
2629
2630 /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
2631 if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
002c7f7c 2632 return -EIO;
1c3d14fe
YS
2633
2634#ifdef CONFIG_X86_64
2635 /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
2636 if (vmx_msr_high & (1u<<16))
002c7f7c 2637 return -EIO;
1c3d14fe
YS
2638#endif
2639
2640 /* Require Write-Back (WB) memory type for VMCS accesses. */
2641 if (((vmx_msr_high >> 18) & 15) != 6)
002c7f7c 2642 return -EIO;
1c3d14fe 2643
002c7f7c
YS
2644 vmcs_conf->size = vmx_msr_high & 0x1fff;
2645 vmcs_conf->order = get_order(vmcs_config.size);
2646 vmcs_conf->revision_id = vmx_msr_low;
1c3d14fe 2647
002c7f7c
YS
2648 vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
2649 vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
f78e0e2e 2650 vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
002c7f7c
YS
2651 vmcs_conf->vmexit_ctrl = _vmexit_control;
2652 vmcs_conf->vmentry_ctrl = _vmentry_control;
1c3d14fe 2653
110312c8
AK
2654 cpu_has_load_ia32_efer =
2655 allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
2656 VM_ENTRY_LOAD_IA32_EFER)
2657 && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
2658 VM_EXIT_LOAD_IA32_EFER);
2659
8bf00a52
GN
2660 cpu_has_load_perf_global_ctrl =
2661 allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
2662 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
2663 && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
2664 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
2665
2666 /*
2667 * Some cpus support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL
2668 * but due to the errata below it can't be used. The workaround is to
2669 * use the MSR-load mechanism to switch IA32_PERF_GLOBAL_CTRL.
2670 *
2671 * VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32]
2672 *
2673 * AAK155 (model 26)
2674 * AAP115 (model 30)
2675 * AAT100 (model 37)
2676 * BC86,AAY89,BD102 (model 44)
2677 * BA97 (model 46)
2678 *
2679 */
2680 if (cpu_has_load_perf_global_ctrl && boot_cpu_data.x86 == 0x6) {
2681 switch (boot_cpu_data.x86_model) {
2682 case 26:
2683 case 30:
2684 case 37:
2685 case 44:
2686 case 46:
2687 cpu_has_load_perf_global_ctrl = false;
2688 printk_once(KERN_WARNING "kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
2689 "does not work properly. Using workaround\n");
2690 break;
2691 default:
2692 break;
2693 }
2694 }
2695
1c3d14fe 2696 return 0;
c68876fd 2697}
6aa8b732
AK
2698
2699static struct vmcs *alloc_vmcs_cpu(int cpu)
2700{
2701 int node = cpu_to_node(cpu);
2702 struct page *pages;
2703 struct vmcs *vmcs;
2704
6484eb3e 2705 pages = alloc_pages_exact_node(node, GFP_KERNEL, vmcs_config.order);
6aa8b732
AK
2706 if (!pages)
2707 return NULL;
2708 vmcs = page_address(pages);
1c3d14fe
YS
2709 memset(vmcs, 0, vmcs_config.size);
2710 vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
6aa8b732
AK
2711 return vmcs;
2712}
2713
2714static struct vmcs *alloc_vmcs(void)
2715{
d3b2c338 2716 return alloc_vmcs_cpu(raw_smp_processor_id());
6aa8b732
AK
2717}
2718
2719static void free_vmcs(struct vmcs *vmcs)
2720{
1c3d14fe 2721 free_pages((unsigned long)vmcs, vmcs_config.order);
6aa8b732
AK
2722}
2723
d462b819
NHE
2724/*
2725 * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded
2726 */
2727static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
2728{
2729 if (!loaded_vmcs->vmcs)
2730 return;
2731 loaded_vmcs_clear(loaded_vmcs);
2732 free_vmcs(loaded_vmcs->vmcs);
2733 loaded_vmcs->vmcs = NULL;
2734}
2735
39959588 2736static void free_kvm_area(void)
6aa8b732
AK
2737{
2738 int cpu;
2739
3230bb47 2740 for_each_possible_cpu(cpu) {
6aa8b732 2741 free_vmcs(per_cpu(vmxarea, cpu));
3230bb47
ZA
2742 per_cpu(vmxarea, cpu) = NULL;
2743 }
6aa8b732
AK
2744}
2745
6aa8b732
AK
2746static __init int alloc_kvm_area(void)
2747{
2748 int cpu;
2749
3230bb47 2750 for_each_possible_cpu(cpu) {
6aa8b732
AK
2751 struct vmcs *vmcs;
2752
2753 vmcs = alloc_vmcs_cpu(cpu);
2754 if (!vmcs) {
2755 free_kvm_area();
2756 return -ENOMEM;
2757 }
2758
2759 per_cpu(vmxarea, cpu) = vmcs;
2760 }
2761 return 0;
2762}
2763
2764static __init int hardware_setup(void)
2765{
002c7f7c
YS
2766 if (setup_vmcs_config(&vmcs_config) < 0)
2767 return -EIO;
50a37eb4
JR
2768
2769 if (boot_cpu_has(X86_FEATURE_NX))
2770 kvm_enable_efer_bits(EFER_NX);
2771
93ba03c2
SY
2772 if (!cpu_has_vmx_vpid())
2773 enable_vpid = 0;
2774
4bc9b982
SY
2775 if (!cpu_has_vmx_ept() ||
2776 !cpu_has_vmx_ept_4levels()) {
93ba03c2 2777 enable_ept = 0;
3a624e29 2778 enable_unrestricted_guest = 0;
83c3a331 2779 enable_ept_ad_bits = 0;
3a624e29
NK
2780 }
2781
83c3a331
XH
2782 if (!cpu_has_vmx_ept_ad_bits())
2783 enable_ept_ad_bits = 0;
2784
3a624e29
NK
2785 if (!cpu_has_vmx_unrestricted_guest())
2786 enable_unrestricted_guest = 0;
93ba03c2
SY
2787
2788 if (!cpu_has_vmx_flexpriority())
2789 flexpriority_enabled = 0;
2790
95ba8273
GN
2791 if (!cpu_has_vmx_tpr_shadow())
2792 kvm_x86_ops->update_cr8_intercept = NULL;
2793
54dee993
MT
2794 if (enable_ept && !cpu_has_vmx_ept_2m_page())
2795 kvm_disable_largepages();
2796
4b8d54f9
ZE
2797 if (!cpu_has_vmx_ple())
2798 ple_gap = 0;
2799
c7c9c56c
YZ
2800 if (!cpu_has_vmx_apic_register_virt() ||
2801 !cpu_has_vmx_virtual_intr_delivery())
2802 enable_apicv_reg_vid = 0;
2803
2804 if (enable_apicv_reg_vid)
2805 kvm_x86_ops->update_cr8_intercept = NULL;
2806 else
2807 kvm_x86_ops->hwapic_irr_update = NULL;
83d4c286 2808
b87a51ae
NHE
2809 if (nested)
2810 nested_vmx_setup_ctls_msrs();
2811
6aa8b732
AK
2812 return alloc_kvm_area();
2813}
2814
2815static __exit void hardware_unsetup(void)
2816{
2817 free_kvm_area();
2818}
2819
14168786
GN
2820static bool emulation_required(struct kvm_vcpu *vcpu)
2821{
2822 return emulate_invalid_guest_state && !guest_state_valid(vcpu);
2823}
2824
91b0aa2c 2825static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
d99e4152 2826 struct kvm_segment *save)
6aa8b732 2827{
d99e4152
GN
2828 if (!emulate_invalid_guest_state) {
2829 /*
2830 * CS and SS RPL should be equal during guest entry according
2831 * to VMX spec, but in reality it is not always so. Since vcpu
2832 * is in the middle of the transition from real mode to
2833 * protected mode it is safe to assume that RPL 0 is a good
2834 * default value.
2835 */
2836 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
2837 save->selector &= ~SELECTOR_RPL_MASK;
2838 save->dpl = save->selector & SELECTOR_RPL_MASK;
2839 save->s = 1;
6aa8b732 2840 }
d99e4152 2841 vmx_set_segment(vcpu, save, seg);
6aa8b732
AK
2842}
2843
2844static void enter_pmode(struct kvm_vcpu *vcpu)
2845{
2846 unsigned long flags;
a89a8fb9 2847 struct vcpu_vmx *vmx = to_vmx(vcpu);
6aa8b732 2848
d99e4152
GN
2849 /*
2850 * Update the real-mode segment cache. It may not be up-to-date if a
2851 * segment register was written while the vcpu was in guest mode.
2852 */
2853 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
2854 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
2855 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
2856 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
2857 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
2858 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
2859
7ffd92c5 2860 vmx->rmode.vm86_active = 0;
6aa8b732 2861
2fb92db1
AK
2862 vmx_segment_cache_clear(vmx);
2863
f5f7b2fe 2864 vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
6aa8b732
AK
2865
2866 flags = vmcs_readl(GUEST_RFLAGS);
78ac8b47
AK
2867 flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
2868 flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
6aa8b732
AK
2869 vmcs_writel(GUEST_RFLAGS, flags);
2870
66aee91a
RR
2871 vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
2872 (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));
6aa8b732
AK
2873
2874 update_exception_bitmap(vcpu);
2875
91b0aa2c
GN
2876 fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
2877 fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
2878 fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
2879 fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
2880 fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
2881 fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
1f3141e8
GN
2882
2883 /* CPL is always 0 when CPU enters protected mode */
2884 __set_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
2885 vmx->cpl = 0;
6aa8b732
AK
2886}
2887
d77c26fc 2888static gva_t rmode_tss_base(struct kvm *kvm)
6aa8b732 2889{
bfc6d222 2890 if (!kvm->arch.tss_addr) {
bc6678a3 2891 struct kvm_memslots *slots;
28a37544 2892 struct kvm_memory_slot *slot;
bc6678a3
MT
2893 gfn_t base_gfn;
2894
90d83dc3 2895 slots = kvm_memslots(kvm);
28a37544
XG
2896 slot = id_to_memslot(slots, 0);
2897 base_gfn = slot->base_gfn + slot->npages - 3;
2898
cbc94022
IE
2899 return base_gfn << PAGE_SHIFT;
2900 }
bfc6d222 2901 return kvm->arch.tss_addr;
6aa8b732
AK
2902}
2903
f5f7b2fe 2904static void fix_rmode_seg(int seg, struct kvm_segment *save)
6aa8b732 2905{
772e0318 2906 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
d99e4152
GN
2907 struct kvm_segment var = *save;
2908
2909 var.dpl = 0x3;
2910 if (seg == VCPU_SREG_CS)
2911 var.type = 0x3;
2912
2913 if (!emulate_invalid_guest_state) {
2914 var.selector = var.base >> 4;
2915 var.base = var.base & 0xffff0;
2916 var.limit = 0xffff;
2917 var.g = 0;
2918 var.db = 0;
2919 var.present = 1;
2920 var.s = 1;
2921 var.l = 0;
2922 var.unusable = 0;
2923 var.type = 0x3;
2924 var.avl = 0;
2925 if (save->base & 0xf)
2926 printk_once(KERN_WARNING "kvm: segment base is not "
2927 "paragraph aligned when entering "
2928 "protected mode (seg=%d)", seg);
2929 }
6aa8b732 2930
d99e4152
GN
2931 vmcs_write16(sf->selector, var.selector);
2932 vmcs_write32(sf->base, var.base);
2933 vmcs_write32(sf->limit, var.limit);
2934 vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var));
6aa8b732
AK
2935}
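/*
 * Illustrative example for the !emulate_invalid_guest_state path: a
 * segment with base 0xb8000 is rewritten as selector = 0xb800,
 * base = 0xb8000, limit = 0xffff, so that selector * 16 == base, the
 * relationship real mode expects.
 */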
2936
2937static void enter_rmode(struct kvm_vcpu *vcpu)
2938{
2939 unsigned long flags;
a89a8fb9 2940 struct vcpu_vmx *vmx = to_vmx(vcpu);
6aa8b732 2941
f5f7b2fe
AK
2942 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
2943 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
2944 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
2945 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
2946 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
c6ad1153
GN
2947 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
2948 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
f5f7b2fe 2949
7ffd92c5 2950 vmx->rmode.vm86_active = 1;
6aa8b732 2951
776e58ea
GN
2952 /*
2953 * Very old userspace does not call KVM_SET_TSS_ADDR before entering
2954 * vcpu. Call it here with phys address pointing 16M below 4G.
2955 */
2956 if (!vcpu->kvm->arch.tss_addr) {
2957 printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR needs to be "
2958 "called before entering vcpu\n");
2959 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
2960 vmx_set_tss_addr(vcpu->kvm, 0xfeffd000);
2961 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2962 }
2963
2fb92db1
AK
2964 vmx_segment_cache_clear(vmx);
2965
6aa8b732 2966 vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));
6aa8b732 2967 vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
6aa8b732
AK
2968 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
2969
2970 flags = vmcs_readl(GUEST_RFLAGS);
78ac8b47 2971 vmx->rmode.save_rflags = flags;
6aa8b732 2972
053de044 2973 flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
6aa8b732
AK
2974
2975 vmcs_writel(GUEST_RFLAGS, flags);
66aee91a 2976 vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
6aa8b732
AK
2977 update_exception_bitmap(vcpu);
2978
d99e4152
GN
2979 fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
2980 fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
2981 fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
2982 fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
2983 fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
2984 fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
b246dd5d 2985
8668a3c4 2986 kvm_mmu_reset_context(vcpu);
6aa8b732
AK
2987}
2988
401d10de
AS
2989static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
2990{
2991 struct vcpu_vmx *vmx = to_vmx(vcpu);
26bb0981
AK
2992 struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
2993
2994 if (!msr)
2995 return;
401d10de 2996
44ea2b17
AK
2997 /*
2998 * Force kernel_gs_base reloading before EFER changes, as control
2999 * of this msr depends on is_long_mode().
3000 */
3001 vmx_load_host_state(to_vmx(vcpu));
f6801dff 3002 vcpu->arch.efer = efer;
401d10de
AS
3003 if (efer & EFER_LMA) {
3004 vmcs_write32(VM_ENTRY_CONTROLS,
3005 vmcs_read32(VM_ENTRY_CONTROLS) |
3006 VM_ENTRY_IA32E_MODE);
3007 msr->data = efer;
3008 } else {
3009 vmcs_write32(VM_ENTRY_CONTROLS,
3010 vmcs_read32(VM_ENTRY_CONTROLS) &
3011 ~VM_ENTRY_IA32E_MODE);
3012
3013 msr->data = efer & ~EFER_LME;
3014 }
3015 setup_msrs(vmx);
3016}
3017
05b3e0c2 3018#ifdef CONFIG_X86_64
6aa8b732
AK
3019
3020static void enter_lmode(struct kvm_vcpu *vcpu)
3021{
3022 u32 guest_tr_ar;
3023
2fb92db1
AK
3024 vmx_segment_cache_clear(to_vmx(vcpu));
3025
6aa8b732
AK
3026 guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
3027 if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
bd80158a
JK
3028 pr_debug_ratelimited("%s: tss fixup for long mode.\n",
3029 __func__);
6aa8b732
AK
3030 vmcs_write32(GUEST_TR_AR_BYTES,
3031 (guest_tr_ar & ~AR_TYPE_MASK)
3032 | AR_TYPE_BUSY_64_TSS);
3033 }
da38f438 3034 vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
6aa8b732
AK
3035}
3036
3037static void exit_lmode(struct kvm_vcpu *vcpu)
3038{
6aa8b732
AK
3039 vmcs_write32(VM_ENTRY_CONTROLS,
3040 vmcs_read32(VM_ENTRY_CONTROLS)
1e4e6e00 3041 & ~VM_ENTRY_IA32E_MODE);
da38f438 3042 vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
6aa8b732
AK
3043}
3044
3045#endif
3046
2384d2b3
SY
3047static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
3048{
b9d762fa 3049 vpid_sync_context(to_vmx(vcpu));
dd180b3e
XG
3050 if (enable_ept) {
3051 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
3052 return;
4e1096d2 3053 ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa));
dd180b3e 3054 }
2384d2b3
SY
3055}
3056
e8467fda
AK
3057static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
3058{
3059 ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
3060
3061 vcpu->arch.cr0 &= ~cr0_guest_owned_bits;
3062 vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits;
3063}
3064
aff48baa
AK
3065static void vmx_decache_cr3(struct kvm_vcpu *vcpu)
3066{
3067 if (enable_ept && is_paging(vcpu))
3068 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
3069 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
3070}
3071
25c4c276 3072static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
399badf3 3073{
fc78f519
AK
3074 ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
3075
3076 vcpu->arch.cr4 &= ~cr4_guest_owned_bits;
3077 vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits;
399badf3
AK
3078}
3079
1439442c
SY
3080static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
3081{
6de4f3ad
AK
3082 if (!test_bit(VCPU_EXREG_PDPTR,
3083 (unsigned long *)&vcpu->arch.regs_dirty))
3084 return;
3085
1439442c 3086 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
ff03a073
JR
3087 vmcs_write64(GUEST_PDPTR0, vcpu->arch.mmu.pdptrs[0]);
3088 vmcs_write64(GUEST_PDPTR1, vcpu->arch.mmu.pdptrs[1]);
3089 vmcs_write64(GUEST_PDPTR2, vcpu->arch.mmu.pdptrs[2]);
3090 vmcs_write64(GUEST_PDPTR3, vcpu->arch.mmu.pdptrs[3]);
1439442c
SY
3091 }
3092}
3093
8f5d549f
AK
3094static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
3095{
3096 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
ff03a073
JR
3097 vcpu->arch.mmu.pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
3098 vcpu->arch.mmu.pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
3099 vcpu->arch.mmu.pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
3100 vcpu->arch.mmu.pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
8f5d549f 3101 }
6de4f3ad
AK
3102
3103 __set_bit(VCPU_EXREG_PDPTR,
3104 (unsigned long *)&vcpu->arch.regs_avail);
3105 __set_bit(VCPU_EXREG_PDPTR,
3106 (unsigned long *)&vcpu->arch.regs_dirty);
8f5d549f
AK
3107}
3108
5e1746d6 3109static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
1439442c
SY
3110
3111static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
3112 unsigned long cr0,
3113 struct kvm_vcpu *vcpu)
3114{
5233dd51
MT
3115 if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
3116 vmx_decache_cr3(vcpu);
1439442c
SY
3117 if (!(cr0 & X86_CR0_PG)) {
3118 /* From paging/starting to nonpaging */
3119 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
65267ea1 3120 vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) |
1439442c
SY
3121 (CPU_BASED_CR3_LOAD_EXITING |
3122 CPU_BASED_CR3_STORE_EXITING));
3123 vcpu->arch.cr0 = cr0;
fc78f519 3124 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
1439442c
SY
3125 } else if (!is_paging(vcpu)) {
3126 /* From nonpaging to paging */
3127 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
65267ea1 3128 vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
1439442c
SY
3129 ~(CPU_BASED_CR3_LOAD_EXITING |
3130 CPU_BASED_CR3_STORE_EXITING));
3131 vcpu->arch.cr0 = cr0;
fc78f519 3132 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
1439442c 3133 }
95eb84a7
SY
3134
3135 if (!(cr0 & X86_CR0_WP))
3136 *hw_cr0 &= ~X86_CR0_WP;
1439442c
SY
3137}
3138
6aa8b732
AK
3139static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
3140{
7ffd92c5 3141 struct vcpu_vmx *vmx = to_vmx(vcpu);
3a624e29
NK
3142 unsigned long hw_cr0;
3143
5037878e 3144 hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK);
3a624e29 3145 if (enable_unrestricted_guest)
5037878e 3146 hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
218e763f 3147 else {
5037878e 3148 hw_cr0 |= KVM_VM_CR0_ALWAYS_ON;
1439442c 3149
218e763f
GN
3150 if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
3151 enter_pmode(vcpu);
6aa8b732 3152
218e763f
GN
3153 if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
3154 enter_rmode(vcpu);
3155 }
6aa8b732 3156
05b3e0c2 3157#ifdef CONFIG_X86_64
f6801dff 3158 if (vcpu->arch.efer & EFER_LME) {
707d92fa 3159 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
6aa8b732 3160 enter_lmode(vcpu);
707d92fa 3161 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
6aa8b732
AK
3162 exit_lmode(vcpu);
3163 }
3164#endif
3165
089d034e 3166 if (enable_ept)
1439442c
SY
3167 ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);
3168
02daab21 3169 if (!vcpu->fpu_active)
81231c69 3170 hw_cr0 |= X86_CR0_TS | X86_CR0_MP;
02daab21 3171
6aa8b732 3172 vmcs_writel(CR0_READ_SHADOW, cr0);
1439442c 3173 vmcs_writel(GUEST_CR0, hw_cr0);
ad312c7c 3174 vcpu->arch.cr0 = cr0;
14168786
GN
3175
3176 /* depends on vcpu->arch.cr0 to be set to a new value */
3177 vmx->emulation_required = emulation_required(vcpu);
6aa8b732
AK
3178}
3179
1439442c
SY
3180static u64 construct_eptp(unsigned long root_hpa)
3181{
3182 u64 eptp;
3183
3184 /* TODO write the value reading from MSR */
3185 eptp = VMX_EPT_DEFAULT_MT |
3186 VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;
b38f9934
XH
3187 if (enable_ept_ad_bits)
3188 eptp |= VMX_EPT_AD_ENABLE_BIT;
1439442c
SY
3189 eptp |= (root_hpa & PAGE_MASK);
3190
3191 return eptp;
3192}
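/*
 * Rough layout of the EPT pointer built above (a sketch, not the SDM's
 * exact field list):
 *	bits  2:0  memory type for EPT structures (6 = write-back)
 *	bits  5:3  page-walk length minus one (3 = four-level EPT)
 *	bit   6    enable accessed/dirty flags, if enable_ept_ad_bits
 *	bits 51:12 physical address of the root EPT table (root_hpa)
 */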
3193
6aa8b732
AK
3194static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
3195{
1439442c
SY
3196 unsigned long guest_cr3;
3197 u64 eptp;
3198
3199 guest_cr3 = cr3;
089d034e 3200 if (enable_ept) {
1439442c
SY
3201 eptp = construct_eptp(cr3);
3202 vmcs_write64(EPT_POINTER, eptp);
9f8fe504 3203 guest_cr3 = is_paging(vcpu) ? kvm_read_cr3(vcpu) :
b927a3ce 3204 vcpu->kvm->arch.ept_identity_map_addr;
7c93be44 3205 ept_load_pdptrs(vcpu);
1439442c
SY
3206 }
3207
2384d2b3 3208 vmx_flush_tlb(vcpu);
1439442c 3209 vmcs_writel(GUEST_CR3, guest_cr3);
6aa8b732
AK
3210}
3211
5e1746d6 3212static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
6aa8b732 3213{
7ffd92c5 3214 unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ?
1439442c
SY
3215 KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
3216
5e1746d6
NHE
3217 if (cr4 & X86_CR4_VMXE) {
3218 /*
3219 * To use VMXON (and later other VMX instructions), a guest
3220 * must first be able to turn on cr4.VMXE (see handle_vmon()).
3221 * So basically the check on whether to allow nested VMX
3222 * is here.
3223 */
3224 if (!nested_vmx_allowed(vcpu))
3225 return 1;
1a0d74e6
JK
3226 }
3227 if (to_vmx(vcpu)->nested.vmxon &&
3228 ((cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON))
5e1746d6
NHE
3229 return 1;
3230
ad312c7c 3231 vcpu->arch.cr4 = cr4;
bc23008b
AK
3232 if (enable_ept) {
3233 if (!is_paging(vcpu)) {
3234 hw_cr4 &= ~X86_CR4_PAE;
3235 hw_cr4 |= X86_CR4_PSE;
c08800a5
DX
3236 /*
3237 * SMEP is disabled if CPU is in non-paging mode in
3238 * hardware. However KVM always uses paging mode to
3239 * emulate guest non-paging mode with TDP.
3240 * To emulate this behavior, SMEP needs to be manually
3241 * disabled when guest switches to non-paging mode.
3242 */
3243 hw_cr4 &= ~X86_CR4_SMEP;
bc23008b
AK
3244 } else if (!(cr4 & X86_CR4_PAE)) {
3245 hw_cr4 &= ~X86_CR4_PAE;
3246 }
3247 }
1439442c
SY
3248
3249 vmcs_writel(CR4_READ_SHADOW, cr4);
3250 vmcs_writel(GUEST_CR4, hw_cr4);
5e1746d6 3251 return 0;
6aa8b732
AK
3252}
3253
6aa8b732
AK
3254static void vmx_get_segment(struct kvm_vcpu *vcpu,
3255 struct kvm_segment *var, int seg)
3256{
a9179499 3257 struct vcpu_vmx *vmx = to_vmx(vcpu);
6aa8b732
AK
3258 u32 ar;
3259
c6ad1153 3260 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
f5f7b2fe 3261 *var = vmx->rmode.segs[seg];
a9179499 3262 if (seg == VCPU_SREG_TR
2fb92db1 3263 || var->selector == vmx_read_guest_seg_selector(vmx, seg))
f5f7b2fe 3264 return;
1390a28b
AK
3265 var->base = vmx_read_guest_seg_base(vmx, seg);
3266 var->selector = vmx_read_guest_seg_selector(vmx, seg);
3267 return;
a9179499 3268 }
2fb92db1
AK
3269 var->base = vmx_read_guest_seg_base(vmx, seg);
3270 var->limit = vmx_read_guest_seg_limit(vmx, seg);
3271 var->selector = vmx_read_guest_seg_selector(vmx, seg);
3272 ar = vmx_read_guest_seg_ar(vmx, seg);
6aa8b732
AK
3273 var->type = ar & 15;
3274 var->s = (ar >> 4) & 1;
3275 var->dpl = (ar >> 5) & 3;
3276 var->present = (ar >> 7) & 1;
3277 var->avl = (ar >> 12) & 1;
3278 var->l = (ar >> 13) & 1;
3279 var->db = (ar >> 14) & 1;
3280 var->g = (ar >> 15) & 1;
3281 var->unusable = (ar >> 16) & 1;
3282}
3283
a9179499
AK
3284static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
3285{
a9179499
AK
3286 struct kvm_segment s;
3287
3288 if (to_vmx(vcpu)->rmode.vm86_active) {
3289 vmx_get_segment(vcpu, &s, seg);
3290 return s.base;
3291 }
2fb92db1 3292 return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
a9179499
AK
3293}
3294
b09408d0 3295static int vmx_get_cpl(struct kvm_vcpu *vcpu)
2e4d2653 3296{
b09408d0
MT
3297 struct vcpu_vmx *vmx = to_vmx(vcpu);
3298
3eeb3288 3299 if (!is_protmode(vcpu))
2e4d2653
IE
3300 return 0;
3301
f4c63e5d
AK
3302 if (!is_long_mode(vcpu)
3303 && (kvm_get_rflags(vcpu) & X86_EFLAGS_VM)) /* if virtual 8086 */
2e4d2653
IE
3304 return 3;
3305
69c73028
AK
3306 if (!test_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail)) {
3307 __set_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
b09408d0 3308 vmx->cpl = vmx_read_guest_seg_selector(vmx, VCPU_SREG_CS) & 3;
69c73028 3309 }
d881e6f6
AK
3310
3311 return vmx->cpl;
69c73028
AK
3312}
3313
3314
653e3108 3315static u32 vmx_segment_access_rights(struct kvm_segment *var)
6aa8b732 3316{
6aa8b732
AK
3317 u32 ar;
3318
f0495f9b 3319 if (var->unusable || !var->present)
6aa8b732
AK
3320 ar = 1 << 16;
3321 else {
3322 ar = var->type & 15;
3323 ar |= (var->s & 1) << 4;
3324 ar |= (var->dpl & 3) << 5;
3325 ar |= (var->present & 1) << 7;
3326 ar |= (var->avl & 1) << 12;
3327 ar |= (var->l & 1) << 13;
3328 ar |= (var->db & 1) << 14;
3329 ar |= (var->g & 1) << 15;
3330 }
653e3108
AK
3331
3332 return ar;
3333}
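/*
 * Worked example of the packing above (illustrative values): a flat
 * 32-bit code segment with type 0xb, s=1, dpl=0, present=1, db=1 and
 * g=1 packs to ar = 0xc09b, while an unusable or not-present segment
 * is reported simply as 1 << 16 = 0x10000.
 */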
3334
3335static void vmx_set_segment(struct kvm_vcpu *vcpu,
3336 struct kvm_segment *var, int seg)
3337{
7ffd92c5 3338 struct vcpu_vmx *vmx = to_vmx(vcpu);
772e0318 3339 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
653e3108 3340
2fb92db1 3341 vmx_segment_cache_clear(vmx);
2f143240
GN
3342 if (seg == VCPU_SREG_CS)
3343 __clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
2fb92db1 3344
1ecd50a9
GN
3345 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
3346 vmx->rmode.segs[seg] = *var;
3347 if (seg == VCPU_SREG_TR)
3348 vmcs_write16(sf->selector, var->selector);
3349 else if (var->s)
3350 fix_rmode_seg(seg, &vmx->rmode.segs[seg]);
d99e4152 3351 goto out;
653e3108 3352 }
1ecd50a9 3353
653e3108
AK
3354 vmcs_writel(sf->base, var->base);
3355 vmcs_write32(sf->limit, var->limit);
3356 vmcs_write16(sf->selector, var->selector);
3a624e29
NK
3357
3358 /*
3359 * Fix the "Accessed" bit in AR field of segment registers for older
3360 * qemu binaries.
3361 * IA32 arch specifies that at the time of processor reset the
3362 * "Accessed" bit in the AR field of segment registers is 1. And qemu
0fa06071 3363 * is setting it to 0 in the userland code. This causes invalid guest
3a624e29
NK
3364 * state vmexit when "unrestricted guest" mode is turned on.
3365 * Fix for this setup issue in cpu_reset is being pushed in the qemu
3366 * tree. Newer qemu binaries with that qemu fix would not need this
3367 * kvm hack.
3368 */
3369 if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR))
f924d66d 3370 var->type |= 0x1; /* Accessed */
3a624e29 3371
f924d66d 3372 vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var));
d99e4152
GN
3373
3374out:
14168786 3375 vmx->emulation_required |= emulation_required(vcpu);
6aa8b732
AK
3376}
3377
6aa8b732
AK
3378static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
3379{
2fb92db1 3380 u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS);
6aa8b732
AK
3381
3382 *db = (ar >> 14) & 1;
3383 *l = (ar >> 13) & 1;
3384}
3385
89a27f4d 3386static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
6aa8b732 3387{
89a27f4d
GN
3388 dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
3389 dt->address = vmcs_readl(GUEST_IDTR_BASE);
6aa8b732
AK
3390}
3391
89a27f4d 3392static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
6aa8b732 3393{
89a27f4d
GN
3394 vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
3395 vmcs_writel(GUEST_IDTR_BASE, dt->address);
6aa8b732
AK
3396}
3397
89a27f4d 3398static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
6aa8b732 3399{
89a27f4d
GN
3400 dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
3401 dt->address = vmcs_readl(GUEST_GDTR_BASE);
6aa8b732
AK
3402}
3403
89a27f4d 3404static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
6aa8b732 3405{
89a27f4d
GN
3406 vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
3407 vmcs_writel(GUEST_GDTR_BASE, dt->address);
6aa8b732
AK
3408}
3409
648dfaa7
MG
3410static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
3411{
3412 struct kvm_segment var;
3413 u32 ar;
3414
3415 vmx_get_segment(vcpu, &var, seg);
07f42f5f 3416 var.dpl = 0x3;
0647f4aa
GN
3417 if (seg == VCPU_SREG_CS)
3418 var.type = 0x3;
648dfaa7
MG
3419 ar = vmx_segment_access_rights(&var);
3420
3421 if (var.base != (var.selector << 4))
3422 return false;
89efbed0 3423 if (var.limit != 0xffff)
648dfaa7 3424 return false;
07f42f5f 3425 if (ar != 0xf3)
648dfaa7
MG
3426 return false;
3427
3428 return true;
3429}
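/*
 * In other words (illustrative numbers): a real-mode segment loaded with
 * selector 0x1234 is only considered valid here if its cached base is
 * 0x12340, its limit is 0xffff and its access rights pack to 0xf3
 * (data, writable, accessed, dpl 3, present).
 */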
3430
3431static bool code_segment_valid(struct kvm_vcpu *vcpu)
3432{
3433 struct kvm_segment cs;
3434 unsigned int cs_rpl;
3435
3436 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
3437 cs_rpl = cs.selector & SELECTOR_RPL_MASK;
3438
1872a3f4
AK
3439 if (cs.unusable)
3440 return false;
648dfaa7
MG
3441 if (~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_ACCESSES_MASK))
3442 return false;
3443 if (!cs.s)
3444 return false;
1872a3f4 3445 if (cs.type & AR_TYPE_WRITEABLE_MASK) {
648dfaa7
MG
3446 if (cs.dpl > cs_rpl)
3447 return false;
1872a3f4 3448 } else {
648dfaa7
MG
3449 if (cs.dpl != cs_rpl)
3450 return false;
3451 }
3452 if (!cs.present)
3453 return false;
3454
3455 /* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */
3456 return true;
3457}
3458
3459static bool stack_segment_valid(struct kvm_vcpu *vcpu)
3460{
3461 struct kvm_segment ss;
3462 unsigned int ss_rpl;
3463
3464 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
3465 ss_rpl = ss.selector & SELECTOR_RPL_MASK;
3466
1872a3f4
AK
3467 if (ss.unusable)
3468 return true;
3469 if (ss.type != 3 && ss.type != 7)
648dfaa7
MG
3470 return false;
3471 if (!ss.s)
3472 return false;
3473 if (ss.dpl != ss_rpl) /* DPL != RPL */
3474 return false;
3475 if (!ss.present)
3476 return false;
3477
3478 return true;
3479}
3480
3481static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
3482{
3483 struct kvm_segment var;
3484 unsigned int rpl;
3485
3486 vmx_get_segment(vcpu, &var, seg);
3487 rpl = var.selector & SELECTOR_RPL_MASK;
3488
1872a3f4
AK
3489 if (var.unusable)
3490 return true;
648dfaa7
MG
3491 if (!var.s)
3492 return false;
3493 if (!var.present)
3494 return false;
3495 if (~var.type & (AR_TYPE_CODE_MASK|AR_TYPE_WRITEABLE_MASK)) {
3496 if (var.dpl < rpl) /* DPL < RPL */
3497 return false;
3498 }
3499
3500 /* TODO: Add other members to kvm_segment_field to allow checking for other access
3501 * rights flags
3502 */
3503 return true;
3504}
3505
3506static bool tr_valid(struct kvm_vcpu *vcpu)
3507{
3508 struct kvm_segment tr;
3509
3510 vmx_get_segment(vcpu, &tr, VCPU_SREG_TR);
3511
1872a3f4
AK
3512 if (tr.unusable)
3513 return false;
648dfaa7
MG
3514 if (tr.selector & SELECTOR_TI_MASK) /* TI = 1 */
3515 return false;
1872a3f4 3516 if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
648dfaa7
MG
3517 return false;
3518 if (!tr.present)
3519 return false;
3520
3521 return true;
3522}
3523
3524static bool ldtr_valid(struct kvm_vcpu *vcpu)
3525{
3526 struct kvm_segment ldtr;
3527
3528 vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR);
3529
1872a3f4
AK
3530 if (ldtr.unusable)
3531 return true;
648dfaa7
MG
3532 if (ldtr.selector & SELECTOR_TI_MASK) /* TI = 1 */
3533 return false;
3534 if (ldtr.type != 2)
3535 return false;
3536 if (!ldtr.present)
3537 return false;
3538
3539 return true;
3540}
3541
3542static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
3543{
3544 struct kvm_segment cs, ss;
3545
3546 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
3547 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
3548
3549 return ((cs.selector & SELECTOR_RPL_MASK) ==
3550 (ss.selector & SELECTOR_RPL_MASK));
3551}
3552
3553/*
3554 * Check if guest state is valid. Returns true if valid, false if
3555 * not.
3556 * We assume that registers are always usable
3557 */
3558static bool guest_state_valid(struct kvm_vcpu *vcpu)
3559{
c5e97c80
GN
3560 if (enable_unrestricted_guest)
3561 return true;
3562
648dfaa7 3563 /* real mode guest state checks */
3eeb3288 3564 if (!is_protmode(vcpu)) {
648dfaa7
MG
3565 if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
3566 return false;
3567 if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
3568 return false;
3569 if (!rmode_segment_valid(vcpu, VCPU_SREG_DS))
3570 return false;
3571 if (!rmode_segment_valid(vcpu, VCPU_SREG_ES))
3572 return false;
3573 if (!rmode_segment_valid(vcpu, VCPU_SREG_FS))
3574 return false;
3575 if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
3576 return false;
3577 } else {
3578 /* protected mode guest state checks */
3579 if (!cs_ss_rpl_check(vcpu))
3580 return false;
3581 if (!code_segment_valid(vcpu))
3582 return false;
3583 if (!stack_segment_valid(vcpu))
3584 return false;
3585 if (!data_segment_valid(vcpu, VCPU_SREG_DS))
3586 return false;
3587 if (!data_segment_valid(vcpu, VCPU_SREG_ES))
3588 return false;
3589 if (!data_segment_valid(vcpu, VCPU_SREG_FS))
3590 return false;
3591 if (!data_segment_valid(vcpu, VCPU_SREG_GS))
3592 return false;
3593 if (!tr_valid(vcpu))
3594 return false;
3595 if (!ldtr_valid(vcpu))
3596 return false;
3597 }
3598 /* TODO:
3599 * - Add checks on RIP
3600 * - Add checks on RFLAGS
3601 */
3602
3603 return true;
3604}
3605
d77c26fc 3606static int init_rmode_tss(struct kvm *kvm)
6aa8b732 3607{
40dcaa9f 3608 gfn_t fn;
195aefde 3609 u16 data = 0;
40dcaa9f 3610 int r, idx, ret = 0;
6aa8b732 3611
40dcaa9f
XG
3612 idx = srcu_read_lock(&kvm->srcu);
3613 fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
195aefde
IE
3614 r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
3615 if (r < 0)
10589a46 3616 goto out;
195aefde 3617 data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
464d17c8
SY
3618 r = kvm_write_guest_page(kvm, fn++, &data,
3619 TSS_IOPB_BASE_OFFSET, sizeof(u16));
195aefde 3620 if (r < 0)
10589a46 3621 goto out;
195aefde
IE
3622 r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
3623 if (r < 0)
10589a46 3624 goto out;
195aefde
IE
3625 r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
3626 if (r < 0)
10589a46 3627 goto out;
195aefde 3628 data = ~0;
10589a46
MT
3629 r = kvm_write_guest_page(kvm, fn, &data,
3630 RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
3631 sizeof(u8));
195aefde 3632 if (r < 0)
10589a46
MT
3633 goto out;
3634
3635 ret = 1;
3636out:
40dcaa9f 3637 srcu_read_unlock(&kvm->srcu, idx);
10589a46 3638 return ret;
6aa8b732
AK
3639}
3640
b7ebfb05
SY
3641static int init_rmode_identity_map(struct kvm *kvm)
3642{
40dcaa9f 3643 int i, idx, r, ret;
b7ebfb05
SY
3644 pfn_t identity_map_pfn;
3645 u32 tmp;
3646
089d034e 3647 if (!enable_ept)
b7ebfb05
SY
3648 return 1;
3649 if (unlikely(!kvm->arch.ept_identity_pagetable)) {
3650 printk(KERN_ERR "EPT: identity-mapping pagetable "
3651 "haven't been allocated!\n");
3652 return 0;
3653 }
3654 if (likely(kvm->arch.ept_identity_pagetable_done))
3655 return 1;
3656 ret = 0;
b927a3ce 3657 identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT;
40dcaa9f 3658 idx = srcu_read_lock(&kvm->srcu);
b7ebfb05
SY
3659 r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
3660 if (r < 0)
3661 goto out;
3662 /* Set up identity-mapping pagetable for EPT in real mode */
3663 for (i = 0; i < PT32_ENT_PER_PAGE; i++) {
3664 tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
3665 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
3666 r = kvm_write_guest_page(kvm, identity_map_pfn,
3667 &tmp, i * sizeof(tmp), sizeof(tmp));
3668 if (r < 0)
3669 goto out;
3670 }
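/*
 * For illustration, with the usual page flag values the loop above
 * writes 4MB PSE entries of the form (i << 22) | 0xe7, e.g. entry 0
 * is 0x000000e7 and entry 1 is 0x004000e7, so each entry maps its
 * 4MB slot onto itself.
 */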
3671 kvm->arch.ept_identity_pagetable_done = true;
3672 ret = 1;
3673out:
40dcaa9f 3674 srcu_read_unlock(&kvm->srcu, idx);
b7ebfb05
SY
3675 return ret;
3676}
3677
6aa8b732
AK
3678static void seg_setup(int seg)
3679{
772e0318 3680 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
3a624e29 3681 unsigned int ar;
6aa8b732
AK
3682
3683 vmcs_write16(sf->selector, 0);
3684 vmcs_writel(sf->base, 0);
3685 vmcs_write32(sf->limit, 0xffff);
d54d07b2
GN
3686 ar = 0x93;
3687 if (seg == VCPU_SREG_CS)
3688 ar |= 0x08; /* code segment */
3a624e29
NK
3689
3690 vmcs_write32(sf->ar_bytes, ar);
6aa8b732
AK
3691}
3692
f78e0e2e
SY
3693static int alloc_apic_access_page(struct kvm *kvm)
3694{
4484141a 3695 struct page *page;
f78e0e2e
SY
3696 struct kvm_userspace_memory_region kvm_userspace_mem;
3697 int r = 0;
3698
79fac95e 3699 mutex_lock(&kvm->slots_lock);
bfc6d222 3700 if (kvm->arch.apic_access_page)
f78e0e2e
SY
3701 goto out;
3702 kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
3703 kvm_userspace_mem.flags = 0;
3704 kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL;
3705 kvm_userspace_mem.memory_size = PAGE_SIZE;
47ae31e2 3706 r = __kvm_set_memory_region(kvm, &kvm_userspace_mem);
f78e0e2e
SY
3707 if (r)
3708 goto out;
72dc67a6 3709
4484141a
XG
3710 page = gfn_to_page(kvm, 0xfee00);
3711 if (is_error_page(page)) {
3712 r = -EFAULT;
3713 goto out;
3714 }
3715
3716 kvm->arch.apic_access_page = page;
f78e0e2e 3717out:
79fac95e 3718 mutex_unlock(&kvm->slots_lock);
f78e0e2e
SY
3719 return r;
3720}
3721
b7ebfb05
SY
3722static int alloc_identity_pagetable(struct kvm *kvm)
3723{
4484141a 3724 struct page *page;
b7ebfb05
SY
3725 struct kvm_userspace_memory_region kvm_userspace_mem;
3726 int r = 0;
3727
79fac95e 3728 mutex_lock(&kvm->slots_lock);
b7ebfb05
SY
3729 if (kvm->arch.ept_identity_pagetable)
3730 goto out;
3731 kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
3732 kvm_userspace_mem.flags = 0;
b927a3ce
SY
3733 kvm_userspace_mem.guest_phys_addr =
3734 kvm->arch.ept_identity_map_addr;
b7ebfb05 3735 kvm_userspace_mem.memory_size = PAGE_SIZE;
47ae31e2 3736 r = __kvm_set_memory_region(kvm, &kvm_userspace_mem);
b7ebfb05
SY
3737 if (r)
3738 goto out;
3739
4484141a
XG
3740 page = gfn_to_page(kvm, kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
3741 if (is_error_page(page)) {
3742 r = -EFAULT;
3743 goto out;
3744 }
3745
3746 kvm->arch.ept_identity_pagetable = page;
b7ebfb05 3747out:
79fac95e 3748 mutex_unlock(&kvm->slots_lock);
b7ebfb05
SY
3749 return r;
3750}
3751
2384d2b3
SY
3752static void allocate_vpid(struct vcpu_vmx *vmx)
3753{
3754 int vpid;
3755
3756 vmx->vpid = 0;
919818ab 3757 if (!enable_vpid)
2384d2b3
SY
3758 return;
3759 spin_lock(&vmx_vpid_lock);
3760 vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
3761 if (vpid < VMX_NR_VPIDS) {
3762 vmx->vpid = vpid;
3763 __set_bit(vpid, vmx_vpid_bitmap);
3764 }
3765 spin_unlock(&vmx_vpid_lock);
3766}
3767
cdbecfc3
LJ
3768static void free_vpid(struct vcpu_vmx *vmx)
3769{
3770 if (!enable_vpid)
3771 return;
3772 spin_lock(&vmx_vpid_lock);
3773 if (vmx->vpid != 0)
3774 __clear_bit(vmx->vpid, vmx_vpid_bitmap);
3775 spin_unlock(&vmx_vpid_lock);
3776}
3777
8d14695f
YZ
3778#define MSR_TYPE_R 1
3779#define MSR_TYPE_W 2
3780static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
3781 u32 msr, int type)
25c5f225 3782{
3e7c73e9 3783 int f = sizeof(unsigned long);
25c5f225
SY
3784
3785 if (!cpu_has_vmx_msr_bitmap())
3786 return;
3787
3788 /*
3789 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
3790 * have the write-low and read-high bitmap offsets the wrong way round.
3791 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
3792 */
25c5f225 3793 if (msr <= 0x1fff) {
8d14695f
YZ
3794 if (type & MSR_TYPE_R)
3795 /* read-low */
3796 __clear_bit(msr, msr_bitmap + 0x000 / f);
3797
3798 if (type & MSR_TYPE_W)
3799 /* write-low */
3800 __clear_bit(msr, msr_bitmap + 0x800 / f);
3801
25c5f225
SY
3802 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
3803 msr &= 0x1fff;
8d14695f
YZ
3804 if (type & MSR_TYPE_R)
3805 /* read-high */
3806 __clear_bit(msr, msr_bitmap + 0x400 / f);
3807
3808 if (type & MSR_TYPE_W)
3809 /* write-high */
3810 __clear_bit(msr, msr_bitmap + 0xc00 / f);
3811
3812 }
3813}
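/*
 * For illustration, assuming the layout described in the comment above:
 * clearing the read intercept for MSR 0xc0000080 (EFER) first masks the
 * index down to 0x80 and then clears bit 0x80 in the read-high area at
 * offset 0x400, i.e. bit 0 of the byte at 0x410; the matching write
 * intercept bit lives at offset 0xc00 + 0x10.
 */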
3814
3815static void __vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
3816 u32 msr, int type)
3817{
3818 int f = sizeof(unsigned long);
3819
3820 if (!cpu_has_vmx_msr_bitmap())
3821 return;
3822
3823 /*
3824 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
3825 * have the write-low and read-high bitmap offsets the wrong way round.
3826 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
3827 */
3828 if (msr <= 0x1fff) {
3829 if (type & MSR_TYPE_R)
3830 /* read-low */
3831 __set_bit(msr, msr_bitmap + 0x000 / f);
3832
3833 if (type & MSR_TYPE_W)
3834 /* write-low */
3835 __set_bit(msr, msr_bitmap + 0x800 / f);
3836
3837 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
3838 msr &= 0x1fff;
3839 if (type & MSR_TYPE_R)
3840 /* read-high */
3841 __set_bit(msr, msr_bitmap + 0x400 / f);
3842
3843 if (type & MSR_TYPE_W)
3844 /* write-high */
3845 __set_bit(msr, msr_bitmap + 0xc00 / f);
3846
25c5f225 3847 }
25c5f225
SY
3848}
3849
5897297b
AK
3850static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
3851{
3852 if (!longmode_only)
8d14695f
YZ
3853 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy,
3854 msr, MSR_TYPE_R | MSR_TYPE_W);
3855 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode,
3856 msr, MSR_TYPE_R | MSR_TYPE_W);
3857}
3858
3859static void vmx_enable_intercept_msr_read_x2apic(u32 msr)
3860{
3861 __vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
3862 msr, MSR_TYPE_R);
3863 __vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
3864 msr, MSR_TYPE_R);
3865}
3866
3867static void vmx_disable_intercept_msr_read_x2apic(u32 msr)
3868{
3869 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
3870 msr, MSR_TYPE_R);
3871 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
3872 msr, MSR_TYPE_R);
3873}
3874
3875static void vmx_disable_intercept_msr_write_x2apic(u32 msr)
3876{
3877 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
3878 msr, MSR_TYPE_W);
3879 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
3880 msr, MSR_TYPE_W);
5897297b
AK
3881}
3882
a3a8ff8e
NHE
3883/*
3884 * Set up the vmcs's constant host-state fields, i.e., host-state fields that
3885 * will not change in the lifetime of the guest.
3886 * Note that host-state that does change is set elsewhere. E.g., host-state
3887 * that is set differently for each CPU is set in vmx_vcpu_load(), not here.
3888 */
3889static void vmx_set_constant_host_state(void)
3890{
3891 u32 low32, high32;
3892 unsigned long tmpl;
3893 struct desc_ptr dt;
3894
b1a74bf8 3895 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
a3a8ff8e
NHE
3896 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
3897 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
3898
3899 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
b2da15ac
AK
3900#ifdef CONFIG_X86_64
3901 /*
3902 * Load null selectors, so we can avoid reloading them in
3903 * __vmx_load_host_state(), in case userspace uses the null selectors
3904 * too (the expected case).
3905 */
3906 vmcs_write16(HOST_DS_SELECTOR, 0);
3907 vmcs_write16(HOST_ES_SELECTOR, 0);
3908#else
a3a8ff8e
NHE
3909 vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
3910 vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
b2da15ac 3911#endif
a3a8ff8e
NHE
3912 vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
3913 vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */
3914
3915 native_store_idt(&dt);
3916 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
3917
83287ea4 3918 vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
a3a8ff8e
NHE
3919
3920 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
3921 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
3922 rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
3923 vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl); /* 22.2.3 */
3924
3925 if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
3926 rdmsr(MSR_IA32_CR_PAT, low32, high32);
3927 vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32));
3928 }
3929}
3930
bf8179a0
NHE
3931static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
3932{
3933 vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
3934 if (enable_ept)
3935 vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
fe3ef05c
NHE
3936 if (is_guest_mode(&vmx->vcpu))
3937 vmx->vcpu.arch.cr4_guest_owned_bits &=
3938 ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask;
bf8179a0
NHE
3939 vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
3940}
3941
3942static u32 vmx_exec_control(struct vcpu_vmx *vmx)
3943{
3944 u32 exec_control = vmcs_config.cpu_based_exec_ctrl;
3945 if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
3946 exec_control &= ~CPU_BASED_TPR_SHADOW;
3947#ifdef CONFIG_X86_64
3948 exec_control |= CPU_BASED_CR8_STORE_EXITING |
3949 CPU_BASED_CR8_LOAD_EXITING;
3950#endif
3951 }
3952 if (!enable_ept)
3953 exec_control |= CPU_BASED_CR3_STORE_EXITING |
3954 CPU_BASED_CR3_LOAD_EXITING |
3955 CPU_BASED_INVLPG_EXITING;
3956 return exec_control;
3957}
3958
c7c9c56c
YZ
3959static int vmx_vm_has_apicv(struct kvm *kvm)
3960{
3961 return enable_apicv_reg_vid && irqchip_in_kernel(kvm);
3962}
3963
bf8179a0
NHE
3964static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
3965{
3966 u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
3967 if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
3968 exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
3969 if (vmx->vpid == 0)
3970 exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
3971 if (!enable_ept) {
3972 exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
3973 enable_unrestricted_guest = 0;
ad756a16
MJ
3974 /* Enabling INVPCID for non-EPT guests may cause a performance regression. */
3975 exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
bf8179a0
NHE
3976 }
3977 if (!enable_unrestricted_guest)
3978 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
3979 if (!ple_gap)
3980 exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
c7c9c56c
YZ
3981 if (!vmx_vm_has_apicv(vmx->vcpu.kvm))
3982 exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
3983 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
8d14695f 3984 exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
bf8179a0
NHE
3985 return exec_control;
3986}
3987
ce88decf
XG
3988static void ept_set_mmio_spte_mask(void)
3989{
3990 /*
3991 * EPT Misconfigurations can be generated if the value of bits 2:0
3992 * of an EPT paging-structure entry is 110b (write/execute).
3993 * Also, the magic bits (0xffull << 49) are set to quickly identify an MMIO
3994 * spte.
3995 */
3996 kvm_mmu_set_mmio_spte_mask(0xffull << 49 | 0x6ull);
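	/*
	 * For reference, the constant passed above works out to
	 * 0x01fe000000000006: bits 56:49 as the MMIO signature plus the
	 * write/execute permission bits, so an MMIO spte both carries the
	 * magic marker and is guaranteed to be an EPT misconfiguration.
	 */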
3997}
3998
6aa8b732
AK
3999/*
4000 * Sets up the vmcs for emulated real mode.
4001 */
8b9cf98c 4002static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
6aa8b732 4003{
2e4ce7f5 4004#ifdef CONFIG_X86_64
6aa8b732 4005 unsigned long a;
2e4ce7f5 4006#endif
6aa8b732 4007 int i;
6aa8b732 4008
6aa8b732 4009 /* I/O */
3e7c73e9
AK
4010 vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a));
4011 vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b));
6aa8b732 4012
25c5f225 4013 if (cpu_has_vmx_msr_bitmap())
5897297b 4014 vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy));
25c5f225 4015
6aa8b732
AK
4016 vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
4017
6aa8b732 4018 /* Control */
1c3d14fe
YS
4019 vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
4020 vmcs_config.pin_based_exec_ctrl);
6e5d865c 4021
bf8179a0 4022 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx));
6aa8b732 4023
83ff3b9d 4024 if (cpu_has_secondary_exec_ctrls()) {
bf8179a0
NHE
4025 vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
4026 vmx_secondary_exec_control(vmx));
83ff3b9d 4027 }
f78e0e2e 4028
c7c9c56c
YZ
4029 if (enable_apicv_reg_vid) {
4030 vmcs_write64(EOI_EXIT_BITMAP0, 0);
4031 vmcs_write64(EOI_EXIT_BITMAP1, 0);
4032 vmcs_write64(EOI_EXIT_BITMAP2, 0);
4033 vmcs_write64(EOI_EXIT_BITMAP3, 0);
4034
4035 vmcs_write16(GUEST_INTR_STATUS, 0);
4036 }
4037
4b8d54f9
ZE
4038 if (ple_gap) {
4039 vmcs_write32(PLE_GAP, ple_gap);
4040 vmcs_write32(PLE_WINDOW, ple_window);
4041 }
4042
c3707958
XG
4043 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
4044 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
6aa8b732
AK
4045 vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */
4046
9581d442
AK
4047 vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */
4048 vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */
a3a8ff8e 4049 vmx_set_constant_host_state();
05b3e0c2 4050#ifdef CONFIG_X86_64
6aa8b732
AK
4051 rdmsrl(MSR_FS_BASE, a);
4052 vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
4053 rdmsrl(MSR_GS_BASE, a);
4054 vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
4055#else
4056 vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
4057 vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
4058#endif
4059
2cc51560
ED
4060 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
4061 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
61d2ef2c 4062 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
2cc51560 4063 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
61d2ef2c 4064 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
6aa8b732 4065
468d472f 4066 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
a3a8ff8e
NHE
4067 u32 msr_low, msr_high;
4068 u64 host_pat;
468d472f
SY
4069 rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high);
4070 host_pat = msr_low | ((u64) msr_high << 32);
4071 /* Write the default value, following the host PAT */
4072 vmcs_write64(GUEST_IA32_PAT, host_pat);
4073 /* Keep arch.pat in sync with GUEST_IA32_PAT */
4074 vmx->vcpu.arch.pat = host_pat;
4075 }
4076
6aa8b732
AK
4077 for (i = 0; i < NR_VMX_MSR; ++i) {
4078 u32 index = vmx_msr_index[i];
4079 u32 data_low, data_high;
a2fa3e9f 4080 int j = vmx->nmsrs;
6aa8b732
AK
4081
4082 if (rdmsr_safe(index, &data_low, &data_high) < 0)
4083 continue;
432bd6cb
AK
4084 if (wrmsr_safe(index, data_low, data_high) < 0)
4085 continue;
26bb0981
AK
4086 vmx->guest_msrs[j].index = i;
4087 vmx->guest_msrs[j].data = 0;
d5696725 4088 vmx->guest_msrs[j].mask = -1ull;
a2fa3e9f 4089 ++vmx->nmsrs;
6aa8b732 4090 }
6aa8b732 4091
1c3d14fe 4092 vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
6aa8b732
AK
4093
4094 /* 22.2.1, 20.8.1 */
1c3d14fe
YS
4095 vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
4096
e00c8cf2 4097 vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
bf8179a0 4098 set_cr4_guest_host_mask(vmx);
e00c8cf2
AK
4099
4100 return 0;
4101}
4102
57f252f2 4103static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
e00c8cf2
AK
4104{
4105 struct vcpu_vmx *vmx = to_vmx(vcpu);
4106 u64 msr;
e00c8cf2 4107
7ffd92c5 4108 vmx->rmode.vm86_active = 0;
e00c8cf2 4109
3b86cd99
JK
4110 vmx->soft_vnmi_blocked = 0;
4111
ad312c7c 4112 vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
2d3ad1f4 4113 kvm_set_cr8(&vmx->vcpu, 0);
e00c8cf2 4114 msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
c5af89b6 4115 if (kvm_vcpu_is_bsp(&vmx->vcpu))
e00c8cf2
AK
4116 msr |= MSR_IA32_APICBASE_BSP;
4117 kvm_set_apic_base(&vmx->vcpu, msr);
4118
2fb92db1
AK
4119 vmx_segment_cache_clear(vmx);
4120
5706be0d 4121 seg_setup(VCPU_SREG_CS);
66450a21 4122 vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
e00c8cf2
AK
4123
4124 seg_setup(VCPU_SREG_DS);
4125 seg_setup(VCPU_SREG_ES);
4126 seg_setup(VCPU_SREG_FS);
4127 seg_setup(VCPU_SREG_GS);
4128 seg_setup(VCPU_SREG_SS);
4129
4130 vmcs_write16(GUEST_TR_SELECTOR, 0);
4131 vmcs_writel(GUEST_TR_BASE, 0);
4132 vmcs_write32(GUEST_TR_LIMIT, 0xffff);
4133 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
4134
4135 vmcs_write16(GUEST_LDTR_SELECTOR, 0);
4136 vmcs_writel(GUEST_LDTR_BASE, 0);
4137 vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
4138 vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);
4139
4140 vmcs_write32(GUEST_SYSENTER_CS, 0);
4141 vmcs_writel(GUEST_SYSENTER_ESP, 0);
4142 vmcs_writel(GUEST_SYSENTER_EIP, 0);
4143
4144 vmcs_writel(GUEST_RFLAGS, 0x02);
66450a21 4145 kvm_rip_write(vcpu, 0xfff0);
e00c8cf2 4146
e00c8cf2
AK
4147 vmcs_writel(GUEST_GDTR_BASE, 0);
4148 vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
4149
4150 vmcs_writel(GUEST_IDTR_BASE, 0);
4151 vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
4152
443381a8 4153 vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
e00c8cf2
AK
4154 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
4155 vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
4156
e00c8cf2
AK
4157 /* Special registers */
4158 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
4159
4160 setup_msrs(vmx);
4161
6aa8b732
AK
4162 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */
4163
f78e0e2e
SY
4164 if (cpu_has_vmx_tpr_shadow()) {
4165 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
4166 if (vm_need_tpr_shadow(vmx->vcpu.kvm))
4167 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
afc20184 4168 __pa(vmx->vcpu.arch.apic->regs));
f78e0e2e
SY
4169 vmcs_write32(TPR_THRESHOLD, 0);
4170 }
4171
4172 if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
4173 vmcs_write64(APIC_ACCESS_ADDR,
bfc6d222 4174 page_to_phys(vmx->vcpu.kvm->arch.apic_access_page));
6aa8b732 4175
2384d2b3
SY
4176 if (vmx->vpid != 0)
4177 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
4178
fa40052c 4179 vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
7a4f5ad0 4180 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
4d4ec087 4181 vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */
7a4f5ad0 4182 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
8b9cf98c 4183 vmx_set_cr4(&vmx->vcpu, 0);
8b9cf98c 4184 vmx_set_efer(&vmx->vcpu, 0);
8b9cf98c
RR
4185 vmx_fpu_activate(&vmx->vcpu);
4186 update_exception_bitmap(&vmx->vcpu);
6aa8b732 4187
b9d762fa 4188 vpid_sync_context(vmx);
6aa8b732
AK
4189}
4190
b6f1250e
NHE
4191/*
4192 * In nested virtualization, check if L1 asked to exit on external interrupts.
4193 * For most existing hypervisors, this will always return true.
4194 */
4195static bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
4196{
4197 return get_vmcs12(vcpu)->pin_based_vm_exec_control &
4198 PIN_BASED_EXT_INTR_MASK;
4199}
4200
3b86cd99
JK
4201static void enable_irq_window(struct kvm_vcpu *vcpu)
4202{
4203 u32 cpu_based_vm_exec_control;
d6185f20
NHE
4204 if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) {
4205 /*
4206 * We get here if vmx_interrupt_allowed() said we can't
4207 * inject to L1 now because L2 must run. Ask L2 to exit
4208 * right after entry, so we can inject to L1 more promptly.
b6f1250e 4209 */
d6185f20 4210 kvm_make_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
b6f1250e 4211 return;
d6185f20 4212 }
3b86cd99
JK
4213
4214 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
4215 cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
4216 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
4217}
4218
4219static void enable_nmi_window(struct kvm_vcpu *vcpu)
4220{
4221 u32 cpu_based_vm_exec_control;
4222
4223 if (!cpu_has_virtual_nmis()) {
4224 enable_irq_window(vcpu);
4225 return;
4226 }
4227
30bd0c4c
AK
4228 if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
4229 enable_irq_window(vcpu);
4230 return;
4231 }
3b86cd99
JK
4232 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
4233 cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING;
4234 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
4235}
4236
66fd3f7f 4237static void vmx_inject_irq(struct kvm_vcpu *vcpu)
85f455f7 4238{
9c8cba37 4239 struct vcpu_vmx *vmx = to_vmx(vcpu);
66fd3f7f
GN
4240 uint32_t intr;
4241 int irq = vcpu->arch.interrupt.nr;
9c8cba37 4242
229456fc 4243 trace_kvm_inj_virq(irq);
2714d1d3 4244
fa89a817 4245 ++vcpu->stat.irq_injections;
7ffd92c5 4246 if (vmx->rmode.vm86_active) {
71f9833b
SH
4247 int inc_eip = 0;
4248 if (vcpu->arch.interrupt.soft)
4249 inc_eip = vcpu->arch.event_exit_inst_len;
4250 if (kvm_inject_realmode_interrupt(vcpu, irq, inc_eip) != EMULATE_DONE)
a92601bb 4251 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
85f455f7
ED
4252 return;
4253 }
66fd3f7f
GN
4254 intr = irq | INTR_INFO_VALID_MASK;
4255 if (vcpu->arch.interrupt.soft) {
4256 intr |= INTR_TYPE_SOFT_INTR;
4257 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
4258 vmx->vcpu.arch.event_exit_inst_len);
4259 } else
4260 intr |= INTR_TYPE_EXT_INTR;
4261 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);
85f455f7
ED
4262}
4263
f08864b4
SY
4264static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
4265{
66a5a347
JK
4266 struct vcpu_vmx *vmx = to_vmx(vcpu);
4267
0b6ac343
NHE
4268 if (is_guest_mode(vcpu))
4269 return;
4270
3b86cd99
JK
4271 if (!cpu_has_virtual_nmis()) {
4272 /*
4273 * Tracking the NMI-blocked state in software is built upon
4274 * finding the next open IRQ window. This, in turn, depends on
4275 * well-behaving guests: They have to keep IRQs disabled at
4276 * least as long as the NMI handler runs. Otherwise we may
4277 * cause NMI nesting, maybe breaking the guest. But as this is
4278 * highly unlikely, we can live with the residual risk.
4279 */
4280 vmx->soft_vnmi_blocked = 1;
4281 vmx->vnmi_blocked_time = 0;
4282 }
4283
487b391d 4284 ++vcpu->stat.nmi_injections;
9d58b931 4285 vmx->nmi_known_unmasked = false;
7ffd92c5 4286 if (vmx->rmode.vm86_active) {
71f9833b 4287 if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE)
a92601bb 4288 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
66a5a347
JK
4289 return;
4290 }
f08864b4
SY
4291 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
4292 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
f08864b4
SY
4293}
4294
c4282df9 4295static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
33f089ca 4296{
3b86cd99 4297 if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked)
c4282df9 4298 return 0;
33f089ca 4299
c4282df9 4300 return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
30bd0c4c
AK
4301 (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
4302 | GUEST_INTR_STATE_NMI));
33f089ca
JK
4303}
4304
3cfc3092
JK
4305static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
4306{
4307 if (!cpu_has_virtual_nmis())
4308 return to_vmx(vcpu)->soft_vnmi_blocked;
9d58b931
AK
4309 if (to_vmx(vcpu)->nmi_known_unmasked)
4310 return false;
c332c83a 4311 return vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
3cfc3092
JK
4312}
4313
4314static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
4315{
4316 struct vcpu_vmx *vmx = to_vmx(vcpu);
4317
4318 if (!cpu_has_virtual_nmis()) {
4319 if (vmx->soft_vnmi_blocked != masked) {
4320 vmx->soft_vnmi_blocked = masked;
4321 vmx->vnmi_blocked_time = 0;
4322 }
4323 } else {
9d58b931 4324 vmx->nmi_known_unmasked = !masked;
3cfc3092
JK
4325 if (masked)
4326 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
4327 GUEST_INTR_STATE_NMI);
4328 else
4329 vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
4330 GUEST_INTR_STATE_NMI);
4331 }
4332}
4333
78646121
GN
4334static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
4335{
b6f1250e 4336 if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) {
51cfe38e
NHE
4337 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4338 if (to_vmx(vcpu)->nested.nested_run_pending ||
4339 (vmcs12->idt_vectoring_info_field &
4340 VECTORING_INFO_VALID_MASK))
b6f1250e
NHE
4341 return 0;
4342 nested_vmx_vmexit(vcpu);
b6f1250e
NHE
4343 vmcs12->vm_exit_reason = EXIT_REASON_EXTERNAL_INTERRUPT;
4344 vmcs12->vm_exit_intr_info = 0;
4345 /* fall through to normal code, but now in L1, not L2 */
4346 }
4347
c4282df9
GN
4348 return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
4349 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
4350 (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
78646121
GN
4351}
4352
cbc94022
IE
4353static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
4354{
4355 int ret;
4356 struct kvm_userspace_memory_region tss_mem = {
6fe63979 4357 .slot = TSS_PRIVATE_MEMSLOT,
cbc94022
IE
4358 .guest_phys_addr = addr,
4359 .memory_size = PAGE_SIZE * 3,
4360 .flags = 0,
4361 };
4362
47ae31e2 4363 ret = kvm_set_memory_region(kvm, &tss_mem);
cbc94022
IE
4364 if (ret)
4365 return ret;
bfc6d222 4366 kvm->arch.tss_addr = addr;
93ea5388
GN
4367 if (!init_rmode_tss(kvm))
4368 return -ENOMEM;
4369
cbc94022
IE
4370 return 0;
4371}
4372
0ca1b4f4 4373static bool rmode_exception(struct kvm_vcpu *vcpu, int vec)
6aa8b732 4374{
77ab6db0 4375 switch (vec) {
77ab6db0 4376 case BP_VECTOR:
c573cd22
JK
4377 /*
4378 * Update instruction length as we may reinject the exception
4379 * from user space while in guest debugging mode.
4380 */
4381 to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
4382 vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
d0bfb940 4383 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
0ca1b4f4
GN
4384 return false;
4385 /* fall through */
4386 case DB_VECTOR:
4387 if (vcpu->guest_debug &
4388 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
4389 return false;
d0bfb940
JK
4390 /* fall through */
4391 case DE_VECTOR:
77ab6db0
JK
4392 case OF_VECTOR:
4393 case BR_VECTOR:
4394 case UD_VECTOR:
4395 case DF_VECTOR:
4396 case SS_VECTOR:
4397 case GP_VECTOR:
4398 case MF_VECTOR:
0ca1b4f4
GN
4399 return true;
4400 break;
77ab6db0 4401 }
0ca1b4f4
GN
4402 return false;
4403}
4404
4405static int handle_rmode_exception(struct kvm_vcpu *vcpu,
4406 int vec, u32 err_code)
4407{
4408 /*
4409 * An instruction with the address-size override prefix (opcode 0x67)
4410 * causes a #SS fault with error code 0 in VM86 mode.
4411 */
4412 if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) {
4413 if (emulate_instruction(vcpu, 0) == EMULATE_DONE) {
4414 if (vcpu->arch.halt_request) {
4415 vcpu->arch.halt_request = 0;
4416 return kvm_emulate_halt(vcpu);
4417 }
4418 return 1;
4419 }
4420 return 0;
4421 }
4422
4423 /*
4424 * Forward all other exceptions that are valid in real mode.
4425 * FIXME: Breaks guest debugging in real mode, needs to be fixed with
4426 * the required debugging infrastructure rework.
4427 */
4428 kvm_queue_exception(vcpu, vec);
4429 return 1;
6aa8b732
AK
4430}
4431
a0861c02
AK
4432/*
4433 * Trigger machine check on the host. We assume all the MSRs are already set up
4434 * by the CPU and that we still run on the same CPU as the MCE occurred on.
4435 * We pass a fake environment to the machine check handler because we want
4436 * the guest to be always treated like user space, no matter what context
4437 * it used internally.
4438 */
4439static void kvm_machine_check(void)
4440{
4441#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64)
4442 struct pt_regs regs = {
4443 .cs = 3, /* Fake ring 3 no matter what the guest ran on */
4444 .flags = X86_EFLAGS_IF,
4445 };
4446
4447 do_machine_check(&regs, 0);
4448#endif
4449}
4450
851ba692 4451static int handle_machine_check(struct kvm_vcpu *vcpu)
a0861c02
AK
4452{
4453 /* already handled by vcpu_run */
4454 return 1;
4455}
4456
851ba692 4457static int handle_exception(struct kvm_vcpu *vcpu)
6aa8b732 4458{
1155f76a 4459 struct vcpu_vmx *vmx = to_vmx(vcpu);
851ba692 4460 struct kvm_run *kvm_run = vcpu->run;
d0bfb940 4461 u32 intr_info, ex_no, error_code;
42dbaa5a 4462 unsigned long cr2, rip, dr6;
6aa8b732
AK
4463 u32 vect_info;
4464 enum emulation_result er;
4465
1155f76a 4466 vect_info = vmx->idt_vectoring_info;
88786475 4467 intr_info = vmx->exit_intr_info;
6aa8b732 4468
a0861c02 4469 if (is_machine_check(intr_info))
851ba692 4470 return handle_machine_check(vcpu);
a0861c02 4471
e4a41889 4472 if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
1b6269db 4473 return 1; /* already handled by vmx_vcpu_run() */
2ab455cc
AL
4474
4475 if (is_no_device(intr_info)) {
5fd86fcf 4476 vmx_fpu_activate(vcpu);
2ab455cc
AL
4477 return 1;
4478 }
4479
7aa81cc0 4480 if (is_invalid_opcode(intr_info)) {
51d8b661 4481 er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
7aa81cc0 4482 if (er != EMULATE_DONE)
7ee5d940 4483 kvm_queue_exception(vcpu, UD_VECTOR);
7aa81cc0
AL
4484 return 1;
4485 }
4486
6aa8b732 4487 error_code = 0;
2e11384c 4488 if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
6aa8b732 4489 error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
bf4ca23e
XG
4490
4491 /*
4492 * The #PF with PFEC.RSVD = 1 indicates the guest is accessing
4493 * MMIO; it is better to report an internal error.
4494 * See the comments in vmx_handle_exit.
4495 */
4496 if ((vect_info & VECTORING_INFO_VALID_MASK) &&
4497 !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) {
4498 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
4499 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
4500 vcpu->run->internal.ndata = 2;
4501 vcpu->run->internal.data[0] = vect_info;
4502 vcpu->run->internal.data[1] = intr_info;
4503 return 0;
4504 }
4505
6aa8b732 4506 if (is_page_fault(intr_info)) {
1439442c 4507 /* EPT won't cause page fault directly */
cf3ace79 4508 BUG_ON(enable_ept);
6aa8b732 4509 cr2 = vmcs_readl(EXIT_QUALIFICATION);
229456fc
MT
4510 trace_kvm_page_fault(cr2, error_code);
4511
3298b75c 4512 if (kvm_event_needs_reinjection(vcpu))
577bdc49 4513 kvm_mmu_unprotect_page_virt(vcpu, cr2);
dc25e89e 4514 return kvm_mmu_page_fault(vcpu, cr2, error_code, NULL, 0);
6aa8b732
AK
4515 }
4516
d0bfb940 4517 ex_no = intr_info & INTR_INFO_VECTOR_MASK;
0ca1b4f4
GN
4518
4519 if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no))
4520 return handle_rmode_exception(vcpu, ex_no, error_code);
4521
42dbaa5a
JK
4522 switch (ex_no) {
4523 case DB_VECTOR:
4524 dr6 = vmcs_readl(EXIT_QUALIFICATION);
4525 if (!(vcpu->guest_debug &
4526 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
4527 vcpu->arch.dr6 = dr6 | DR6_FIXED_1;
4528 kvm_queue_exception(vcpu, DB_VECTOR);
4529 return 1;
4530 }
4531 kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
4532 kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
4533 /* fall through */
4534 case BP_VECTOR:
c573cd22
JK
4535 /*
4536 * Update instruction length as we may reinject #BP from
4537 * user space while in guest debugging mode. Reading it for
4538 * #DB as well causes no harm, it is not used in that case.
4539 */
4540 vmx->vcpu.arch.event_exit_inst_len =
4541 vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
6aa8b732 4542 kvm_run->exit_reason = KVM_EXIT_DEBUG;
0a434bb2 4543 rip = kvm_rip_read(vcpu);
d0bfb940
JK
4544 kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
4545 kvm_run->debug.arch.exception = ex_no;
42dbaa5a
JK
4546 break;
4547 default:
d0bfb940
JK
4548 kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
4549 kvm_run->ex.exception = ex_no;
4550 kvm_run->ex.error_code = error_code;
42dbaa5a 4551 break;
6aa8b732 4552 }
6aa8b732
AK
4553 return 0;
4554}
4555
851ba692 4556static int handle_external_interrupt(struct kvm_vcpu *vcpu)
6aa8b732 4557{
1165f5fe 4558 ++vcpu->stat.irq_exits;
6aa8b732
AK
4559 return 1;
4560}
4561
851ba692 4562static int handle_triple_fault(struct kvm_vcpu *vcpu)
988ad74f 4563{
851ba692 4564 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
988ad74f
AK
4565 return 0;
4566}
6aa8b732 4567
851ba692 4568static int handle_io(struct kvm_vcpu *vcpu)
6aa8b732 4569{
bfdaab09 4570 unsigned long exit_qualification;
34c33d16 4571 int size, in, string;
039576c0 4572 unsigned port;
6aa8b732 4573
bfdaab09 4574 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
039576c0 4575 string = (exit_qualification & 16) != 0;
cf8f70bf 4576 in = (exit_qualification & 8) != 0;
e70669ab 4577
cf8f70bf 4578 ++vcpu->stat.io_exits;
e70669ab 4579
cf8f70bf 4580 if (string || in)
51d8b661 4581 return emulate_instruction(vcpu, 0) == EMULATE_DONE;
e70669ab 4582
cf8f70bf
GN
4583 port = exit_qualification >> 16;
4584 size = (exit_qualification & 7) + 1;
e93f36bc 4585 skip_emulated_instruction(vcpu);
cf8f70bf
GN
4586
4587 return kvm_fast_pio_out(vcpu, size, port);
6aa8b732
AK
4588}
4589
102d8325
IM
4590static void
4591vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
4592{
4593 /*
4594 * Patch in the VMCALL instruction:
4595 */
4596 hypercall[0] = 0x0f;
4597 hypercall[1] = 0x01;
4598 hypercall[2] = 0xc1;
102d8325
IM
4599}
4600
0fa06071 4601/* called to set cr0 as appropriate for a mov-to-cr0 exit. */
eeadf9e7
NHE
4602static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
4603{
eeadf9e7 4604 if (is_guest_mode(vcpu)) {
1a0d74e6
JK
4605 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4606 unsigned long orig_val = val;
4607
eeadf9e7
NHE
4608 /*
4609 * We get here when L2 changed cr0 in a way that did not change
4610 * any of L1's shadowed bits (see nested_vmx_exit_handled_cr),
1a0d74e6
JK
4611 * but did change L0 shadowed bits. So we first calculate the
4612 * effective cr0 value that L1 would like to write into the
4613 * hardware. It consists of the L2-owned bits from the new
4614 * value combined with the L1-owned bits from L1's guest_cr0.
eeadf9e7 4615 */
1a0d74e6
JK
4616 val = (val & ~vmcs12->cr0_guest_host_mask) |
4617 (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);
4618
4619 /* TODO: will have to take unrestricted guest mode into
4620 * account */
4621 if ((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON)
eeadf9e7 4622 return 1;
1a0d74e6
JK
4623
4624 if (kvm_set_cr0(vcpu, val))
4625 return 1;
4626 vmcs_writel(CR0_READ_SHADOW, orig_val);
eeadf9e7 4627 return 0;
1a0d74e6
JK
4628 } else {
4629 if (to_vmx(vcpu)->nested.vmxon &&
4630 ((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON))
4631 return 1;
eeadf9e7 4632 return kvm_set_cr0(vcpu, val);
1a0d74e6 4633 }
eeadf9e7
NHE
4634}
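/*
 * Worked example of the masking above (hypothetical values): if L1 owns
 * only CR0.NE, i.e. cr0_guest_host_mask == 0x20, and vmcs12->guest_cr0
 * has NE set, then an L2 write of 0x80000011 is turned into 0x80000031
 * before being handed to kvm_set_cr0(), because the L1-owned bit is taken
 * from vmcs12 rather than from the value L2 tried to write.
 */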
4635
4636static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
4637{
4638 if (is_guest_mode(vcpu)) {
1a0d74e6
JK
4639 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4640 unsigned long orig_val = val;
4641
4642 /* analogously to handle_set_cr0 */
4643 val = (val & ~vmcs12->cr4_guest_host_mask) |
4644 (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask);
4645 if (kvm_set_cr4(vcpu, val))
eeadf9e7 4646 return 1;
1a0d74e6 4647 vmcs_writel(CR4_READ_SHADOW, orig_val);
eeadf9e7
NHE
4648 return 0;
4649 } else
4650 return kvm_set_cr4(vcpu, val);
4651}
4652
4653/* called to set cr0 as appropriate for a clts instruction exit. */
4654static void handle_clts(struct kvm_vcpu *vcpu)
4655{
4656 if (is_guest_mode(vcpu)) {
4657 /*
4658 * We get here when L2 did CLTS, and L1 didn't shadow CR0.TS
4659 * but we did (!fpu_active). We need to keep GUEST_CR0.TS on,
4660 * just pretend it's off (also in arch.cr0 for fpu_activate).
4661 */
4662 vmcs_writel(CR0_READ_SHADOW,
4663 vmcs_readl(CR0_READ_SHADOW) & ~X86_CR0_TS);
4664 vcpu->arch.cr0 &= ~X86_CR0_TS;
4665 } else
4666 vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
4667}
4668
851ba692 4669static int handle_cr(struct kvm_vcpu *vcpu)
6aa8b732 4670{
229456fc 4671 unsigned long exit_qualification, val;
6aa8b732
AK
4672 int cr;
4673 int reg;
49a9b07e 4674 int err;
6aa8b732 4675
bfdaab09 4676 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
6aa8b732
AK
4677 cr = exit_qualification & 15;
4678 reg = (exit_qualification >> 8) & 15;
4679 switch ((exit_qualification >> 4) & 3) {
4680 case 0: /* mov to cr */
229456fc
MT
4681 val = kvm_register_read(vcpu, reg);
4682 trace_kvm_cr_write(cr, val);
6aa8b732
AK
4683 switch (cr) {
4684 case 0:
eeadf9e7 4685 err = handle_set_cr0(vcpu, val);
db8fcefa 4686 kvm_complete_insn_gp(vcpu, err);
6aa8b732
AK
4687 return 1;
4688 case 3:
2390218b 4689 err = kvm_set_cr3(vcpu, val);
db8fcefa 4690 kvm_complete_insn_gp(vcpu, err);
6aa8b732
AK
4691 return 1;
4692 case 4:
eeadf9e7 4693 err = handle_set_cr4(vcpu, val);
db8fcefa 4694 kvm_complete_insn_gp(vcpu, err);
6aa8b732 4695 return 1;
0a5fff19
GN
4696 case 8: {
4697 u8 cr8_prev = kvm_get_cr8(vcpu);
4698 u8 cr8 = kvm_register_read(vcpu, reg);
eea1cff9 4699 err = kvm_set_cr8(vcpu, cr8);
db8fcefa 4700 kvm_complete_insn_gp(vcpu, err);
0a5fff19
GN
4701 if (irqchip_in_kernel(vcpu->kvm))
4702 return 1;
4703 if (cr8_prev <= cr8)
4704 return 1;
851ba692 4705 vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
0a5fff19
GN
4706 return 0;
4707 }
4b8073e4 4708 }
6aa8b732 4709 break;
25c4c276 4710 case 2: /* clts */
eeadf9e7 4711 handle_clts(vcpu);
4d4ec087 4712 trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
25c4c276 4713 skip_emulated_instruction(vcpu);
6b52d186 4714 vmx_fpu_activate(vcpu);
25c4c276 4715 return 1;
6aa8b732
AK
4716 case 1: /*mov from cr*/
4717 switch (cr) {
4718 case 3:
9f8fe504
AK
4719 val = kvm_read_cr3(vcpu);
4720 kvm_register_write(vcpu, reg, val);
4721 trace_kvm_cr_read(cr, val);
6aa8b732
AK
4722 skip_emulated_instruction(vcpu);
4723 return 1;
4724 case 8:
229456fc
MT
4725 val = kvm_get_cr8(vcpu);
4726 kvm_register_write(vcpu, reg, val);
4727 trace_kvm_cr_read(cr, val);
6aa8b732
AK
4728 skip_emulated_instruction(vcpu);
4729 return 1;
4730 }
4731 break;
4732 case 3: /* lmsw */
a1f83a74 4733 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
4d4ec087 4734 trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
a1f83a74 4735 kvm_lmsw(vcpu, val);
6aa8b732
AK
4736
4737 skip_emulated_instruction(vcpu);
4738 return 1;
4739 default:
4740 break;
4741 }
851ba692 4742 vcpu->run->exit_reason = 0;
a737f256 4743 vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
6aa8b732
AK
4744 (int)(exit_qualification >> 4) & 3, cr);
4745 return 0;
4746}
4747
851ba692 4748static int handle_dr(struct kvm_vcpu *vcpu)
6aa8b732 4749{
bfdaab09 4750 unsigned long exit_qualification;
6aa8b732
AK
4751 int dr, reg;
4752
f2483415 4753 /* Do not handle if the CPL > 0, will trigger GP on re-entry */
0a79b009
AK
4754 if (!kvm_require_cpl(vcpu, 0))
4755 return 1;
42dbaa5a
JK
4756 dr = vmcs_readl(GUEST_DR7);
4757 if (dr & DR7_GD) {
4758 /*
4759 * As the vm-exit takes precedence over the debug trap, we
4760 * need to emulate the latter, either for the host or the
4761 * guest debugging itself.
4762 */
4763 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
851ba692
AK
4764 vcpu->run->debug.arch.dr6 = vcpu->arch.dr6;
4765 vcpu->run->debug.arch.dr7 = dr;
4766 vcpu->run->debug.arch.pc =
42dbaa5a
JK
4767 vmcs_readl(GUEST_CS_BASE) +
4768 vmcs_readl(GUEST_RIP);
851ba692
AK
4769 vcpu->run->debug.arch.exception = DB_VECTOR;
4770 vcpu->run->exit_reason = KVM_EXIT_DEBUG;
42dbaa5a
JK
4771 return 0;
4772 } else {
4773 vcpu->arch.dr7 &= ~DR7_GD;
4774 vcpu->arch.dr6 |= DR6_BD;
4775 vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
4776 kvm_queue_exception(vcpu, DB_VECTOR);
4777 return 1;
4778 }
4779 }
4780
bfdaab09 4781 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
42dbaa5a
JK
4782 dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
4783 reg = DEBUG_REG_ACCESS_REG(exit_qualification);
4784 if (exit_qualification & TYPE_MOV_FROM_DR) {
020df079
GN
4785 unsigned long val;
4786 if (!kvm_get_dr(vcpu, dr, &val))
4787 kvm_register_write(vcpu, reg, val);
4788 } else
4789 kvm_set_dr(vcpu, dr, vcpu->arch.regs[reg]);
6aa8b732
AK
4790 skip_emulated_instruction(vcpu);
4791 return 1;
4792}
4793
020df079
GN
4794static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
4795{
4796 vmcs_writel(GUEST_DR7, val);
4797}
4798
851ba692 4799static int handle_cpuid(struct kvm_vcpu *vcpu)
6aa8b732 4800{
06465c5a
AK
4801 kvm_emulate_cpuid(vcpu);
4802 return 1;
6aa8b732
AK
4803}
4804
851ba692 4805static int handle_rdmsr(struct kvm_vcpu *vcpu)
6aa8b732 4806{
ad312c7c 4807 u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
6aa8b732
AK
4808 u64 data;
4809
4810 if (vmx_get_msr(vcpu, ecx, &data)) {
59200273 4811 trace_kvm_msr_read_ex(ecx);
c1a5d4f9 4812 kvm_inject_gp(vcpu, 0);
6aa8b732
AK
4813 return 1;
4814 }
4815
229456fc 4816 trace_kvm_msr_read(ecx, data);
2714d1d3 4817
6aa8b732 4818 /* FIXME: handling of bits 32:63 of rax, rdx */
ad312c7c
ZX
4819 vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
4820 vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
6aa8b732
AK
4821 skip_emulated_instruction(vcpu);
4822 return 1;
4823}
4824
851ba692 4825static int handle_wrmsr(struct kvm_vcpu *vcpu)
6aa8b732 4826{
8fe8ab46 4827 struct msr_data msr;
ad312c7c
ZX
4828 u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
4829 u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
4830 | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
6aa8b732 4831
8fe8ab46
WA
4832 msr.data = data;
4833 msr.index = ecx;
4834 msr.host_initiated = false;
4835 if (vmx_set_msr(vcpu, &msr) != 0) {
59200273 4836 trace_kvm_msr_write_ex(ecx, data);
c1a5d4f9 4837 kvm_inject_gp(vcpu, 0);
6aa8b732
AK
4838 return 1;
4839 }
4840
59200273 4841 trace_kvm_msr_write(ecx, data);
6aa8b732
AK
4842 skip_emulated_instruction(vcpu);
4843 return 1;
4844}
4845
851ba692 4846static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
6e5d865c 4847{
3842d135 4848 kvm_make_request(KVM_REQ_EVENT, vcpu);
6e5d865c
YS
4849 return 1;
4850}
4851
851ba692 4852static int handle_interrupt_window(struct kvm_vcpu *vcpu)
6aa8b732 4853{
85f455f7
ED
4854 u32 cpu_based_vm_exec_control;
4855
4856 /* clear pending irq */
4857 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
4858 cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
4859 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
2714d1d3 4860
3842d135
AK
4861 kvm_make_request(KVM_REQ_EVENT, vcpu);
4862
a26bf12a 4863 ++vcpu->stat.irq_window_exits;
2714d1d3 4864
c1150d8c
DL
4865 /*
4866 * If user space is waiting to inject interrupts, exit as soon as
4867 * possible
4868 */
8061823a 4869 if (!irqchip_in_kernel(vcpu->kvm) &&
851ba692 4870 vcpu->run->request_interrupt_window &&
8061823a 4871 !kvm_cpu_has_interrupt(vcpu)) {
851ba692 4872 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
c1150d8c
DL
4873 return 0;
4874 }
6aa8b732
AK
4875 return 1;
4876}
4877
851ba692 4878static int handle_halt(struct kvm_vcpu *vcpu)
6aa8b732
AK
4879{
4880 skip_emulated_instruction(vcpu);
d3bef15f 4881 return kvm_emulate_halt(vcpu);
6aa8b732
AK
4882}
4883
851ba692 4884static int handle_vmcall(struct kvm_vcpu *vcpu)
c21415e8 4885{
510043da 4886 skip_emulated_instruction(vcpu);
7aa81cc0
AL
4887 kvm_emulate_hypercall(vcpu);
4888 return 1;
c21415e8
IM
4889}
4890
ec25d5e6
GN
4891static int handle_invd(struct kvm_vcpu *vcpu)
4892{
51d8b661 4893 return emulate_instruction(vcpu, 0) == EMULATE_DONE;
ec25d5e6
GN
4894}
4895
851ba692 4896static int handle_invlpg(struct kvm_vcpu *vcpu)
a7052897 4897{
f9c617f6 4898 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
a7052897
MT
4899
4900 kvm_mmu_invlpg(vcpu, exit_qualification);
4901 skip_emulated_instruction(vcpu);
4902 return 1;
4903}
4904
fee84b07
AK
4905static int handle_rdpmc(struct kvm_vcpu *vcpu)
4906{
4907 int err;
4908
4909 err = kvm_rdpmc(vcpu);
4910 kvm_complete_insn_gp(vcpu, err);
4911
4912 return 1;
4913}
4914
851ba692 4915static int handle_wbinvd(struct kvm_vcpu *vcpu)
e5edaa01
ED
4916{
4917 skip_emulated_instruction(vcpu);
f5f48ee1 4918 kvm_emulate_wbinvd(vcpu);
e5edaa01
ED
4919 return 1;
4920}
4921
2acf923e
DC
4922static int handle_xsetbv(struct kvm_vcpu *vcpu)
4923{
4924 u64 new_bv = kvm_read_edx_eax(vcpu);
4925 u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
4926
4927 if (kvm_set_xcr(vcpu, index, new_bv) == 0)
4928 skip_emulated_instruction(vcpu);
4929 return 1;
4930}
4931
851ba692 4932static int handle_apic_access(struct kvm_vcpu *vcpu)
f78e0e2e 4933{
58fbbf26
KT
4934 if (likely(fasteoi)) {
4935 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4936 int access_type, offset;
4937
4938 access_type = exit_qualification & APIC_ACCESS_TYPE;
4939 offset = exit_qualification & APIC_ACCESS_OFFSET;
4940 /*
4941 * A sane guest uses MOV to write EOI, and the written value
4942 * does not matter. So take a short cut here and avoid the
4943 * heavy instruction emulation.
4944 */
4945 if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) &&
4946 (offset == APIC_EOI)) {
4947 kvm_lapic_set_eoi(vcpu);
4948 skip_emulated_instruction(vcpu);
4949 return 1;
4950 }
4951 }
51d8b661 4952 return emulate_instruction(vcpu, 0) == EMULATE_DONE;
f78e0e2e
SY
4953}
4954
c7c9c56c
YZ
4955static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
4956{
4957 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4958 int vector = exit_qualification & 0xff;
4959
4960 /* EOI-induced VM exit is trap-like and thus no need to adjust IP */
4961 kvm_apic_set_eoi_accelerated(vcpu, vector);
4962 return 1;
4963}
4964
83d4c286
YZ
4965static int handle_apic_write(struct kvm_vcpu *vcpu)
4966{
4967 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4968 u32 offset = exit_qualification & 0xfff;
4969
4970 /* APIC-write VM exit is trap-like and thus no need to adjust IP */
4971 kvm_apic_write_nodecode(vcpu, offset);
4972 return 1;
4973}
4974
851ba692 4975static int handle_task_switch(struct kvm_vcpu *vcpu)
37817f29 4976{
60637aac 4977 struct vcpu_vmx *vmx = to_vmx(vcpu);
37817f29 4978 unsigned long exit_qualification;
e269fb21
JK
4979 bool has_error_code = false;
4980 u32 error_code = 0;
37817f29 4981 u16 tss_selector;
7f3d35fd 4982 int reason, type, idt_v, idt_index;
64a7ec06
GN
4983
4984 idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
7f3d35fd 4985 idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK);
64a7ec06 4986 type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);
37817f29
IE
4987
4988 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4989
4990 reason = (u32)exit_qualification >> 30;
64a7ec06
GN
4991 if (reason == TASK_SWITCH_GATE && idt_v) {
4992 switch (type) {
4993 case INTR_TYPE_NMI_INTR:
4994 vcpu->arch.nmi_injected = false;
654f06fc 4995 vmx_set_nmi_mask(vcpu, true);
64a7ec06
GN
4996 break;
4997 case INTR_TYPE_EXT_INTR:
66fd3f7f 4998 case INTR_TYPE_SOFT_INTR:
64a7ec06
GN
4999 kvm_clear_interrupt_queue(vcpu);
5000 break;
5001 case INTR_TYPE_HARD_EXCEPTION:
e269fb21
JK
5002 if (vmx->idt_vectoring_info &
5003 VECTORING_INFO_DELIVER_CODE_MASK) {
5004 has_error_code = true;
5005 error_code =
5006 vmcs_read32(IDT_VECTORING_ERROR_CODE);
5007 }
5008 /* fall through */
64a7ec06
GN
5009 case INTR_TYPE_SOFT_EXCEPTION:
5010 kvm_clear_exception_queue(vcpu);
5011 break;
5012 default:
5013 break;
5014 }
60637aac 5015 }
37817f29
IE
5016 tss_selector = exit_qualification;
5017
64a7ec06
GN
5018 if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION &&
5019 type != INTR_TYPE_EXT_INTR &&
5020 type != INTR_TYPE_NMI_INTR))
5021 skip_emulated_instruction(vcpu);
5022
7f3d35fd
KW
5023 if (kvm_task_switch(vcpu, tss_selector,
5024 type == INTR_TYPE_SOFT_INTR ? idt_index : -1, reason,
5025 has_error_code, error_code) == EMULATE_FAIL) {
acb54517
GN
5026 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
5027 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
5028 vcpu->run->internal.ndata = 0;
42dbaa5a 5029 return 0;
acb54517 5030 }
42dbaa5a
JK
5031
5032 /* clear all local breakpoint enable flags */
5033	vmcs_writel(GUEST_DR7, vmcs_readl(GUEST_DR7) & ~0x55);
5034
5035 /*
5036 * TODO: What about debug traps on tss switch?
5037 * Are we supposed to inject them and update dr6?
5038 */
5039
5040 return 1;
37817f29
IE
5041}
5042
851ba692 5043static int handle_ept_violation(struct kvm_vcpu *vcpu)
1439442c 5044{
f9c617f6 5045 unsigned long exit_qualification;
1439442c 5046 gpa_t gpa;
4f5982a5 5047 u32 error_code;
1439442c 5048 int gla_validity;
1439442c 5049
f9c617f6 5050 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
1439442c 5051
1439442c
SY
5052 gla_validity = (exit_qualification >> 7) & 0x3;
5053 if (gla_validity != 0x3 && gla_validity != 0x1 && gla_validity != 0) {
5054 printk(KERN_ERR "EPT: Handling EPT violation failed!\n");
5055 printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n",
5056 (long unsigned int)vmcs_read64(GUEST_PHYSICAL_ADDRESS),
f9c617f6 5057 vmcs_readl(GUEST_LINEAR_ADDRESS));
1439442c
SY
5058 printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n",
5059 (long unsigned int)exit_qualification);
851ba692
AK
5060 vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
5061 vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION;
596ae895 5062 return 0;
1439442c
SY
5063 }
5064
5065 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
229456fc 5066 trace_kvm_page_fault(gpa, exit_qualification);
4f5982a5
XG
5067
5068	/* Is it a write fault? */
5069 error_code = exit_qualification & (1U << 1);
5070	/* Is the EPT page-table entry present? */
5071 error_code |= (exit_qualification >> 3) & 0x1;
5072
5073 return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
1439442c
SY
5074}
5075
68f89400
MT
5076static u64 ept_rsvd_mask(u64 spte, int level)
5077{
5078 int i;
5079 u64 mask = 0;
5080
5081 for (i = 51; i > boot_cpu_data.x86_phys_bits; i--)
5082 mask |= (1ULL << i);
5083
5084 if (level > 2)
5085 /* bits 7:3 reserved */
5086 mask |= 0xf8;
5087 else if (level == 2) {
5088 if (spte & (1ULL << 7))
5089			/* 2MB page, bits 20:12 reserved */
5090 mask |= 0x1ff000;
5091 else
5092 /* bits 6:3 reserved */
5093 mask |= 0x78;
5094 }
5095
5096 return mask;
5097}
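/*
 * Illustrative example, not part of the original file: on a hypothetical CPU
 * reporting 40 physical-address bits, ept_rsvd_mask() for a level-2 entry
 * with bit 7 set (a 2MB mapping) returns bits 51:41 (above x86_phys_bits)
 * plus bits 20:12 (0x1ff000); any of those bits being set in the SPTE is a
 * reserved-bit misconfiguration for that entry.
 */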
5098
5099static void ept_misconfig_inspect_spte(struct kvm_vcpu *vcpu, u64 spte,
5100 int level)
5101{
5102 printk(KERN_ERR "%s: spte 0x%llx level %d\n", __func__, spte, level);
5103
5104 /* 010b (write-only) */
5105 WARN_ON((spte & 0x7) == 0x2);
5106
5107 /* 110b (write/execute) */
5108 WARN_ON((spte & 0x7) == 0x6);
5109
5110 /* 100b (execute-only) and value not supported by logical processor */
5111 if (!cpu_has_vmx_ept_execute_only())
5112 WARN_ON((spte & 0x7) == 0x4);
5113
5114 /* not 000b */
5115 if ((spte & 0x7)) {
5116 u64 rsvd_bits = spte & ept_rsvd_mask(spte, level);
5117
5118 if (rsvd_bits != 0) {
5119 printk(KERN_ERR "%s: rsvd_bits = 0x%llx\n",
5120 __func__, rsvd_bits);
5121 WARN_ON(1);
5122 }
5123
5124 if (level == 1 || (level == 2 && (spte & (1ULL << 7)))) {
5125 u64 ept_mem_type = (spte & 0x38) >> 3;
5126
5127 if (ept_mem_type == 2 || ept_mem_type == 3 ||
5128 ept_mem_type == 7) {
5129 printk(KERN_ERR "%s: ept_mem_type=0x%llx\n",
5130 __func__, ept_mem_type);
5131 WARN_ON(1);
5132 }
5133 }
5134 }
5135}
5136
851ba692 5137static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
68f89400
MT
5138{
5139 u64 sptes[4];
ce88decf 5140 int nr_sptes, i, ret;
68f89400
MT
5141 gpa_t gpa;
5142
5143 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
5144
ce88decf
XG
5145 ret = handle_mmio_page_fault_common(vcpu, gpa, true);
5146 if (likely(ret == 1))
5147 return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) ==
5148 EMULATE_DONE;
5149 if (unlikely(!ret))
5150 return 1;
5151
5152 /* It is the real ept misconfig */
68f89400
MT
5153 printk(KERN_ERR "EPT: Misconfiguration.\n");
5154 printk(KERN_ERR "EPT: GPA: 0x%llx\n", gpa);
5155
5156 nr_sptes = kvm_mmu_get_spte_hierarchy(vcpu, gpa, sptes);
5157
5158 for (i = PT64_ROOT_LEVEL; i > PT64_ROOT_LEVEL - nr_sptes; --i)
5159 ept_misconfig_inspect_spte(vcpu, sptes[i-1], i);
5160
851ba692
AK
5161 vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
5162 vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG;
68f89400
MT
5163
5164 return 0;
5165}
5166
851ba692 5167static int handle_nmi_window(struct kvm_vcpu *vcpu)
f08864b4
SY
5168{
5169 u32 cpu_based_vm_exec_control;
5170
5171 /* clear pending NMI */
5172 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
5173 cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
5174 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
5175 ++vcpu->stat.nmi_window_exits;
3842d135 5176 kvm_make_request(KVM_REQ_EVENT, vcpu);
f08864b4
SY
5177
5178 return 1;
5179}
5180
80ced186 5181static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
ea953ef0 5182{
8b3079a5
AK
5183 struct vcpu_vmx *vmx = to_vmx(vcpu);
5184 enum emulation_result err = EMULATE_DONE;
80ced186 5185 int ret = 1;
49e9d557
AK
5186 u32 cpu_exec_ctrl;
5187 bool intr_window_requested;
b8405c18 5188 unsigned count = 130;
49e9d557
AK
5189
5190 cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
5191 intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING;
ea953ef0 5192
b8405c18 5193 while (!guest_state_valid(vcpu) && count-- != 0) {
bdea48e3 5194 if (intr_window_requested && vmx_interrupt_allowed(vcpu))
49e9d557
AK
5195 return handle_interrupt_window(&vmx->vcpu);
5196
de87dcdd
AK
5197 if (test_bit(KVM_REQ_EVENT, &vcpu->requests))
5198 return 1;
5199
51d8b661 5200 err = emulate_instruction(vcpu, 0);
ea953ef0 5201
80ced186
MG
5202 if (err == EMULATE_DO_MMIO) {
5203 ret = 0;
5204 goto out;
5205 }
1d5a4d9b 5206
de5f70e0
AK
5207 if (err != EMULATE_DONE) {
5208 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
5209 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
5210 vcpu->run->internal.ndata = 0;
6d77dbfc 5211 return 0;
de5f70e0 5212 }
ea953ef0
MG
5213
5214 if (signal_pending(current))
80ced186 5215 goto out;
ea953ef0
MG
5216 if (need_resched())
5217 schedule();
5218 }
5219
14168786 5220 vmx->emulation_required = emulation_required(vcpu);
80ced186
MG
5221out:
5222 return ret;
ea953ef0
MG
5223}
5224
4b8d54f9
ZE
5225/*
5226 * Indicate a vcpu that is busy-waiting on a spinlock. We do not enable plain
5227 * PAUSE exiting, so we only get here on CPUs with PAUSE-loop exiting.
5228 */
9fb41ba8 5229static int handle_pause(struct kvm_vcpu *vcpu)
4b8d54f9
ZE
5230{
5231 skip_emulated_instruction(vcpu);
5232 kvm_vcpu_on_spin(vcpu);
5233
5234 return 1;
5235}
5236
59708670
SY
5237static int handle_invalid_op(struct kvm_vcpu *vcpu)
5238{
5239 kvm_queue_exception(vcpu, UD_VECTOR);
5240 return 1;
5241}
5242
ff2f6fe9
NHE
5243/*
5244 * To run an L2 guest, we need a vmcs02 based on the L1-specified vmcs12.
5245 * We could reuse a single VMCS for all the L2 guests, but we also want the
5246 * option to allocate a separate vmcs02 for each separate loaded vmcs12 - this
5247 * allows keeping them loaded on the processor, and in the future will allow
5248 * optimizations where prepare_vmcs02 doesn't need to set all the fields on
5249 * every entry if they never change.
5250 * So we keep, in vmx->nested.vmcs02_pool, a cache of size VMCS02_POOL_SIZE
5251 * (>=0) with a vmcs02 for each recently loaded vmcs12, most recent first.
5252 *
5253 * The following functions allocate and free a vmcs02 in this pool.
5254 */
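/*
 * Illustrative example of the pool behaviour described above (not part of
 * the original source): with VMCS02_POOL_SIZE == 1, an L1 that alternates
 * VMPTRLD between two vmcs12 addresses recycles the single pooled vmcs02 on
 * every switch; a larger pool keeps one vmcs02 per recently used vmcs12,
 * most recently used at the head of the list.
 */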
5255
5256/* Get a VMCS from the pool to use as vmcs02 for the current vmcs12. */
5257static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx)
5258{
5259 struct vmcs02_list *item;
5260 list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
5261 if (item->vmptr == vmx->nested.current_vmptr) {
5262 list_move(&item->list, &vmx->nested.vmcs02_pool);
5263 return &item->vmcs02;
5264 }
5265
5266 if (vmx->nested.vmcs02_num >= max(VMCS02_POOL_SIZE, 1)) {
5267 /* Recycle the least recently used VMCS. */
5268 item = list_entry(vmx->nested.vmcs02_pool.prev,
5269 struct vmcs02_list, list);
5270 item->vmptr = vmx->nested.current_vmptr;
5271 list_move(&item->list, &vmx->nested.vmcs02_pool);
5272 return &item->vmcs02;
5273 }
5274
5275 /* Create a new VMCS */
0fa24ce3 5276 item = kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
ff2f6fe9
NHE
5277 if (!item)
5278 return NULL;
5279 item->vmcs02.vmcs = alloc_vmcs();
5280 if (!item->vmcs02.vmcs) {
5281 kfree(item);
5282 return NULL;
5283 }
5284 loaded_vmcs_init(&item->vmcs02);
5285 item->vmptr = vmx->nested.current_vmptr;
5286 list_add(&(item->list), &(vmx->nested.vmcs02_pool));
5287 vmx->nested.vmcs02_num++;
5288 return &item->vmcs02;
5289}
5290
5291/* Free and remove from pool a vmcs02 saved for a vmcs12 (if there is one) */
5292static void nested_free_vmcs02(struct vcpu_vmx *vmx, gpa_t vmptr)
5293{
5294 struct vmcs02_list *item;
5295 list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
5296 if (item->vmptr == vmptr) {
5297 free_loaded_vmcs(&item->vmcs02);
5298 list_del(&item->list);
5299 kfree(item);
5300 vmx->nested.vmcs02_num--;
5301 return;
5302 }
5303}
5304
5305/*
5306 * Free all VMCSs saved for this vcpu, except the one pointed by
5307 * vmx->loaded_vmcs. These include the VMCSs in vmcs02_pool (except the one
5308 * currently used, if running L2), and vmcs01 when running L2.
5309 */
5310static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx)
5311{
5312 struct vmcs02_list *item, *n;
5313 list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) {
5314 if (vmx->loaded_vmcs != &item->vmcs02)
5315 free_loaded_vmcs(&item->vmcs02);
5316 list_del(&item->list);
5317 kfree(item);
5318 }
5319 vmx->nested.vmcs02_num = 0;
5320
5321 if (vmx->loaded_vmcs != &vmx->vmcs01)
5322 free_loaded_vmcs(&vmx->vmcs01);
5323}
5324
ec378aee
NHE
5325/*
5326 * Emulate the VMXON instruction.
5327 * Currently, we just remember that VMX is active, and do not save or even
5328 * inspect the argument to VMXON (the so-called "VMXON pointer") because we
5329 * do not currently need to store anything in that guest-allocated memory
5330 * region. Consequently, VMCLEAR and VMPTRLD also do not verify that their
5331 * argument is different from the VMXON pointer (which the spec says they do).
5332 */
5333static int handle_vmon(struct kvm_vcpu *vcpu)
5334{
5335 struct kvm_segment cs;
5336 struct vcpu_vmx *vmx = to_vmx(vcpu);
5337
5338 /* The Intel VMX Instruction Reference lists a bunch of bits that
5339 * are prerequisite to running VMXON, most notably cr4.VMXE must be
5340 * set to 1 (see vmx_set_cr4() for when we allow the guest to set this).
5341 * Otherwise, we should fail with #UD. We test these now:
5342 */
5343 if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE) ||
5344 !kvm_read_cr0_bits(vcpu, X86_CR0_PE) ||
5345 (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
5346 kvm_queue_exception(vcpu, UD_VECTOR);
5347 return 1;
5348 }
5349
5350 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
5351 if (is_long_mode(vcpu) && !cs.l) {
5352 kvm_queue_exception(vcpu, UD_VECTOR);
5353 return 1;
5354 }
5355
5356 if (vmx_get_cpl(vcpu)) {
5357 kvm_inject_gp(vcpu, 0);
5358 return 1;
5359 }
5360
ff2f6fe9
NHE
5361 INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));
5362 vmx->nested.vmcs02_num = 0;
5363
ec378aee
NHE
5364 vmx->nested.vmxon = true;
5365
5366 skip_emulated_instruction(vcpu);
5367 return 1;
5368}
5369
5370/*
5371 * Intel's VMX Instruction Reference specifies a common set of prerequisites
5372 * for running VMX instructions (except VMXON, whose prerequisites are
5373 * slightly different). It also specifies what exception to inject otherwise.
5374 */
5375static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
5376{
5377 struct kvm_segment cs;
5378 struct vcpu_vmx *vmx = to_vmx(vcpu);
5379
5380 if (!vmx->nested.vmxon) {
5381 kvm_queue_exception(vcpu, UD_VECTOR);
5382 return 0;
5383 }
5384
5385 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
5386 if ((vmx_get_rflags(vcpu) & X86_EFLAGS_VM) ||
5387 (is_long_mode(vcpu) && !cs.l)) {
5388 kvm_queue_exception(vcpu, UD_VECTOR);
5389 return 0;
5390 }
5391
5392 if (vmx_get_cpl(vcpu)) {
5393 kvm_inject_gp(vcpu, 0);
5394 return 0;
5395 }
5396
5397 return 1;
5398}
5399
5400/*
5401 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
5402 * just stops using VMX.
5403 */
5404static void free_nested(struct vcpu_vmx *vmx)
5405{
5406 if (!vmx->nested.vmxon)
5407 return;
5408 vmx->nested.vmxon = false;
a9d30f33
NHE
5409 if (vmx->nested.current_vmptr != -1ull) {
5410 kunmap(vmx->nested.current_vmcs12_page);
5411 nested_release_page(vmx->nested.current_vmcs12_page);
5412 vmx->nested.current_vmptr = -1ull;
5413 vmx->nested.current_vmcs12 = NULL;
5414 }
fe3ef05c
NHE
5415 /* Unpin physical memory we referred to in current vmcs02 */
5416 if (vmx->nested.apic_access_page) {
5417 nested_release_page(vmx->nested.apic_access_page);
5418 vmx->nested.apic_access_page = 0;
5419 }
ff2f6fe9
NHE
5420
5421 nested_free_all_saved_vmcss(vmx);
ec378aee
NHE
5422}
5423
5424/* Emulate the VMXOFF instruction */
5425static int handle_vmoff(struct kvm_vcpu *vcpu)
5426{
5427 if (!nested_vmx_check_permission(vcpu))
5428 return 1;
5429 free_nested(to_vmx(vcpu));
5430 skip_emulated_instruction(vcpu);
5431 return 1;
5432}
5433
064aea77
NHE
5434/*
5435 * Decode the memory-address operand of a vmx instruction, as recorded on an
5436 * exit caused by such an instruction (run by a guest hypervisor).
5437 * On success, returns 0. When the operand is invalid, returns 1 and throws
5438 * #UD or #GP.
5439 */
5440static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
5441 unsigned long exit_qualification,
5442 u32 vmx_instruction_info, gva_t *ret)
5443{
5444 /*
5445 * According to Vol. 3B, "Information for VM Exits Due to Instruction
5446 * Execution", on an exit, vmx_instruction_info holds most of the
5447 * addressing components of the operand. Only the displacement part
5448 * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
5449 * For how an actual address is calculated from all these components,
5450 * refer to Vol. 1, "Operand Addressing".
5451 */
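	/*
	 * Worked example, with register choices picked purely for
	 * illustration: for base_reg = RAX, index_reg = RCX, scaling = 2 and
	 * seg_reg = DS, the code below computes
	 *
	 *	*ret = DS.base + RAX + (RCX << 2) + displacement
	 *
	 * and truncates the result to 32 bits when addr_size == 1.
	 */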
5452 int scaling = vmx_instruction_info & 3;
5453 int addr_size = (vmx_instruction_info >> 7) & 7;
5454 bool is_reg = vmx_instruction_info & (1u << 10);
5455 int seg_reg = (vmx_instruction_info >> 15) & 7;
5456 int index_reg = (vmx_instruction_info >> 18) & 0xf;
5457 bool index_is_valid = !(vmx_instruction_info & (1u << 22));
5458 int base_reg = (vmx_instruction_info >> 23) & 0xf;
5459 bool base_is_valid = !(vmx_instruction_info & (1u << 27));
5460
5461 if (is_reg) {
5462 kvm_queue_exception(vcpu, UD_VECTOR);
5463 return 1;
5464 }
5465
5466 /* Addr = segment_base + offset */
5467 /* offset = base + [index * scale] + displacement */
5468 *ret = vmx_get_segment_base(vcpu, seg_reg);
5469 if (base_is_valid)
5470 *ret += kvm_register_read(vcpu, base_reg);
5471 if (index_is_valid)
5472 *ret += kvm_register_read(vcpu, index_reg)<<scaling;
5473 *ret += exit_qualification; /* holds the displacement */
5474
5475 if (addr_size == 1) /* 32 bit */
5476 *ret &= 0xffffffff;
5477
5478 /*
5479 * TODO: throw #GP (and return 1) in various cases that the VM*
5480 * instructions require it - e.g., offset beyond segment limit,
5481 * unusable or unreadable/unwritable segment, non-canonical 64-bit
5482 * address, and so on. Currently these are not checked.
5483 */
5484 return 0;
5485}
5486
0140caea
NHE
5487/*
5488 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
5489 * set the success or error code of an emulated VMX instruction, as specified
5490 * by Vol 2B, VMX Instruction Reference, "Conventions".
5491 */
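/*
 * In short, per those conventions: VMsucceed clears CF/PF/AF/ZF/SF/OF,
 * VMfailInvalid sets CF (there is no current VMCS to report through), and
 * VMfailValid sets ZF and stores the error number in the VM-instruction
 * error field of the current VMCS. The helpers below do exactly that for
 * the emulated vmcs12.
 */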
5492static void nested_vmx_succeed(struct kvm_vcpu *vcpu)
5493{
5494 vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
5495 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
5496 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
5497}
5498
5499static void nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
5500{
5501 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
5502 & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
5503 X86_EFLAGS_SF | X86_EFLAGS_OF))
5504 | X86_EFLAGS_CF);
5505}
5506
5507static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
5508 u32 vm_instruction_error)
5509{
5510 if (to_vmx(vcpu)->nested.current_vmptr == -1ull) {
5511 /*
5512 * failValid writes the error number to the current VMCS, which
5513		 * can't be done if there isn't a current VMCS.
5514 */
5515 nested_vmx_failInvalid(vcpu);
5516 return;
5517 }
5518 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
5519 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
5520 X86_EFLAGS_SF | X86_EFLAGS_OF))
5521 | X86_EFLAGS_ZF);
5522 get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
5523}
5524
27d6c865
NHE
5525/* Emulate the VMCLEAR instruction */
5526static int handle_vmclear(struct kvm_vcpu *vcpu)
5527{
5528 struct vcpu_vmx *vmx = to_vmx(vcpu);
5529 gva_t gva;
5530 gpa_t vmptr;
5531 struct vmcs12 *vmcs12;
5532 struct page *page;
5533 struct x86_exception e;
5534
5535 if (!nested_vmx_check_permission(vcpu))
5536 return 1;
5537
5538 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
5539 vmcs_read32(VMX_INSTRUCTION_INFO), &gva))
5540 return 1;
5541
5542 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
5543 sizeof(vmptr), &e)) {
5544 kvm_inject_page_fault(vcpu, &e);
5545 return 1;
5546 }
5547
5548 if (!IS_ALIGNED(vmptr, PAGE_SIZE)) {
5549 nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
5550 skip_emulated_instruction(vcpu);
5551 return 1;
5552 }
5553
5554 if (vmptr == vmx->nested.current_vmptr) {
5555 kunmap(vmx->nested.current_vmcs12_page);
5556 nested_release_page(vmx->nested.current_vmcs12_page);
5557 vmx->nested.current_vmptr = -1ull;
5558 vmx->nested.current_vmcs12 = NULL;
5559 }
5560
5561 page = nested_get_page(vcpu, vmptr);
5562 if (page == NULL) {
5563 /*
5564 * For accurate processor emulation, VMCLEAR beyond available
5565 * physical memory should do nothing at all. However, it is
5566 * possible that a nested vmx bug, not a guest hypervisor bug,
5567 * resulted in this case, so let's shut down before doing any
5568 * more damage:
5569 */
5570 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
5571 return 1;
5572 }
5573 vmcs12 = kmap(page);
5574 vmcs12->launch_state = 0;
5575 kunmap(page);
5576 nested_release_page(page);
5577
5578 nested_free_vmcs02(vmx, vmptr);
5579
5580 skip_emulated_instruction(vcpu);
5581 nested_vmx_succeed(vcpu);
5582 return 1;
5583}
5584
cd232ad0
NHE
5585static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
5586
5587/* Emulate the VMLAUNCH instruction */
5588static int handle_vmlaunch(struct kvm_vcpu *vcpu)
5589{
5590 return nested_vmx_run(vcpu, true);
5591}
5592
5593/* Emulate the VMRESUME instruction */
5594static int handle_vmresume(struct kvm_vcpu *vcpu)
5595{
5596
5597 return nested_vmx_run(vcpu, false);
5598}
5599
49f705c5
NHE
5600enum vmcs_field_type {
5601 VMCS_FIELD_TYPE_U16 = 0,
5602 VMCS_FIELD_TYPE_U64 = 1,
5603 VMCS_FIELD_TYPE_U32 = 2,
5604 VMCS_FIELD_TYPE_NATURAL_WIDTH = 3
5605};
5606
5607static inline int vmcs_field_type(unsigned long field)
5608{
5609 if (0x1 & field) /* the *_HIGH fields are all 32 bit */
5610 return VMCS_FIELD_TYPE_U32;
5611 return (field >> 13) & 0x3 ;
5612}
5613
5614static inline int vmcs_field_readonly(unsigned long field)
5615{
5616 return (((field >> 10) & 0x3) == 1);
5617}
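/*
 * Illustrative examples, using field encodings from the SDM (not referenced
 * by the code here):
 *
 *	vmcs_field_type(0x681e)     == VMCS_FIELD_TYPE_NATURAL_WIDTH  (GUEST_RIP)
 *	vmcs_field_type(0x4800)     == VMCS_FIELD_TYPE_U32            (GUEST_ES_LIMIT)
 *	vmcs_field_type(0x2001)     == VMCS_FIELD_TYPE_U32            (a *_HIGH half)
 *	vmcs_field_readonly(0x6400) == 1                               (EXIT_QUALIFICATION)
 */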
5618
5619/*
5620 * Read a vmcs12 field. Since these can have varying lengths and we return
5621 * one type, we choose the biggest type (u64) and zero-extend the return value
5622 * to that size. Note that the caller, handle_vmread, might need to use only
5623 * some of the bits we return here (e.g., on 32-bit guests, only 32 bits of
5624 * 64-bit fields are to be returned).
5625 */
5626static inline bool vmcs12_read_any(struct kvm_vcpu *vcpu,
5627 unsigned long field, u64 *ret)
5628{
5629 short offset = vmcs_field_to_offset(field);
5630 char *p;
5631
5632 if (offset < 0)
5633 return 0;
5634
5635 p = ((char *)(get_vmcs12(vcpu))) + offset;
5636
5637 switch (vmcs_field_type(field)) {
5638 case VMCS_FIELD_TYPE_NATURAL_WIDTH:
5639 *ret = *((natural_width *)p);
5640 return 1;
5641 case VMCS_FIELD_TYPE_U16:
5642 *ret = *((u16 *)p);
5643 return 1;
5644 case VMCS_FIELD_TYPE_U32:
5645 *ret = *((u32 *)p);
5646 return 1;
5647 case VMCS_FIELD_TYPE_U64:
5648 *ret = *((u64 *)p);
5649 return 1;
5650 default:
5651 return 0; /* can never happen. */
5652 }
5653}
5654
5655/*
5656 * VMX instructions which assume a current vmcs12 (i.e., that VMPTRLD was
5657 * used before) all generate the same failure when it is missing.
5658 */
5659static int nested_vmx_check_vmcs12(struct kvm_vcpu *vcpu)
5660{
5661 struct vcpu_vmx *vmx = to_vmx(vcpu);
5662 if (vmx->nested.current_vmptr == -1ull) {
5663 nested_vmx_failInvalid(vcpu);
5664 skip_emulated_instruction(vcpu);
5665 return 0;
5666 }
5667 return 1;
5668}
5669
5670static int handle_vmread(struct kvm_vcpu *vcpu)
5671{
5672 unsigned long field;
5673 u64 field_value;
5674 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5675 u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5676 gva_t gva = 0;
5677
5678 if (!nested_vmx_check_permission(vcpu) ||
5679 !nested_vmx_check_vmcs12(vcpu))
5680 return 1;
5681
5682 /* Decode instruction info and find the field to read */
5683 field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
5684 /* Read the field, zero-extended to a u64 field_value */
5685 if (!vmcs12_read_any(vcpu, field, &field_value)) {
5686 nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
5687 skip_emulated_instruction(vcpu);
5688 return 1;
5689 }
5690 /*
5691 * Now copy part of this value to register or memory, as requested.
5692 * Note that the number of bits actually copied is 32 or 64 depending
5693 * on the guest's mode (32 or 64 bit), not on the given field's length.
5694 */
5695 if (vmx_instruction_info & (1u << 10)) {
5696 kvm_register_write(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
5697 field_value);
5698 } else {
5699 if (get_vmx_mem_address(vcpu, exit_qualification,
5700 vmx_instruction_info, &gva))
5701 return 1;
5702 /* _system ok, as nested_vmx_check_permission verified cpl=0 */
5703 kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
5704 &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
5705 }
5706
5707 nested_vmx_succeed(vcpu);
5708 skip_emulated_instruction(vcpu);
5709 return 1;
5710}
5711
5712
5713static int handle_vmwrite(struct kvm_vcpu *vcpu)
5714{
5715 unsigned long field;
5716 gva_t gva;
5717 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5718 u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5719 char *p;
5720 short offset;
5721 /* The value to write might be 32 or 64 bits, depending on L1's long
5722 * mode, and eventually we need to write that into a field of several
5723 * possible lengths. The code below first zero-extends the value to 64
5724 * bit (field_value), and then copies only the appropriate number of
5725 * bits into the vmcs12 field.
5726 */
5727 u64 field_value = 0;
5728 struct x86_exception e;
5729
5730 if (!nested_vmx_check_permission(vcpu) ||
5731 !nested_vmx_check_vmcs12(vcpu))
5732 return 1;
5733
5734 if (vmx_instruction_info & (1u << 10))
5735 field_value = kvm_register_read(vcpu,
5736 (((vmx_instruction_info) >> 3) & 0xf));
5737 else {
5738 if (get_vmx_mem_address(vcpu, exit_qualification,
5739 vmx_instruction_info, &gva))
5740 return 1;
5741 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
5742 &field_value, (is_long_mode(vcpu) ? 8 : 4), &e)) {
5743 kvm_inject_page_fault(vcpu, &e);
5744 return 1;
5745 }
5746 }
5747
5748
5749 field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
5750 if (vmcs_field_readonly(field)) {
5751 nested_vmx_failValid(vcpu,
5752 VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
5753 skip_emulated_instruction(vcpu);
5754 return 1;
5755 }
5756
5757 offset = vmcs_field_to_offset(field);
5758 if (offset < 0) {
5759 nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
5760 skip_emulated_instruction(vcpu);
5761 return 1;
5762 }
5763 p = ((char *) get_vmcs12(vcpu)) + offset;
5764
5765 switch (vmcs_field_type(field)) {
5766 case VMCS_FIELD_TYPE_U16:
5767 *(u16 *)p = field_value;
5768 break;
5769 case VMCS_FIELD_TYPE_U32:
5770 *(u32 *)p = field_value;
5771 break;
5772 case VMCS_FIELD_TYPE_U64:
5773 *(u64 *)p = field_value;
5774 break;
5775 case VMCS_FIELD_TYPE_NATURAL_WIDTH:
5776 *(natural_width *)p = field_value;
5777 break;
5778 default:
5779 nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
5780 skip_emulated_instruction(vcpu);
5781 return 1;
5782 }
5783
5784 nested_vmx_succeed(vcpu);
5785 skip_emulated_instruction(vcpu);
5786 return 1;
5787}
5788
63846663
NHE
5789/* Emulate the VMPTRLD instruction */
5790static int handle_vmptrld(struct kvm_vcpu *vcpu)
5791{
5792 struct vcpu_vmx *vmx = to_vmx(vcpu);
5793 gva_t gva;
5794 gpa_t vmptr;
5795 struct x86_exception e;
5796
5797 if (!nested_vmx_check_permission(vcpu))
5798 return 1;
5799
5800 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
5801 vmcs_read32(VMX_INSTRUCTION_INFO), &gva))
5802 return 1;
5803
5804 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
5805 sizeof(vmptr), &e)) {
5806 kvm_inject_page_fault(vcpu, &e);
5807 return 1;
5808 }
5809
5810 if (!IS_ALIGNED(vmptr, PAGE_SIZE)) {
5811 nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
5812 skip_emulated_instruction(vcpu);
5813 return 1;
5814 }
5815
5816 if (vmx->nested.current_vmptr != vmptr) {
5817 struct vmcs12 *new_vmcs12;
5818 struct page *page;
5819 page = nested_get_page(vcpu, vmptr);
5820 if (page == NULL) {
5821 nested_vmx_failInvalid(vcpu);
5822 skip_emulated_instruction(vcpu);
5823 return 1;
5824 }
5825 new_vmcs12 = kmap(page);
5826 if (new_vmcs12->revision_id != VMCS12_REVISION) {
5827 kunmap(page);
5828 nested_release_page_clean(page);
5829 nested_vmx_failValid(vcpu,
5830 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
5831 skip_emulated_instruction(vcpu);
5832 return 1;
5833 }
5834 if (vmx->nested.current_vmptr != -1ull) {
5835 kunmap(vmx->nested.current_vmcs12_page);
5836 nested_release_page(vmx->nested.current_vmcs12_page);
5837 }
5838
5839 vmx->nested.current_vmptr = vmptr;
5840 vmx->nested.current_vmcs12 = new_vmcs12;
5841 vmx->nested.current_vmcs12_page = page;
5842 }
5843
5844 nested_vmx_succeed(vcpu);
5845 skip_emulated_instruction(vcpu);
5846 return 1;
5847}
5848
6a4d7550
NHE
5849/* Emulate the VMPTRST instruction */
5850static int handle_vmptrst(struct kvm_vcpu *vcpu)
5851{
5852 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5853 u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5854 gva_t vmcs_gva;
5855 struct x86_exception e;
5856
5857 if (!nested_vmx_check_permission(vcpu))
5858 return 1;
5859
5860 if (get_vmx_mem_address(vcpu, exit_qualification,
5861 vmx_instruction_info, &vmcs_gva))
5862 return 1;
5863 /* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */
5864 if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
5865 (void *)&to_vmx(vcpu)->nested.current_vmptr,
5866 sizeof(u64), &e)) {
5867 kvm_inject_page_fault(vcpu, &e);
5868 return 1;
5869 }
5870 nested_vmx_succeed(vcpu);
5871 skip_emulated_instruction(vcpu);
5872 return 1;
5873}
5874
6aa8b732
AK
5875/*
5876 * The exit handlers return 1 if the exit was handled fully and guest execution
5877 * may resume. Otherwise they set the kvm_run parameter to indicate what needs
5878 * to be done to userspace and return 0.
5879 */
772e0318 5880static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
6aa8b732
AK
5881 [EXIT_REASON_EXCEPTION_NMI] = handle_exception,
5882 [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
988ad74f 5883 [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault,
f08864b4 5884 [EXIT_REASON_NMI_WINDOW] = handle_nmi_window,
6aa8b732 5885 [EXIT_REASON_IO_INSTRUCTION] = handle_io,
6aa8b732
AK
5886 [EXIT_REASON_CR_ACCESS] = handle_cr,
5887 [EXIT_REASON_DR_ACCESS] = handle_dr,
5888 [EXIT_REASON_CPUID] = handle_cpuid,
5889 [EXIT_REASON_MSR_READ] = handle_rdmsr,
5890 [EXIT_REASON_MSR_WRITE] = handle_wrmsr,
5891 [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window,
5892 [EXIT_REASON_HLT] = handle_halt,
ec25d5e6 5893 [EXIT_REASON_INVD] = handle_invd,
a7052897 5894 [EXIT_REASON_INVLPG] = handle_invlpg,
fee84b07 5895 [EXIT_REASON_RDPMC] = handle_rdpmc,
c21415e8 5896 [EXIT_REASON_VMCALL] = handle_vmcall,
27d6c865 5897 [EXIT_REASON_VMCLEAR] = handle_vmclear,
cd232ad0 5898 [EXIT_REASON_VMLAUNCH] = handle_vmlaunch,
63846663 5899 [EXIT_REASON_VMPTRLD] = handle_vmptrld,
6a4d7550 5900 [EXIT_REASON_VMPTRST] = handle_vmptrst,
49f705c5 5901 [EXIT_REASON_VMREAD] = handle_vmread,
cd232ad0 5902 [EXIT_REASON_VMRESUME] = handle_vmresume,
49f705c5 5903 [EXIT_REASON_VMWRITE] = handle_vmwrite,
ec378aee
NHE
5904 [EXIT_REASON_VMOFF] = handle_vmoff,
5905 [EXIT_REASON_VMON] = handle_vmon,
f78e0e2e
SY
5906 [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,
5907 [EXIT_REASON_APIC_ACCESS] = handle_apic_access,
83d4c286 5908 [EXIT_REASON_APIC_WRITE] = handle_apic_write,
c7c9c56c 5909 [EXIT_REASON_EOI_INDUCED] = handle_apic_eoi_induced,
e5edaa01 5910 [EXIT_REASON_WBINVD] = handle_wbinvd,
2acf923e 5911 [EXIT_REASON_XSETBV] = handle_xsetbv,
37817f29 5912 [EXIT_REASON_TASK_SWITCH] = handle_task_switch,
a0861c02 5913 [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check,
68f89400
MT
5914 [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation,
5915 [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig,
4b8d54f9 5916 [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause,
59708670
SY
5917 [EXIT_REASON_MWAIT_INSTRUCTION] = handle_invalid_op,
5918 [EXIT_REASON_MONITOR_INSTRUCTION] = handle_invalid_op,
6aa8b732
AK
5919};
5920
5921static const int kvm_vmx_max_exit_handlers =
50a3485c 5922 ARRAY_SIZE(kvm_vmx_exit_handlers);
6aa8b732 5923
908a7bdd
JK
5924static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
5925 struct vmcs12 *vmcs12)
5926{
5927 unsigned long exit_qualification;
5928 gpa_t bitmap, last_bitmap;
5929 unsigned int port;
5930 int size;
5931 u8 b;
5932
5933 if (nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING))
5934 return 1;
5935
5936 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
5937 return 0;
5938
5939 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5940
5941 port = exit_qualification >> 16;
5942 size = (exit_qualification & 7) + 1;
5943
5944 last_bitmap = (gpa_t)-1;
5945 b = -1;
5946
5947 while (size > 0) {
5948 if (port < 0x8000)
5949 bitmap = vmcs12->io_bitmap_a;
5950 else if (port < 0x10000)
5951 bitmap = vmcs12->io_bitmap_b;
5952 else
5953 return 1;
5954 bitmap += (port & 0x7fff) / 8;
5955
5956 if (last_bitmap != bitmap)
5957 if (kvm_read_guest(vcpu->kvm, bitmap, &b, 1))
5958 return 1;
5959 if (b & (1 << (port & 7)))
5960 return 1;
5961
5962 port++;
5963 size--;
5964 last_bitmap = bitmap;
5965 }
5966
5967 return 0;
5968}
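/*
 * Worked example of the lookup above (values chosen for illustration): a
 * one-byte access to port 0x3f8 with CPU_BASED_USE_IO_BITMAPS set is
 * reflected to L1 iff bit 0 of byte 0x3f8 / 8 = 127 of vmcs12->io_bitmap_a
 * is set; ports 0x8000 and above are looked up in io_bitmap_b instead.
 */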
5969
644d711a
NHE
5970/*
5971 * Return 1 if we should exit from L2 to L1 to handle an MSR access,
5972 * rather than handle it ourselves in L0. I.e., check whether L1 expressed
5973 * disinterest in the current event (read or write a specific MSR) by using an
5974 * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
5975 */
5976static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
5977 struct vmcs12 *vmcs12, u32 exit_reason)
5978{
5979 u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX];
5980 gpa_t bitmap;
5981
cbd29cb6 5982 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
644d711a
NHE
5983 return 1;
5984
5985 /*
5986 * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
5987 * for the four combinations of read/write and low/high MSR numbers.
5988 * First we need to figure out which of the four to use:
5989 */
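	/*
	 * Worked example (illustrative only): a write to MSR_EFER, index
	 * 0xc0000080, selects the "write, high MSRs" bitmap at page offset
	 * 2048 + 1024 = 3072; the MSR is then tested at byte 0x80 / 8 = 16,
	 * bit 0 of that bitmap.
	 */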
5990 bitmap = vmcs12->msr_bitmap;
5991 if (exit_reason == EXIT_REASON_MSR_WRITE)
5992 bitmap += 2048;
5993 if (msr_index >= 0xc0000000) {
5994 msr_index -= 0xc0000000;
5995 bitmap += 1024;
5996 }
5997
5998 /* Then read the msr_index'th bit from this bitmap: */
5999 if (msr_index < 1024*8) {
6000 unsigned char b;
bd31a7f5
JK
6001 if (kvm_read_guest(vcpu->kvm, bitmap + msr_index/8, &b, 1))
6002 return 1;
644d711a
NHE
6003 return 1 & (b >> (msr_index & 7));
6004 } else
6005 return 1; /* let L1 handle the wrong parameter */
6006}
6007
6008/*
6009 * Return 1 if we should exit from L2 to L1 to handle a CR access exit,
6010 * rather than handle it ourselves in L0. I.e., check if L1 wanted to
6011 * intercept (via guest_host_mask etc.) the current event.
6012 */
6013static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
6014 struct vmcs12 *vmcs12)
6015{
6016 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
6017 int cr = exit_qualification & 15;
6018 int reg = (exit_qualification >> 8) & 15;
6019 unsigned long val = kvm_register_read(vcpu, reg);
6020
6021 switch ((exit_qualification >> 4) & 3) {
6022 case 0: /* mov to cr */
6023 switch (cr) {
6024 case 0:
6025 if (vmcs12->cr0_guest_host_mask &
6026 (val ^ vmcs12->cr0_read_shadow))
6027 return 1;
6028 break;
6029 case 3:
6030 if ((vmcs12->cr3_target_count >= 1 &&
6031 vmcs12->cr3_target_value0 == val) ||
6032 (vmcs12->cr3_target_count >= 2 &&
6033 vmcs12->cr3_target_value1 == val) ||
6034 (vmcs12->cr3_target_count >= 3 &&
6035 vmcs12->cr3_target_value2 == val) ||
6036 (vmcs12->cr3_target_count >= 4 &&
6037 vmcs12->cr3_target_value3 == val))
6038 return 0;
6039 if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
6040 return 1;
6041 break;
6042 case 4:
6043 if (vmcs12->cr4_guest_host_mask &
6044 (vmcs12->cr4_read_shadow ^ val))
6045 return 1;
6046 break;
6047 case 8:
6048 if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
6049 return 1;
6050 break;
6051 }
6052 break;
6053 case 2: /* clts */
6054 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
6055 (vmcs12->cr0_read_shadow & X86_CR0_TS))
6056 return 1;
6057 break;
6058 case 1: /* mov from cr */
6059 switch (cr) {
6060 case 3:
6061 if (vmcs12->cpu_based_vm_exec_control &
6062 CPU_BASED_CR3_STORE_EXITING)
6063 return 1;
6064 break;
6065 case 8:
6066 if (vmcs12->cpu_based_vm_exec_control &
6067 CPU_BASED_CR8_STORE_EXITING)
6068 return 1;
6069 break;
6070 }
6071 break;
6072 case 3: /* lmsw */
6073 /*
6074 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
6075 * cr0. Other attempted changes are ignored, with no exit.
6076 */
6077 if (vmcs12->cr0_guest_host_mask & 0xe &
6078 (val ^ vmcs12->cr0_read_shadow))
6079 return 1;
6080 if ((vmcs12->cr0_guest_host_mask & 0x1) &&
6081 !(vmcs12->cr0_read_shadow & 0x1) &&
6082 (val & 0x1))
6083 return 1;
6084 break;
6085 }
6086 return 0;
6087}
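/*
 * Example of the guest/host mask logic above (illustrative only): if L1 set
 * X86_CR0_TS in cr0_guest_host_mask, a "mov to cr0" by L2 that flips TS
 * relative to cr0_read_shadow is reflected to L1, while changes to bits L1
 * did not claim stay in L0 and cause no nested exit.
 */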
6088
6089/*
6090 * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we
6091 * should handle it ourselves in L0 (and then continue L2). Only call this
6092 * when in is_guest_mode (L2).
6093 */
6094static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
6095{
644d711a
NHE
6096 u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
6097 struct vcpu_vmx *vmx = to_vmx(vcpu);
6098 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
957c897e 6099 u32 exit_reason = vmx->exit_reason;
644d711a
NHE
6100
6101 if (vmx->nested.nested_run_pending)
6102 return 0;
6103
6104 if (unlikely(vmx->fail)) {
bd80158a
JK
6105 pr_info_ratelimited("%s failed vm entry %x\n", __func__,
6106 vmcs_read32(VM_INSTRUCTION_ERROR));
644d711a
NHE
6107 return 1;
6108 }
6109
6110 switch (exit_reason) {
6111 case EXIT_REASON_EXCEPTION_NMI:
6112 if (!is_exception(intr_info))
6113 return 0;
6114 else if (is_page_fault(intr_info))
6115 return enable_ept;
6116 return vmcs12->exception_bitmap &
6117 (1u << (intr_info & INTR_INFO_VECTOR_MASK));
6118 case EXIT_REASON_EXTERNAL_INTERRUPT:
6119 return 0;
6120 case EXIT_REASON_TRIPLE_FAULT:
6121 return 1;
6122 case EXIT_REASON_PENDING_INTERRUPT:
6123 case EXIT_REASON_NMI_WINDOW:
6124 /*
6125		 * prepare_vmcs02() sets the CPU_BASED_VIRTUAL_INTR_PENDING bit
6126 * (aka Interrupt Window Exiting) only when L1 turned it on,
6127 * so if we got a PENDING_INTERRUPT exit, this must be for L1.
6128 * Same for NMI Window Exiting.
6129 */
6130 return 1;
6131 case EXIT_REASON_TASK_SWITCH:
6132 return 1;
6133 case EXIT_REASON_CPUID:
6134 return 1;
6135 case EXIT_REASON_HLT:
6136 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
6137 case EXIT_REASON_INVD:
6138 return 1;
6139 case EXIT_REASON_INVLPG:
6140 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
6141 case EXIT_REASON_RDPMC:
6142 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
6143 case EXIT_REASON_RDTSC:
6144 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
6145 case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
6146 case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
6147 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
6148 case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
6149 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
6150 /*
6151 * VMX instructions trap unconditionally. This allows L1 to
6152 * emulate them for its L2 guest, i.e., allows 3-level nesting!
6153 */
6154 return 1;
6155 case EXIT_REASON_CR_ACCESS:
6156 return nested_vmx_exit_handled_cr(vcpu, vmcs12);
6157 case EXIT_REASON_DR_ACCESS:
6158 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
6159 case EXIT_REASON_IO_INSTRUCTION:
908a7bdd 6160 return nested_vmx_exit_handled_io(vcpu, vmcs12);
644d711a
NHE
6161 case EXIT_REASON_MSR_READ:
6162 case EXIT_REASON_MSR_WRITE:
6163 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
6164 case EXIT_REASON_INVALID_STATE:
6165 return 1;
6166 case EXIT_REASON_MWAIT_INSTRUCTION:
6167 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
6168 case EXIT_REASON_MONITOR_INSTRUCTION:
6169 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
6170 case EXIT_REASON_PAUSE_INSTRUCTION:
6171 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
6172 nested_cpu_has2(vmcs12,
6173 SECONDARY_EXEC_PAUSE_LOOP_EXITING);
6174 case EXIT_REASON_MCE_DURING_VMENTRY:
6175 return 0;
6176 case EXIT_REASON_TPR_BELOW_THRESHOLD:
6177 return 1;
6178 case EXIT_REASON_APIC_ACCESS:
6179 return nested_cpu_has2(vmcs12,
6180 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
6181 case EXIT_REASON_EPT_VIOLATION:
6182 case EXIT_REASON_EPT_MISCONFIG:
6183 return 0;
6184 case EXIT_REASON_WBINVD:
6185 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
6186 case EXIT_REASON_XSETBV:
6187 return 1;
6188 default:
6189 return 1;
6190 }
6191}
6192
586f9607
AK
6193static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
6194{
6195 *info1 = vmcs_readl(EXIT_QUALIFICATION);
6196 *info2 = vmcs_read32(VM_EXIT_INTR_INFO);
6197}
6198
6aa8b732
AK
6199/*
6200 * The guest has exited. See if we can fix it or if we need userspace
6201 * assistance.
6202 */
851ba692 6203static int vmx_handle_exit(struct kvm_vcpu *vcpu)
6aa8b732 6204{
29bd8a78 6205 struct vcpu_vmx *vmx = to_vmx(vcpu);
a0861c02 6206 u32 exit_reason = vmx->exit_reason;
1155f76a 6207 u32 vectoring_info = vmx->idt_vectoring_info;
29bd8a78 6208
80ced186 6209 /* If guest state is invalid, start emulating */
14168786 6210 if (vmx->emulation_required)
80ced186 6211 return handle_invalid_guest_state(vcpu);
1d5a4d9b 6212
b6f1250e
NHE
6213 /*
6214 * the KVM_REQ_EVENT optimization bit is only on for one entry, and if
6215 * we did not inject a still-pending event to L1 now because of
6216 * nested_run_pending, we need to re-enable this bit.
6217 */
6218 if (vmx->nested.nested_run_pending)
6219 kvm_make_request(KVM_REQ_EVENT, vcpu);
6220
509c75ea
NHE
6221 if (!is_guest_mode(vcpu) && (exit_reason == EXIT_REASON_VMLAUNCH ||
6222 exit_reason == EXIT_REASON_VMRESUME))
644d711a
NHE
6223 vmx->nested.nested_run_pending = 1;
6224 else
6225 vmx->nested.nested_run_pending = 0;
6226
6227 if (is_guest_mode(vcpu) && nested_vmx_exit_handled(vcpu)) {
6228 nested_vmx_vmexit(vcpu);
6229 return 1;
6230 }
6231
5120702e
MG
6232 if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
6233 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
6234 vcpu->run->fail_entry.hardware_entry_failure_reason
6235 = exit_reason;
6236 return 0;
6237 }
6238
29bd8a78 6239 if (unlikely(vmx->fail)) {
851ba692
AK
6240 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
6241 vcpu->run->fail_entry.hardware_entry_failure_reason
29bd8a78
AK
6242 = vmcs_read32(VM_INSTRUCTION_ERROR);
6243 return 0;
6244 }
6aa8b732 6245
b9bf6882
XG
6246 /*
6247 * Note:
6248 * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it was caused by a
6249 * delivery event, since that indicates the guest is accessing MMIO.
6250 * The vm-exit can be triggered again after returning to the guest,
6251 * which would cause an infinite loop.
6252 */
d77c26fc 6253 if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
1439442c 6254 (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
60637aac 6255 exit_reason != EXIT_REASON_EPT_VIOLATION &&
b9bf6882
XG
6256 exit_reason != EXIT_REASON_TASK_SWITCH)) {
6257 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
6258 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
6259 vcpu->run->internal.ndata = 2;
6260 vcpu->run->internal.data[0] = vectoring_info;
6261 vcpu->run->internal.data[1] = exit_reason;
6262 return 0;
6263 }
3b86cd99 6264
644d711a
NHE
6265 if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked &&
6266 !(is_guest_mode(vcpu) && nested_cpu_has_virtual_nmis(
6267 get_vmcs12(vcpu), vcpu)))) {
c4282df9 6268 if (vmx_interrupt_allowed(vcpu)) {
3b86cd99 6269 vmx->soft_vnmi_blocked = 0;
3b86cd99 6270 } else if (vmx->vnmi_blocked_time > 1000000000LL &&
4531220b 6271 vcpu->arch.nmi_pending) {
3b86cd99
JK
6272 /*
6273			 * This CPU doesn't support us in finding the end of an
6274 * NMI-blocked window if the guest runs with IRQs
6275 * disabled. So we pull the trigger after 1 s of
6276 * futile waiting, but inform the user about this.
6277 */
6278 printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
6279 "state on VCPU %d after 1 s timeout\n",
6280 __func__, vcpu->vcpu_id);
6281 vmx->soft_vnmi_blocked = 0;
3b86cd99 6282 }
3b86cd99
JK
6283 }
6284
6aa8b732
AK
6285 if (exit_reason < kvm_vmx_max_exit_handlers
6286 && kvm_vmx_exit_handlers[exit_reason])
851ba692 6287 return kvm_vmx_exit_handlers[exit_reason](vcpu);
6aa8b732 6288 else {
851ba692
AK
6289 vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
6290 vcpu->run->hw.hardware_exit_reason = exit_reason;
6aa8b732
AK
6291 }
6292 return 0;
6293}
6294
95ba8273 6295static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
6e5d865c 6296{
95ba8273 6297 if (irr == -1 || tpr < irr) {
6e5d865c
YS
6298 vmcs_write32(TPR_THRESHOLD, 0);
6299 return;
6300 }
6301
95ba8273 6302 vmcs_write32(TPR_THRESHOLD, irr);
6e5d865c
YS
6303}
6304
8d14695f
YZ
6305static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
6306{
6307 u32 sec_exec_control;
6308
6309 /*
6310	 * There is no point in enabling virtualized x2APIC mode without
6311	 * also enabling APICv.
6312 */
c7c9c56c
YZ
6313 if (!cpu_has_vmx_virtualize_x2apic_mode() ||
6314 !vmx_vm_has_apicv(vcpu->kvm))
8d14695f
YZ
6315 return;
6316
6317 if (!vm_need_tpr_shadow(vcpu->kvm))
6318 return;
6319
6320 sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
6321
6322 if (set) {
6323 sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
6324 sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
6325 } else {
6326 sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
6327 sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
6328 }
6329 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
6330
6331 vmx_set_msr_bitmap(vcpu);
6332}
6333
c7c9c56c
YZ
6334static void vmx_hwapic_isr_update(struct kvm *kvm, int isr)
6335{
6336 u16 status;
6337 u8 old;
6338
6339 if (!vmx_vm_has_apicv(kvm))
6340 return;
6341
6342 if (isr == -1)
6343 isr = 0;
6344
6345 status = vmcs_read16(GUEST_INTR_STATUS);
6346 old = status >> 8;
6347 if (isr != old) {
6348 status &= 0xff;
6349 status |= isr << 8;
6350 vmcs_write16(GUEST_INTR_STATUS, status);
6351 }
6352}
6353
6354static void vmx_set_rvi(int vector)
6355{
6356 u16 status;
6357 u8 old;
6358
6359 status = vmcs_read16(GUEST_INTR_STATUS);
6360 old = (u8)status & 0xff;
6361 if ((u8)vector != old) {
6362 status &= ~0xff;
6363 status |= (u8)vector;
6364 vmcs_write16(GUEST_INTR_STATUS, status);
6365 }
6366}
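/*
 * Note on the layout used above: the 16-bit GUEST_INTR_STATUS field holds
 * RVI (requesting virtual interrupt) in bits 7:0 and SVI (servicing virtual
 * interrupt) in bits 15:8, which is why vmx_set_rvi() touches only the low
 * byte and vmx_hwapic_isr_update() only the high byte.
 */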
6367
6368static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
6369{
6370 if (max_irr == -1)
6371 return;
6372
6373 vmx_set_rvi(max_irr);
6374}
6375
6376static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
6377{
6378 vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]);
6379 vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]);
6380 vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]);
6381 vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
6382}
6383
51aa01d1 6384static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
cf393f75 6385{
00eba012
AK
6386 u32 exit_intr_info;
6387
6388 if (!(vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY
6389 || vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI))
6390 return;
6391
c5ca8e57 6392 vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
00eba012 6393 exit_intr_info = vmx->exit_intr_info;
a0861c02
AK
6394
6395 /* Handle machine checks before interrupts are enabled */
00eba012 6396 if (is_machine_check(exit_intr_info))
a0861c02
AK
6397 kvm_machine_check();
6398
20f65983 6399 /* We need to handle NMIs before interrupts are enabled */
00eba012 6400 if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
ff9d07a0
ZY
6401 (exit_intr_info & INTR_INFO_VALID_MASK)) {
6402 kvm_before_handle_nmi(&vmx->vcpu);
20f65983 6403 asm("int $2");
ff9d07a0
ZY
6404 kvm_after_handle_nmi(&vmx->vcpu);
6405 }
51aa01d1 6406}
20f65983 6407
51aa01d1
AK
6408static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
6409{
c5ca8e57 6410 u32 exit_intr_info;
51aa01d1
AK
6411 bool unblock_nmi;
6412 u8 vector;
6413 bool idtv_info_valid;
6414
6415 idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
20f65983 6416
cf393f75 6417 if (cpu_has_virtual_nmis()) {
9d58b931
AK
6418 if (vmx->nmi_known_unmasked)
6419 return;
c5ca8e57
AK
6420 /*
6421 * Can't use vmx->exit_intr_info since we're not sure what
6422 * the exit reason is.
6423 */
6424 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
cf393f75
AK
6425 unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
6426 vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
6427 /*
7b4a25cb 6428 * SDM 3: 27.7.1.2 (September 2008)
cf393f75
AK
6429 * Re-set bit "block by NMI" before VM entry if vmexit caused by
6430 * a guest IRET fault.
7b4a25cb
GN
6431 * SDM 3: 23.2.2 (September 2008)
6432 * Bit 12 is undefined in any of the following cases:
6433 * If the VM exit sets the valid bit in the IDT-vectoring
6434 * information field.
6435 * If the VM exit is due to a double fault.
cf393f75 6436 */
7b4a25cb
GN
6437 if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
6438 vector != DF_VECTOR && !idtv_info_valid)
cf393f75
AK
6439 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
6440 GUEST_INTR_STATE_NMI);
9d58b931
AK
6441 else
6442 vmx->nmi_known_unmasked =
6443 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
6444 & GUEST_INTR_STATE_NMI);
3b86cd99
JK
6445 } else if (unlikely(vmx->soft_vnmi_blocked))
6446 vmx->vnmi_blocked_time +=
6447 ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time));
51aa01d1
AK
6448}
6449
3ab66e8a 6450static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
83422e17
AK
6451 u32 idt_vectoring_info,
6452 int instr_len_field,
6453 int error_code_field)
51aa01d1 6454{
51aa01d1
AK
6455 u8 vector;
6456 int type;
6457 bool idtv_info_valid;
6458
6459 idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
668f612f 6460
3ab66e8a
JK
6461 vcpu->arch.nmi_injected = false;
6462 kvm_clear_exception_queue(vcpu);
6463 kvm_clear_interrupt_queue(vcpu);
37b96e98
GN
6464
6465 if (!idtv_info_valid)
6466 return;
6467
3ab66e8a 6468 kvm_make_request(KVM_REQ_EVENT, vcpu);
3842d135 6469
668f612f
AK
6470 vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
6471 type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
37b96e98 6472
64a7ec06 6473 switch (type) {
37b96e98 6474 case INTR_TYPE_NMI_INTR:
3ab66e8a 6475 vcpu->arch.nmi_injected = true;
668f612f 6476 /*
7b4a25cb 6477 * SDM 3: 27.7.1.2 (September 2008)
37b96e98
GN
6478 * Clear bit "block by NMI" before VM entry if a NMI
6479 * delivery faulted.
668f612f 6480 */
3ab66e8a 6481 vmx_set_nmi_mask(vcpu, false);
37b96e98 6482 break;
37b96e98 6483 case INTR_TYPE_SOFT_EXCEPTION:
3ab66e8a 6484 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
66fd3f7f
GN
6485 /* fall through */
6486 case INTR_TYPE_HARD_EXCEPTION:
35920a35 6487 if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
83422e17 6488 u32 err = vmcs_read32(error_code_field);
3ab66e8a 6489 kvm_queue_exception_e(vcpu, vector, err);
35920a35 6490 } else
3ab66e8a 6491 kvm_queue_exception(vcpu, vector);
37b96e98 6492 break;
66fd3f7f 6493 case INTR_TYPE_SOFT_INTR:
3ab66e8a 6494 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
66fd3f7f 6495 /* fall through */
37b96e98 6496 case INTR_TYPE_EXT_INTR:
3ab66e8a 6497 kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR);
37b96e98
GN
6498 break;
6499 default:
6500 break;
f7d9238f 6501 }
cf393f75
AK
6502}
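/*
 * For reference, the format decoded above: the IDT-vectoring information
 * (and VM-entry interruption-information) field holds the vector in bits
 * 7:0, the type in bits 10:8, "deliver error code" in bit 11 and the valid
 * bit in bit 31, matching the VECTORING_INFO_* masks used here.
 */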
6503
83422e17
AK
6504static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
6505{
66c78ae4
NHE
6506 if (is_guest_mode(&vmx->vcpu))
6507 return;
3ab66e8a 6508 __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info,
83422e17
AK
6509 VM_EXIT_INSTRUCTION_LEN,
6510 IDT_VECTORING_ERROR_CODE);
6511}
6512
b463a6f7
AK
6513static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
6514{
66c78ae4
NHE
6515 if (is_guest_mode(vcpu))
6516 return;
3ab66e8a 6517 __vmx_complete_interrupts(vcpu,
b463a6f7
AK
6518 vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
6519 VM_ENTRY_INSTRUCTION_LEN,
6520 VM_ENTRY_EXCEPTION_ERROR_CODE);
6521
6522 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
6523}
6524
d7cd9796
GN
6525static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
6526{
6527 int i, nr_msrs;
6528 struct perf_guest_switch_msr *msrs;
6529
6530 msrs = perf_guest_get_msrs(&nr_msrs);
6531
6532 if (!msrs)
6533 return;
6534
6535 for (i = 0; i < nr_msrs; i++)
6536 if (msrs[i].host == msrs[i].guest)
6537 clear_atomic_switch_msr(vmx, msrs[i].msr);
6538 else
6539 add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
6540 msrs[i].host);
6541}
6542
a3b5ba49 6543static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
6aa8b732 6544{
a2fa3e9f 6545 struct vcpu_vmx *vmx = to_vmx(vcpu);
2a7921b7 6546 unsigned long debugctlmsr;
104f226b 6547
66c78ae4
NHE
6548 if (is_guest_mode(vcpu) && !vmx->nested.nested_run_pending) {
6549 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
6550 if (vmcs12->idt_vectoring_info_field &
6551 VECTORING_INFO_VALID_MASK) {
6552 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
6553 vmcs12->idt_vectoring_info_field);
6554 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
6555 vmcs12->vm_exit_instruction_len);
6556 if (vmcs12->idt_vectoring_info_field &
6557 VECTORING_INFO_DELIVER_CODE_MASK)
6558 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
6559 vmcs12->idt_vectoring_error_code);
6560 }
6561 }
6562
104f226b
AK
6563 /* Record the guest's net vcpu time for enforced NMI injections. */
6564 if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
6565 vmx->entry_time = ktime_get();
6566
6567 /* Don't enter VMX if guest state is invalid, let the exit handler
6568 start emulation until we arrive back to a valid state */
14168786 6569 if (vmx->emulation_required)
104f226b
AK
6570 return;
6571
6572 if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
6573 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
6574 if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
6575 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
6576
6577 /* When single-stepping over STI and MOV SS, we must clear the
6578 * corresponding interruptibility bits in the guest state. Otherwise
6579 * vmentry fails as it then expects bit 14 (BS) in pending debug
6580 * exceptions being set, but that's not correct for the guest debugging
6581 * case. */
6582 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
6583 vmx_set_interrupt_shadow(vcpu, 0);
6584
d7cd9796 6585 atomic_switch_perf_msrs(vmx);
2a7921b7 6586 debugctlmsr = get_debugctlmsr();
d7cd9796 6587
d462b819 6588 vmx->__launched = vmx->loaded_vmcs->launched;
104f226b 6589 asm(
6aa8b732 6590 /* Store host registers */
b188c81f
AK
6591 "push %%" _ASM_DX "; push %%" _ASM_BP ";"
6592 "push %%" _ASM_CX " \n\t" /* placeholder for guest rcx */
6593 "push %%" _ASM_CX " \n\t"
6594 "cmp %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
313dbd49 6595 "je 1f \n\t"
b188c81f 6596 "mov %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
4ecac3fd 6597 __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
313dbd49 6598 "1: \n\t"
d3edefc0 6599 /* Reload cr2 if changed */
b188c81f
AK
6600 "mov %c[cr2](%0), %%" _ASM_AX " \n\t"
6601 "mov %%cr2, %%" _ASM_DX " \n\t"
6602 "cmp %%" _ASM_AX ", %%" _ASM_DX " \n\t"
d3edefc0 6603 "je 2f \n\t"
b188c81f 6604 "mov %%" _ASM_AX", %%cr2 \n\t"
d3edefc0 6605 "2: \n\t"
6aa8b732 6606 /* Check if vmlaunch of vmresume is needed */
e08aa78a 6607 "cmpl $0, %c[launched](%0) \n\t"
6aa8b732 6608 /* Load guest registers. Don't clobber flags. */
b188c81f
AK
6609 "mov %c[rax](%0), %%" _ASM_AX " \n\t"
6610 "mov %c[rbx](%0), %%" _ASM_BX " \n\t"
6611 "mov %c[rdx](%0), %%" _ASM_DX " \n\t"
6612 "mov %c[rsi](%0), %%" _ASM_SI " \n\t"
6613 "mov %c[rdi](%0), %%" _ASM_DI " \n\t"
6614 "mov %c[rbp](%0), %%" _ASM_BP " \n\t"
05b3e0c2 6615#ifdef CONFIG_X86_64
e08aa78a
AK
6616 "mov %c[r8](%0), %%r8 \n\t"
6617 "mov %c[r9](%0), %%r9 \n\t"
6618 "mov %c[r10](%0), %%r10 \n\t"
6619 "mov %c[r11](%0), %%r11 \n\t"
6620 "mov %c[r12](%0), %%r12 \n\t"
6621 "mov %c[r13](%0), %%r13 \n\t"
6622 "mov %c[r14](%0), %%r14 \n\t"
6623 "mov %c[r15](%0), %%r15 \n\t"
6aa8b732 6624#endif
b188c81f 6625 "mov %c[rcx](%0), %%" _ASM_CX " \n\t" /* kills %0 (ecx) */
c801949d 6626
6aa8b732 6627 /* Enter guest mode */
83287ea4 6628 "jne 1f \n\t"
4ecac3fd 6629 __ex(ASM_VMX_VMLAUNCH) "\n\t"
83287ea4
AK
6630 "jmp 2f \n\t"
6631 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
6632 "2: "
6aa8b732 6633 /* Save guest registers, load host registers, keep flags */
b188c81f 6634 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
40712fae 6635 "pop %0 \n\t"
b188c81f
AK
6636 "mov %%" _ASM_AX ", %c[rax](%0) \n\t"
6637 "mov %%" _ASM_BX ", %c[rbx](%0) \n\t"
6638 __ASM_SIZE(pop) " %c[rcx](%0) \n\t"
6639 "mov %%" _ASM_DX ", %c[rdx](%0) \n\t"
6640 "mov %%" _ASM_SI ", %c[rsi](%0) \n\t"
6641 "mov %%" _ASM_DI ", %c[rdi](%0) \n\t"
6642 "mov %%" _ASM_BP ", %c[rbp](%0) \n\t"
05b3e0c2 6643#ifdef CONFIG_X86_64
e08aa78a
AK
6644 "mov %%r8, %c[r8](%0) \n\t"
6645 "mov %%r9, %c[r9](%0) \n\t"
6646 "mov %%r10, %c[r10](%0) \n\t"
6647 "mov %%r11, %c[r11](%0) \n\t"
6648 "mov %%r12, %c[r12](%0) \n\t"
6649 "mov %%r13, %c[r13](%0) \n\t"
6650 "mov %%r14, %c[r14](%0) \n\t"
6651 "mov %%r15, %c[r15](%0) \n\t"
6aa8b732 6652#endif
b188c81f
AK
6653 "mov %%cr2, %%" _ASM_AX " \n\t"
6654 "mov %%" _ASM_AX ", %c[cr2](%0) \n\t"
c801949d 6655
b188c81f 6656 "pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t"
e08aa78a 6657 "setbe %c[fail](%0) \n\t"
83287ea4
AK
6658 ".pushsection .rodata \n\t"
6659 ".global vmx_return \n\t"
6660 "vmx_return: " _ASM_PTR " 2b \n\t"
6661 ".popsection"
e08aa78a 6662 : : "c"(vmx), "d"((unsigned long)HOST_RSP),
d462b819 6663 [launched]"i"(offsetof(struct vcpu_vmx, __launched)),
e08aa78a 6664 [fail]"i"(offsetof(struct vcpu_vmx, fail)),
313dbd49 6665 [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
ad312c7c
ZX
6666 [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
6667 [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
6668 [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
6669 [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])),
6670 [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])),
6671 [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])),
6672 [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])),
05b3e0c2 6673#ifdef CONFIG_X86_64
ad312c7c
ZX
6674 [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])),
6675 [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])),
6676 [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])),
6677 [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])),
6678 [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])),
6679 [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])),
6680 [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
6681 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
6aa8b732 6682#endif
40712fae
AK
6683 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
6684 [wordsize]"i"(sizeof(ulong))
c2036300
LV
6685 : "cc", "memory"
6686#ifdef CONFIG_X86_64
b188c81f 6687 , "rax", "rbx", "rdi", "rsi"
c2036300 6688 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
b188c81f
AK
6689#else
6690 , "eax", "ebx", "edi", "esi"
c2036300
LV
6691#endif
6692 );
6aa8b732 6693
2a7921b7
GN
6694 /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
6695 if (debugctlmsr)
6696 update_debugctlmsr(debugctlmsr);
6697
aa67f609
AK
6698#ifndef CONFIG_X86_64
6699 /*
6700 * The sysexit path does not restore ds/es, so we must set them to
6701 * a reasonable value ourselves.
6702 *
6703 * We can't defer this to vmx_load_host_state() since that function
6704	 * may be executed in interrupt context, which saves and restores segments
6705 * around it, nullifying its effect.
6706 */
6707 loadsegment(ds, __USER_DS);
6708 loadsegment(es, __USER_DS);
6709#endif
6710
6de4f3ad 6711 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
6de12732 6712 | (1 << VCPU_EXREG_RFLAGS)
69c73028 6713 | (1 << VCPU_EXREG_CPL)
aff48baa 6714 | (1 << VCPU_EXREG_PDPTR)
2fb92db1 6715 | (1 << VCPU_EXREG_SEGMENTS)
aff48baa 6716 | (1 << VCPU_EXREG_CR3));
5fdbf976
MT
6717 vcpu->arch.regs_dirty = 0;
6718
1155f76a
AK
6719 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
6720
66c78ae4
NHE
6721 if (is_guest_mode(vcpu)) {
6722 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
6723 vmcs12->idt_vectoring_info_field = vmx->idt_vectoring_info;
6724 if (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) {
6725 vmcs12->idt_vectoring_error_code =
6726 vmcs_read32(IDT_VECTORING_ERROR_CODE);
6727 vmcs12->vm_exit_instruction_len =
6728 vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
6729 }
6730 }
6731
d462b819 6732 vmx->loaded_vmcs->launched = 1;
1b6269db 6733
51aa01d1 6734 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
1e2b1dd7 6735 trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX);
51aa01d1
AK
6736
6737 vmx_complete_atomic_exit(vmx);
6738 vmx_recover_nmi_blocking(vmx);
cf393f75 6739 vmx_complete_interrupts(vmx);
6aa8b732
AK
6740}
6741
6aa8b732
AK
6742static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
6743{
fb3f0f51
RR
6744 struct vcpu_vmx *vmx = to_vmx(vcpu);
6745
cdbecfc3 6746 free_vpid(vmx);
ec378aee 6747 free_nested(vmx);
d462b819 6748 free_loaded_vmcs(vmx->loaded_vmcs);
fb3f0f51
RR
6749 kfree(vmx->guest_msrs);
6750 kvm_vcpu_uninit(vcpu);
a4770347 6751 kmem_cache_free(kvm_vcpu_cache, vmx);
6aa8b732
AK
6752}
6753
fb3f0f51 6754static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
6aa8b732 6755{
fb3f0f51 6756 int err;
c16f862d 6757 struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
15ad7146 6758 int cpu;
6aa8b732 6759
a2fa3e9f 6760 if (!vmx)
fb3f0f51
RR
6761 return ERR_PTR(-ENOMEM);
6762
2384d2b3
SY
6763 allocate_vpid(vmx);
6764
fb3f0f51
RR
6765 err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
6766 if (err)
6767 goto free_vcpu;
965b58a5 6768
a2fa3e9f 6769 vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
be6d05cf 6770 err = -ENOMEM;
fb3f0f51 6771 if (!vmx->guest_msrs) {
fb3f0f51
RR
6772 goto uninit_vcpu;
6773 }
965b58a5 6774
d462b819
NHE
6775 vmx->loaded_vmcs = &vmx->vmcs01;
6776 vmx->loaded_vmcs->vmcs = alloc_vmcs();
6777 if (!vmx->loaded_vmcs->vmcs)
fb3f0f51 6778 goto free_msrs;
d462b819
NHE
6779 if (!vmm_exclusive)
6780 kvm_cpu_vmxon(__pa(per_cpu(vmxarea, raw_smp_processor_id())));
6781 loaded_vmcs_init(vmx->loaded_vmcs);
6782 if (!vmm_exclusive)
6783 kvm_cpu_vmxoff();
a2fa3e9f 6784
15ad7146
AK
6785 cpu = get_cpu();
6786 vmx_vcpu_load(&vmx->vcpu, cpu);
e48672fa 6787 vmx->vcpu.cpu = cpu;
8b9cf98c 6788 err = vmx_vcpu_setup(vmx);
fb3f0f51 6789 vmx_vcpu_put(&vmx->vcpu);
15ad7146 6790 put_cpu();
fb3f0f51
RR
6791 if (err)
6792 goto free_vmcs;
5e4a0b3c 6793 if (vm_need_virtualize_apic_accesses(kvm))
be6d05cf
JK
6794 err = alloc_apic_access_page(kvm);
6795 if (err)
5e4a0b3c 6796 goto free_vmcs;
fb3f0f51 6797
b927a3ce
SY
6798 if (enable_ept) {
6799 if (!kvm->arch.ept_identity_map_addr)
6800 kvm->arch.ept_identity_map_addr =
6801 VMX_EPT_IDENTITY_PAGETABLE_ADDR;
93ea5388 6802 err = -ENOMEM;
b7ebfb05
SY
6803 if (alloc_identity_pagetable(kvm) != 0)
6804 goto free_vmcs;
93ea5388
GN
6805 if (!init_rmode_identity_map(kvm))
6806 goto free_vmcs;
b927a3ce 6807 }
b7ebfb05 6808
a9d30f33
NHE
6809 vmx->nested.current_vmptr = -1ull;
6810 vmx->nested.current_vmcs12 = NULL;
6811
fb3f0f51
RR
6812 return &vmx->vcpu;
6813
6814free_vmcs:
5f3fbc34 6815 free_loaded_vmcs(vmx->loaded_vmcs);
fb3f0f51 6816free_msrs:
fb3f0f51
RR
6817 kfree(vmx->guest_msrs);
6818uninit_vcpu:
6819 kvm_vcpu_uninit(&vmx->vcpu);
6820free_vcpu:
cdbecfc3 6821 free_vpid(vmx);
a4770347 6822 kmem_cache_free(kvm_vcpu_cache, vmx);
fb3f0f51 6823 return ERR_PTR(err);
6aa8b732
AK
6824}
6825
002c7f7c
YS
6826static void __init vmx_check_processor_compat(void *rtn)
6827{
6828 struct vmcs_config vmcs_conf;
6829
6830 *(int *)rtn = 0;
6831 if (setup_vmcs_config(&vmcs_conf) < 0)
6832 *(int *)rtn = -EIO;
6833 if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
6834 printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
6835 smp_processor_id());
6836 *(int *)rtn = -EIO;
6837 }
6838}
6839
67253af5
SY
6840static int get_ept_level(void)
6841{
6842 return VMX_EPT_DEFAULT_GAW + 1;
6843}
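/*
 * Illustrative note, not part of the original source: assuming
 * VMX_EPT_DEFAULT_GAW is 3, as defined in asm/vmx.h, the value returned
 * here tells the MMU code to build 4-level EPT page tables.
 */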
6844
4b12f0de 6845static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
64d4d521 6846{
4b12f0de
SY
6847 u64 ret;
6848
522c68c4
SY
6849 /* For VT-d and EPT combination
6850 * 1. MMIO: always map as UC
6851 * 2. EPT with VT-d:
6852	 *    a. VT-d without snooping control feature: can't guarantee the
6853	 *       result, so try to trust the guest's memory type.
6854	 *    b. VT-d with snooping control feature: the snooping control feature
6855	 *       of the VT-d engine guarantees cache correctness, so set the type
6856	 *       to WB to stay consistent with the host, the same as item 3.
a19a6d11 6857 * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep
522c68c4
SY
6858 * consistent with host MTRR
6859 */
4b12f0de
SY
6860 if (is_mmio)
6861 ret = MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
522c68c4
SY
6862 else if (vcpu->kvm->arch.iommu_domain &&
6863 !(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY))
6864 ret = kvm_get_guest_memory_type(vcpu, gfn) <<
6865 VMX_EPT_MT_EPTE_SHIFT;
4b12f0de 6866 else
522c68c4 6867 ret = (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT)
a19a6d11 6868 | VMX_EPT_IPAT_BIT;
4b12f0de
SY
6869
6870 return ret;
64d4d521
SY
6871}
6872
17cc3935 6873static int vmx_get_lpage_level(void)
344f414f 6874{
878403b7
SY
6875 if (enable_ept && !cpu_has_vmx_ept_1g_page())
6876 return PT_DIRECTORY_LEVEL;
6877 else
6878		/* Shadow paging, and EPT with 1GB-page support, can use 1GB pages */
6879 return PT_PDPE_LEVEL;
344f414f
JR
6880}
6881
0e851880
SY
6882static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
6883{
4e47c7a6
SY
6884 struct kvm_cpuid_entry2 *best;
6885 struct vcpu_vmx *vmx = to_vmx(vcpu);
6886 u32 exec_control;
6887
6888 vmx->rdtscp_enabled = false;
6889 if (vmx_rdtscp_supported()) {
6890 exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
6891 if (exec_control & SECONDARY_EXEC_RDTSCP) {
6892 best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
6893 if (best && (best->edx & bit(X86_FEATURE_RDTSCP)))
6894 vmx->rdtscp_enabled = true;
6895 else {
6896 exec_control &= ~SECONDARY_EXEC_RDTSCP;
6897 vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
6898 exec_control);
6899 }
6900 }
6901 }
ad756a16 6902
ad756a16
MJ
6903 /* Exposing INVPCID only when PCID is exposed */
6904 best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
6905 if (vmx_invpcid_supported() &&
4f977045 6906 best && (best->ebx & bit(X86_FEATURE_INVPCID)) &&
ad756a16 6907 guest_cpuid_has_pcid(vcpu)) {
29282fde 6908 exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
ad756a16
MJ
6909 exec_control |= SECONDARY_EXEC_ENABLE_INVPCID;
6910 vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
6911 exec_control);
6912 } else {
29282fde
TI
6913 if (cpu_has_secondary_exec_ctrls()) {
6914 exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
6915 exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
6916 vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
6917 exec_control);
6918 }
ad756a16 6919 if (best)
4f977045 6920 best->ebx &= ~bit(X86_FEATURE_INVPCID);
ad756a16 6921 }
0e851880
SY
6922}
6923
d4330ef2
JR
6924static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
6925{
7b8050f5
NHE
6926 if (func == 1 && nested)
6927 entry->ecx |= bit(X86_FEATURE_VMX);
d4330ef2
JR
6928}
6929
fe3ef05c
NHE
6930/*
6931 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
6932 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
6933	 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
6934 * guest in a way that will both be appropriate to L1's requests, and our
6935 * needs. In addition to modifying the active vmcs (which is vmcs02), this
6936 * function also has additional necessary side-effects, like setting various
6937 * vcpu->arch fields.
6938 */
6939static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
6940{
6941 struct vcpu_vmx *vmx = to_vmx(vcpu);
6942 u32 exec_control;
6943
6944 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
6945 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
6946 vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
6947 vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
6948 vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
6949 vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
6950 vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
6951 vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
6952 vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
6953 vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
6954 vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
6955 vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
6956 vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
6957 vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
6958 vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
6959 vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
6960 vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
6961 vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
6962 vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
6963 vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
6964 vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
6965 vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
6966 vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
6967 vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
6968 vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
6969 vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
6970 vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
6971 vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
6972 vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
6973 vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
6974 vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
6975 vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
6976 vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
6977 vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
6978 vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
6979 vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
6980
6981 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
6982 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
6983 vmcs12->vm_entry_intr_info_field);
6984 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
6985 vmcs12->vm_entry_exception_error_code);
6986 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
6987 vmcs12->vm_entry_instruction_len);
6988 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
6989 vmcs12->guest_interruptibility_info);
6990 vmcs_write32(GUEST_ACTIVITY_STATE, vmcs12->guest_activity_state);
6991 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
503cd0c5 6992 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
fe3ef05c
NHE
6993 vmcs_writel(GUEST_RFLAGS, vmcs12->guest_rflags);
6994 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
6995 vmcs12->guest_pending_dbg_exceptions);
6996 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
6997 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
6998
6999 vmcs_write64(VMCS_LINK_POINTER, -1ull);
7000
7001 vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
7002 (vmcs_config.pin_based_exec_ctrl |
7003 vmcs12->pin_based_vm_exec_control));
7004
7005 /*
7006 * Whether page-faults are trapped is determined by a combination of
7007 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
7008 * If enable_ept, L0 doesn't care about page faults and we should
7009 * set all of these to L1's desires. However, if !enable_ept, L0 does
7010 * care about (at least some) page faults, and because it is not easy
7011 * (if at all possible?) to merge L0 and L1's desires, we simply ask
7012 * to exit on each and every L2 page fault. This is done by setting
7013 * MASK=MATCH=0 and (see below) EB.PF=1.
7014 * Note that below we don't need special code to set EB.PF beyond the
7015 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
7016 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
7017 * !enable_ept, EB.PF is 1, so the "or" will always be 1.
7018 *
7019 * A problem with this approach (when !enable_ept) is that L1 may be
7020 * injected with more page faults than it asked for. This could have
7021 * caused problems, but in practice existing hypervisors don't care.
7022 * To fix this, we will need to emulate the PFEC checking (on the L1
7023 * page tables), using walk_addr(), when injecting PFs to L1.
7024 */
7025 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
7026 enable_ept ? vmcs12->page_fault_error_code_mask : 0);
7027 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
7028 enable_ept ? vmcs12->page_fault_error_code_match : 0);
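	/*
	 * Illustrative note, not part of the original source: with
	 * MASK == MATCH == 0 every page-fault error code "matches", and since
	 * the !enable_ept case keeps EB.PF set (see update_exception_bitmap()
	 * further below), every L2 page fault then exits to L0 as the comment
	 * above describes.
	 */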
7029
7030 if (cpu_has_secondary_exec_ctrls()) {
7031 u32 exec_control = vmx_secondary_exec_control(vmx);
7032 if (!vmx->rdtscp_enabled)
7033 exec_control &= ~SECONDARY_EXEC_RDTSCP;
7034 /* Take the following fields only from vmcs12 */
7035 exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
7036 if (nested_cpu_has(vmcs12,
7037 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
7038 exec_control |= vmcs12->secondary_vm_exec_control;
7039
7040 if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) {
7041 /*
7042 * Translate L1 physical address to host physical
7043 * address for vmcs02. Keep the page pinned, so this
7044 * physical address remains valid. We keep a reference
7045 * to it so we can release it later.
7046 */
7047 if (vmx->nested.apic_access_page) /* shouldn't happen */
7048 nested_release_page(vmx->nested.apic_access_page);
7049 vmx->nested.apic_access_page =
7050 nested_get_page(vcpu, vmcs12->apic_access_addr);
7051 /*
7052 * If translation failed, no matter: This feature asks
7053 * to exit when accessing the given address, and if it
7054 * can never be accessed, this feature won't do
7055 * anything anyway.
7056 */
7057 if (!vmx->nested.apic_access_page)
7058 exec_control &=
7059 ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
7060 else
7061 vmcs_write64(APIC_ACCESS_ADDR,
7062 page_to_phys(vmx->nested.apic_access_page));
7063 }
7064
7065 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
7066 }
7067
7068
7069 /*
7070 * Set host-state according to L0's settings (vmcs12 is irrelevant here)
7071 * Some constant fields are set here by vmx_set_constant_host_state().
7072 * Other fields are different per CPU, and will be set later when
7073 * vmx_vcpu_load() is called, and when vmx_save_host_state() is called.
7074 */
7075 vmx_set_constant_host_state();
7076
7077 /*
7078 * HOST_RSP is normally set correctly in vmx_vcpu_run() just before
7079 * entry, but only if the current (host) sp changed from the value
7080 * we wrote last (vmx->host_rsp). This cache is no longer relevant
7081 * if we switch vmcs, and rather than hold a separate cache per vmcs,
7082 * here we just force the write to happen on entry.
7083 */
7084 vmx->host_rsp = 0;
7085
7086 exec_control = vmx_exec_control(vmx); /* L0's desires */
7087 exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
7088 exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
7089 exec_control &= ~CPU_BASED_TPR_SHADOW;
7090 exec_control |= vmcs12->cpu_based_vm_exec_control;
7091 /*
7092 * Merging of IO and MSR bitmaps not currently supported.
7093 * Rather, exit every time.
7094 */
7095 exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
7096 exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
7097 exec_control |= CPU_BASED_UNCOND_IO_EXITING;
7098
7099 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
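	/*
	 * Illustrative note, not part of the original source: clearing
	 * CPU_BASED_USE_IO_BITMAPS while setting CPU_BASED_UNCOND_IO_EXITING
	 * makes every I/O instruction in L2 exit to L0, which avoids having to
	 * merge L0's and L1's I/O bitmaps at the cost of extra exits.
	 */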
7100
7101 /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
7102 * bitwise-or of what L1 wants to trap for L2, and what we want to
7103 * trap. Note that CR0.TS also needs updating - we do this later.
7104 */
7105 update_exception_bitmap(vcpu);
7106 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
7107 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
7108
7109	/* Note: IA32E_MODE and LOAD_IA32_EFER are modified by vmx_set_efer below */
7110 vmcs_write32(VM_EXIT_CONTROLS,
7111 vmcs12->vm_exit_controls | vmcs_config.vmexit_ctrl);
7112 vmcs_write32(VM_ENTRY_CONTROLS, vmcs12->vm_entry_controls |
7113 (vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE));
7114
7115 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)
7116 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
7117 else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
7118 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
7119
7120
7121 set_cr4_guest_host_mask(vmx);
7122
27fc51b2
NHE
7123 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
7124 vmcs_write64(TSC_OFFSET,
7125 vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset);
7126 else
7127 vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
fe3ef05c
NHE
7128
7129 if (enable_vpid) {
7130 /*
7131 * Trivially support vpid by letting L2s share their parent
7132 * L1's vpid. TODO: move to a more elaborate solution, giving
7133 * each L2 its own vpid and exposing the vpid feature to L1.
7134 */
7135 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
7136 vmx_flush_tlb(vcpu);
7137 }
7138
7139 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)
7140 vcpu->arch.efer = vmcs12->guest_ia32_efer;
7141 if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
7142 vcpu->arch.efer |= (EFER_LMA | EFER_LME);
7143 else
7144 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
7145 /* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
7146 vmx_set_efer(vcpu, vcpu->arch.efer);
7147
7148 /*
7149 * This sets GUEST_CR0 to vmcs12->guest_cr0, with possibly a modified
7150 * TS bit (for lazy fpu) and bits which we consider mandatory enabled.
7151 * The CR0_READ_SHADOW is what L2 should have expected to read given
7152 * the specifications by L1; It's not enough to take
7153	 * vmcs12->cr0_read_shadow because on our cr0_guest_host_mask we may
7154 * have more bits than L1 expected.
7155 */
7156 vmx_set_cr0(vcpu, vmcs12->guest_cr0);
7157 vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
7158
7159 vmx_set_cr4(vcpu, vmcs12->guest_cr4);
7160 vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
7161
7162 /* shadow page tables on either EPT or shadow page tables */
7163 kvm_set_cr3(vcpu, vmcs12->guest_cr3);
7164 kvm_mmu_reset_context(vcpu);
7165
7166 kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
7167 kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
7168}
7169
cd232ad0
NHE
7170/*
7171 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
7172 * for running an L2 nested guest.
7173 */
7174static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
7175{
7176 struct vmcs12 *vmcs12;
7177 struct vcpu_vmx *vmx = to_vmx(vcpu);
7178 int cpu;
7179 struct loaded_vmcs *vmcs02;
7180
7181 if (!nested_vmx_check_permission(vcpu) ||
7182 !nested_vmx_check_vmcs12(vcpu))
7183 return 1;
7184
7185 skip_emulated_instruction(vcpu);
7186 vmcs12 = get_vmcs12(vcpu);
7187
7c177938
NHE
7188 /*
7189 * The nested entry process starts with enforcing various prerequisites
7190	 * on vmcs12 as required by the Intel SDM, and acting appropriately when
7191 * they fail: As the SDM explains, some conditions should cause the
7192 * instruction to fail, while others will cause the instruction to seem
7193 * to succeed, but return an EXIT_REASON_INVALID_STATE.
7194 * To speed up the normal (success) code path, we should avoid checking
7195 * for misconfigurations which will anyway be caught by the processor
7196 * when using the merged vmcs02.
7197 */
7198 if (vmcs12->launch_state == launch) {
7199 nested_vmx_failValid(vcpu,
7200 launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
7201 : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
7202 return 1;
7203 }
7204
7205 if ((vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_MSR_BITMAPS) &&
7206 !IS_ALIGNED(vmcs12->msr_bitmap, PAGE_SIZE)) {
7207 /*TODO: Also verify bits beyond physical address width are 0*/
7208 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
7209 return 1;
7210 }
7211
7212 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
7213 !IS_ALIGNED(vmcs12->apic_access_addr, PAGE_SIZE)) {
7214 /*TODO: Also verify bits beyond physical address width are 0*/
7215 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
7216 return 1;
7217 }
7218
7219 if (vmcs12->vm_entry_msr_load_count > 0 ||
7220 vmcs12->vm_exit_msr_load_count > 0 ||
7221 vmcs12->vm_exit_msr_store_count > 0) {
bd80158a
JK
7222 pr_warn_ratelimited("%s: VMCS MSR_{LOAD,STORE} unsupported\n",
7223 __func__);
7c177938
NHE
7224 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
7225 return 1;
7226 }
7227
7228 if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
7229 nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high) ||
7230 !vmx_control_verify(vmcs12->secondary_vm_exec_control,
7231 nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high) ||
7232 !vmx_control_verify(vmcs12->pin_based_vm_exec_control,
7233 nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high) ||
7234 !vmx_control_verify(vmcs12->vm_exit_controls,
7235 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high) ||
7236 !vmx_control_verify(vmcs12->vm_entry_controls,
7237 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high))
7238 {
7239 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
7240 return 1;
7241 }
7242
7243 if (((vmcs12->host_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) ||
7244 ((vmcs12->host_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
7245 nested_vmx_failValid(vcpu,
7246 VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
7247 return 1;
7248 }
7249
7250 if (((vmcs12->guest_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) ||
7251 ((vmcs12->guest_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
7252 nested_vmx_entry_failure(vcpu, vmcs12,
7253 EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
7254 return 1;
7255 }
7256 if (vmcs12->vmcs_link_pointer != -1ull) {
7257 nested_vmx_entry_failure(vcpu, vmcs12,
7258 EXIT_REASON_INVALID_STATE, ENTRY_FAIL_VMCS_LINK_PTR);
7259 return 1;
7260 }
7261
7262 /*
7263 * We're finally done with prerequisite checking, and can start with
7264 * the nested entry.
7265 */
7266
cd232ad0
NHE
7267 vmcs02 = nested_get_current_vmcs02(vmx);
7268 if (!vmcs02)
7269 return -ENOMEM;
7270
7271 enter_guest_mode(vcpu);
7272
7273 vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET);
7274
7275 cpu = get_cpu();
7276 vmx->loaded_vmcs = vmcs02;
7277 vmx_vcpu_put(vcpu);
7278 vmx_vcpu_load(vcpu, cpu);
7279 vcpu->cpu = cpu;
7280 put_cpu();
7281
36c3cc42
JK
7282 vmx_segment_cache_clear(vmx);
7283
cd232ad0
NHE
7284 vmcs12->launch_state = 1;
7285
7286 prepare_vmcs02(vcpu, vmcs12);
7287
7288 /*
7289 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
7290 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
7291 * returned as far as L1 is concerned. It will only return (and set
7292 * the success flag) when L2 exits (see nested_vmx_vmexit()).
7293 */
7294 return 1;
7295}
7296
4704d0be
NHE
7297/*
7298 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
7299	 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
7300 * This function returns the new value we should put in vmcs12.guest_cr0.
7301 * It's not enough to just return the vmcs02 GUEST_CR0. Rather,
7302 * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
7303 * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
7304 * didn't trap the bit, because if L1 did, so would L0).
7305 * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have
7306 * been modified by L2, and L1 knows it. So just leave the old value of
7307 * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
7308 * isn't relevant, because if L0 traps this bit it can set it to anything.
7309 * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
7310 * changed these bits, and therefore they need to be updated, but L0
7311 * didn't necessarily allow them to be changed in GUEST_CR0 - and rather
7312 * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
7313 */
7314static inline unsigned long
7315vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
7316{
7317 return
7318 /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
7319 /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
7320 /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
7321 vcpu->arch.cr0_guest_owned_bits));
7322}
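/*
 * Worked example, not part of the original source: consider CR0.TS while the
 * guest's FPU is not loaded, so L0 traps TS but L1 need not (case 3 above).
 * L2's last requested value of the bit lives in vmcs02's CR0_READ_SHADOW
 * (vmx_set_cr0() puts it there), so the expression above picks it up from the
 * read shadow rather than from GUEST_CR0.
 */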
7323
7324static inline unsigned long
7325vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
7326{
7327 return
7328 /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
7329 /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
7330 /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
7331 vcpu->arch.cr4_guest_owned_bits));
7332}
7333
7334/*
7335 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
7336 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
7337 * and this function updates it to reflect the changes to the guest state while
7338 * L2 was running (and perhaps made some exits which were handled directly by L0
7339 * without going back to L1), and to reflect the exit reason.
7340 * Note that we do not have to copy here all VMCS fields, just those that
7341 * could have changed by the L2 guest or the exit - i.e., the guest-state and
7342 * exit-information fields only. Other fields are modified by L1 with VMWRITE,
7343 * which already writes to vmcs12 directly.
7344 */
733568f9 7345static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
4704d0be
NHE
7346{
7347 /* update guest state fields: */
7348 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
7349 vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
7350
7351 kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
7352 vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
7353 vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP);
7354 vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
7355
7356 vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
7357 vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
7358 vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
7359 vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
7360 vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
7361 vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
7362 vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
7363 vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
7364 vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
7365 vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
7366 vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
7367 vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
7368 vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
7369 vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
7370 vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
7371 vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
7372 vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
7373 vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
7374 vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
7375 vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
7376 vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
7377 vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
7378 vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
7379 vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
7380 vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
7381 vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
7382 vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
7383 vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
7384 vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
7385 vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
7386 vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
7387 vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
7388 vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
7389 vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
7390 vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
7391 vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
7392
7393 vmcs12->guest_activity_state = vmcs_read32(GUEST_ACTIVITY_STATE);
7394 vmcs12->guest_interruptibility_info =
7395 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
7396 vmcs12->guest_pending_dbg_exceptions =
7397 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
7398
7399 /* TODO: These cannot have changed unless we have MSR bitmaps and
7400 * the relevant bit asks not to trap the change */
7401 vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
7402 if (vmcs12->vm_entry_controls & VM_EXIT_SAVE_IA32_PAT)
7403 vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT);
7404 vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
7405 vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
7406 vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
7407
7408 /* update exit information fields: */
7409
957c897e 7410 vmcs12->vm_exit_reason = to_vmx(vcpu)->exit_reason;
4704d0be
NHE
7411 vmcs12->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
7412
7413 vmcs12->vm_exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
7414 vmcs12->vm_exit_intr_error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
44ceb9d6 7415 vmcs12->idt_vectoring_info_field = to_vmx(vcpu)->idt_vectoring_info;
4704d0be
NHE
7416 vmcs12->idt_vectoring_error_code =
7417 vmcs_read32(IDT_VECTORING_ERROR_CODE);
7418 vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
7419 vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
7420
7421 /* clear vm-entry fields which are to be cleared on exit */
7422 if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
7423 vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
7424}
7425
7426/*
7427	 * A part of what we need to do when the nested L2 guest exits and we want to
7428 * run its L1 parent, is to reset L1's guest state to the host state specified
7429 * in vmcs12.
7430 * This function is to be called not only on normal nested exit, but also on
7431 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
7432 * Failures During or After Loading Guest State").
7433 * This function should be called when the active VMCS is L1's (vmcs01).
7434 */
733568f9
JK
7435static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
7436 struct vmcs12 *vmcs12)
4704d0be
NHE
7437{
7438 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
7439 vcpu->arch.efer = vmcs12->host_ia32_efer;
7440 if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
7441 vcpu->arch.efer |= (EFER_LMA | EFER_LME);
7442 else
7443 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
7444 vmx_set_efer(vcpu, vcpu->arch.efer);
7445
7446 kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp);
7447 kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip);
c4627c72 7448 vmx_set_rflags(vcpu, X86_EFLAGS_BIT1);
4704d0be
NHE
7449 /*
7450 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
7451 * actually changed, because it depends on the current state of
7452 * fpu_active (which may have changed).
7453 * Note that vmx_set_cr0 refers to efer set above.
7454 */
7455 kvm_set_cr0(vcpu, vmcs12->host_cr0);
7456 /*
7457 * If we did fpu_activate()/fpu_deactivate() during L2's run, we need
7458 * to apply the same changes to L1's vmcs. We just set cr0 correctly,
7459 * but we also need to update cr0_guest_host_mask and exception_bitmap.
7460 */
7461 update_exception_bitmap(vcpu);
7462 vcpu->arch.cr0_guest_owned_bits = (vcpu->fpu_active ? X86_CR0_TS : 0);
7463 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
7464
7465 /*
7466 * Note that CR4_GUEST_HOST_MASK is already set in the original vmcs01
7467 * (KVM doesn't change it)- no reason to call set_cr4_guest_host_mask();
7468 */
7469 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
7470 kvm_set_cr4(vcpu, vmcs12->host_cr4);
7471
7472 /* shadow page tables on either EPT or shadow page tables */
7473 kvm_set_cr3(vcpu, vmcs12->host_cr3);
7474 kvm_mmu_reset_context(vcpu);
7475
7476 if (enable_vpid) {
7477 /*
7478 * Trivially support vpid by letting L2s share their parent
7479 * L1's vpid. TODO: move to a more elaborate solution, giving
7480 * each L2 its own vpid and exposing the vpid feature to L1.
7481 */
7482 vmx_flush_tlb(vcpu);
7483 }
7484
7485
7486 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
7487 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
7488 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
7489 vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
7490 vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
7491 vmcs_writel(GUEST_TR_BASE, vmcs12->host_tr_base);
7492 vmcs_writel(GUEST_GS_BASE, vmcs12->host_gs_base);
7493 vmcs_writel(GUEST_FS_BASE, vmcs12->host_fs_base);
7494 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->host_es_selector);
7495 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->host_cs_selector);
7496 vmcs_write16(GUEST_SS_SELECTOR, vmcs12->host_ss_selector);
7497 vmcs_write16(GUEST_DS_SELECTOR, vmcs12->host_ds_selector);
7498 vmcs_write16(GUEST_FS_SELECTOR, vmcs12->host_fs_selector);
7499 vmcs_write16(GUEST_GS_SELECTOR, vmcs12->host_gs_selector);
7500 vmcs_write16(GUEST_TR_SELECTOR, vmcs12->host_tr_selector);
7501
7502 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT)
7503 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
7504 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
7505 vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL,
7506 vmcs12->host_ia32_perf_global_ctrl);
503cd0c5
JK
7507
7508 kvm_set_dr(vcpu, 7, 0x400);
7509 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
4704d0be
NHE
7510}
7511
7512/*
7513 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
7514 * and modify vmcs12 to make it see what it would expect to see there if
7515 * L2 was its real guest. Must only be called when in L2 (is_guest_mode())
7516 */
7517static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
7518{
7519 struct vcpu_vmx *vmx = to_vmx(vcpu);
7520 int cpu;
7521 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
7522
7523 leave_guest_mode(vcpu);
7524 prepare_vmcs12(vcpu, vmcs12);
7525
7526 cpu = get_cpu();
7527 vmx->loaded_vmcs = &vmx->vmcs01;
7528 vmx_vcpu_put(vcpu);
7529 vmx_vcpu_load(vcpu, cpu);
7530 vcpu->cpu = cpu;
7531 put_cpu();
7532
36c3cc42
JK
7533 vmx_segment_cache_clear(vmx);
7534
4704d0be
NHE
7535 /* if no vmcs02 cache requested, remove the one we used */
7536 if (VMCS02_POOL_SIZE == 0)
7537 nested_free_vmcs02(vmx, vmx->nested.current_vmptr);
7538
7539 load_vmcs12_host_state(vcpu, vmcs12);
7540
27fc51b2 7541 /* Update TSC_OFFSET if TSC was changed while L2 ran */
4704d0be
NHE
7542 vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
7543
7544 /* This is needed for same reason as it was needed in prepare_vmcs02 */
7545 vmx->host_rsp = 0;
7546
7547 /* Unpin physical memory we referred to in vmcs02 */
7548 if (vmx->nested.apic_access_page) {
7549 nested_release_page(vmx->nested.apic_access_page);
7550 vmx->nested.apic_access_page = 0;
7551 }
7552
7553 /*
7554 * Exiting from L2 to L1, we're now back to L1 which thinks it just
7555 * finished a VMLAUNCH or VMRESUME instruction, so we need to set the
7556 * success or failure flag accordingly.
7557 */
7558 if (unlikely(vmx->fail)) {
7559 vmx->fail = 0;
7560 nested_vmx_failValid(vcpu, vmcs_read32(VM_INSTRUCTION_ERROR));
7561 } else
7562 nested_vmx_succeed(vcpu);
7563}
7564
7c177938
NHE
7565/*
7566 * L1's failure to enter L2 is a subset of a normal exit, as explained in
7567 * 23.7 "VM-entry failures during or after loading guest state" (this also
7568 * lists the acceptable exit-reason and exit-qualification parameters).
7569 * It should only be called before L2 actually succeeded to run, and when
7570 * vmcs01 is current (it doesn't leave_guest_mode() or switch vmcss).
7571 */
7572static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
7573 struct vmcs12 *vmcs12,
7574 u32 reason, unsigned long qualification)
7575{
7576 load_vmcs12_host_state(vcpu, vmcs12);
7577 vmcs12->vm_exit_reason = reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
7578 vmcs12->exit_qualification = qualification;
7579 nested_vmx_succeed(vcpu);
7580}
7581
8a76d7f2
JR
7582static int vmx_check_intercept(struct kvm_vcpu *vcpu,
7583 struct x86_instruction_info *info,
7584 enum x86_intercept_stage stage)
7585{
7586 return X86EMUL_CONTINUE;
7587}
7588
cbdd1bea 7589static struct kvm_x86_ops vmx_x86_ops = {
6aa8b732
AK
7590 .cpu_has_kvm_support = cpu_has_kvm_support,
7591 .disabled_by_bios = vmx_disabled_by_bios,
7592 .hardware_setup = hardware_setup,
7593 .hardware_unsetup = hardware_unsetup,
002c7f7c 7594 .check_processor_compatibility = vmx_check_processor_compat,
6aa8b732
AK
7595 .hardware_enable = hardware_enable,
7596 .hardware_disable = hardware_disable,
04547156 7597 .cpu_has_accelerated_tpr = report_flexpriority,
6aa8b732
AK
7598
7599 .vcpu_create = vmx_create_vcpu,
7600 .vcpu_free = vmx_free_vcpu,
04d2cc77 7601 .vcpu_reset = vmx_vcpu_reset,
6aa8b732 7602
04d2cc77 7603 .prepare_guest_switch = vmx_save_host_state,
6aa8b732
AK
7604 .vcpu_load = vmx_vcpu_load,
7605 .vcpu_put = vmx_vcpu_put,
7606
c8639010 7607 .update_db_bp_intercept = update_exception_bitmap,
6aa8b732
AK
7608 .get_msr = vmx_get_msr,
7609 .set_msr = vmx_set_msr,
7610 .get_segment_base = vmx_get_segment_base,
7611 .get_segment = vmx_get_segment,
7612 .set_segment = vmx_set_segment,
2e4d2653 7613 .get_cpl = vmx_get_cpl,
6aa8b732 7614 .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
e8467fda 7615 .decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
aff48baa 7616 .decache_cr3 = vmx_decache_cr3,
25c4c276 7617 .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
6aa8b732 7618 .set_cr0 = vmx_set_cr0,
6aa8b732
AK
7619 .set_cr3 = vmx_set_cr3,
7620 .set_cr4 = vmx_set_cr4,
6aa8b732 7621 .set_efer = vmx_set_efer,
6aa8b732
AK
7622 .get_idt = vmx_get_idt,
7623 .set_idt = vmx_set_idt,
7624 .get_gdt = vmx_get_gdt,
7625 .set_gdt = vmx_set_gdt,
020df079 7626 .set_dr7 = vmx_set_dr7,
5fdbf976 7627 .cache_reg = vmx_cache_reg,
6aa8b732
AK
7628 .get_rflags = vmx_get_rflags,
7629 .set_rflags = vmx_set_rflags,
ebcbab4c 7630 .fpu_activate = vmx_fpu_activate,
02daab21 7631 .fpu_deactivate = vmx_fpu_deactivate,
6aa8b732
AK
7632
7633 .tlb_flush = vmx_flush_tlb,
6aa8b732 7634
6aa8b732 7635 .run = vmx_vcpu_run,
6062d012 7636 .handle_exit = vmx_handle_exit,
6aa8b732 7637 .skip_emulated_instruction = skip_emulated_instruction,
2809f5d2
GC
7638 .set_interrupt_shadow = vmx_set_interrupt_shadow,
7639 .get_interrupt_shadow = vmx_get_interrupt_shadow,
102d8325 7640 .patch_hypercall = vmx_patch_hypercall,
2a8067f1 7641 .set_irq = vmx_inject_irq,
95ba8273 7642 .set_nmi = vmx_inject_nmi,
298101da 7643 .queue_exception = vmx_queue_exception,
b463a6f7 7644 .cancel_injection = vmx_cancel_injection,
78646121 7645 .interrupt_allowed = vmx_interrupt_allowed,
95ba8273 7646 .nmi_allowed = vmx_nmi_allowed,
3cfc3092
JK
7647 .get_nmi_mask = vmx_get_nmi_mask,
7648 .set_nmi_mask = vmx_set_nmi_mask,
95ba8273
GN
7649 .enable_nmi_window = enable_nmi_window,
7650 .enable_irq_window = enable_irq_window,
7651 .update_cr8_intercept = update_cr8_intercept,
8d14695f 7652 .set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode,
c7c9c56c
YZ
7653 .vm_has_apicv = vmx_vm_has_apicv,
7654 .load_eoi_exitmap = vmx_load_eoi_exitmap,
7655 .hwapic_irr_update = vmx_hwapic_irr_update,
7656 .hwapic_isr_update = vmx_hwapic_isr_update,
95ba8273 7657
cbc94022 7658 .set_tss_addr = vmx_set_tss_addr,
67253af5 7659 .get_tdp_level = get_ept_level,
4b12f0de 7660 .get_mt_mask = vmx_get_mt_mask,
229456fc 7661
586f9607 7662 .get_exit_info = vmx_get_exit_info,
586f9607 7663
17cc3935 7664 .get_lpage_level = vmx_get_lpage_level,
0e851880
SY
7665
7666 .cpuid_update = vmx_cpuid_update,
4e47c7a6
SY
7667
7668 .rdtscp_supported = vmx_rdtscp_supported,
ad756a16 7669 .invpcid_supported = vmx_invpcid_supported,
d4330ef2
JR
7670
7671 .set_supported_cpuid = vmx_set_supported_cpuid,
f5f48ee1
SY
7672
7673 .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
99e3e30a 7674
4051b188 7675 .set_tsc_khz = vmx_set_tsc_khz,
ba904635 7676 .read_tsc_offset = vmx_read_tsc_offset,
99e3e30a 7677 .write_tsc_offset = vmx_write_tsc_offset,
e48672fa 7678 .adjust_tsc_offset = vmx_adjust_tsc_offset,
857e4099 7679 .compute_tsc_offset = vmx_compute_tsc_offset,
d5c1785d 7680 .read_l1_tsc = vmx_read_l1_tsc,
1c97f0a0
JR
7681
7682 .set_tdp_cr3 = vmx_set_cr3,
8a76d7f2
JR
7683
7684 .check_intercept = vmx_check_intercept,
6aa8b732
AK
7685};
7686
7687static int __init vmx_init(void)
7688{
8d14695f 7689 int r, i, msr;
26bb0981
AK
7690
7691 rdmsrl_safe(MSR_EFER, &host_efer);
7692
7693 for (i = 0; i < NR_VMX_MSR; ++i)
7694 kvm_define_shared_msr(i, vmx_msr_index[i]);
fdef3ad1 7695
3e7c73e9 7696 vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
fdef3ad1
HQ
7697 if (!vmx_io_bitmap_a)
7698 return -ENOMEM;
7699
2106a548
GC
7700 r = -ENOMEM;
7701
3e7c73e9 7702 vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
2106a548 7703 if (!vmx_io_bitmap_b)
fdef3ad1 7704 goto out;
fdef3ad1 7705
5897297b 7706 vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL);
2106a548 7707 if (!vmx_msr_bitmap_legacy)
25c5f225 7708 goto out1;
2106a548 7709
8d14695f
YZ
7710 vmx_msr_bitmap_legacy_x2apic =
7711 (unsigned long *)__get_free_page(GFP_KERNEL);
7712 if (!vmx_msr_bitmap_legacy_x2apic)
7713 goto out2;
25c5f225 7714
5897297b 7715 vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
2106a548 7716 if (!vmx_msr_bitmap_longmode)
8d14695f 7717 goto out3;
2106a548 7718
8d14695f
YZ
7719 vmx_msr_bitmap_longmode_x2apic =
7720 (unsigned long *)__get_free_page(GFP_KERNEL);
7721 if (!vmx_msr_bitmap_longmode_x2apic)
7722 goto out4;
5897297b 7723
fdef3ad1
HQ
7724 /*
7725 * Allow direct access to the PC debug port (it is often used for I/O
7726 * delays, but the vmexits simply slow things down).
7727 */
3e7c73e9
AK
7728 memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
7729 clear_bit(0x80, vmx_io_bitmap_a);
fdef3ad1 7730
3e7c73e9 7731 memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
fdef3ad1 7732
5897297b
AK
7733 memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
7734 memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
25c5f225 7735
2384d2b3
SY
7736 set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
7737
0ee75bea
AK
7738 r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
7739 __alignof__(struct vcpu_vmx), THIS_MODULE);
fdef3ad1 7740 if (r)
5897297b 7741 goto out3;
25c5f225 7742
8f536b76
ZY
7743#ifdef CONFIG_KEXEC
7744 rcu_assign_pointer(crash_vmclear_loaded_vmcss,
7745 crash_vmclear_local_loaded_vmcss);
7746#endif
7747
5897297b
AK
7748 vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
7749 vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
7750 vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
7751 vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
7752 vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
7753 vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
8d14695f
YZ
7754 memcpy(vmx_msr_bitmap_legacy_x2apic,
7755 vmx_msr_bitmap_legacy, PAGE_SIZE);
7756 memcpy(vmx_msr_bitmap_longmode_x2apic,
7757 vmx_msr_bitmap_longmode, PAGE_SIZE);
7758
c7c9c56c 7759 if (enable_apicv_reg_vid) {
8d14695f
YZ
7760 for (msr = 0x800; msr <= 0x8ff; msr++)
7761 vmx_disable_intercept_msr_read_x2apic(msr);
7762
7763		/* According to the SDM, in x2apic mode the whole id reg is used.
7764		 * But KVM only uses the highest eight bits, so we need to
7765		 * intercept it */
7766 vmx_enable_intercept_msr_read_x2apic(0x802);
7767 /* TMCCT */
7768 vmx_enable_intercept_msr_read_x2apic(0x839);
7769 /* TPR */
7770 vmx_disable_intercept_msr_write_x2apic(0x808);
c7c9c56c
YZ
7771 /* EOI */
7772 vmx_disable_intercept_msr_write_x2apic(0x80b);
7773 /* SELF-IPI */
7774 vmx_disable_intercept_msr_write_x2apic(0x83f);
8d14695f 7775 }
fdef3ad1 7776
089d034e 7777 if (enable_ept) {
3f6d8c8a
XH
7778 kvm_mmu_set_mask_ptes(0ull,
7779 (enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
7780 (enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
7781 0ull, VMX_EPT_EXECUTABLE_MASK);
ce88decf 7782 ept_set_mmio_spte_mask();
5fdbcb9d
SY
7783 kvm_enable_tdp();
7784 } else
7785 kvm_disable_tdp();
1439442c 7786
fdef3ad1
HQ
7787 return 0;
7788
8d14695f 7789out4:
5897297b 7790 free_page((unsigned long)vmx_msr_bitmap_longmode);
8d14695f
YZ
7791out3:
7792 free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
25c5f225 7793out2:
5897297b 7794 free_page((unsigned long)vmx_msr_bitmap_legacy);
fdef3ad1 7795out1:
3e7c73e9 7796 free_page((unsigned long)vmx_io_bitmap_b);
fdef3ad1 7797out:
3e7c73e9 7798 free_page((unsigned long)vmx_io_bitmap_a);
fdef3ad1 7799 return r;
6aa8b732
AK
7800}
7801
7802static void __exit vmx_exit(void)
7803{
8d14695f
YZ
7804 free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
7805 free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
5897297b
AK
7806 free_page((unsigned long)vmx_msr_bitmap_legacy);
7807 free_page((unsigned long)vmx_msr_bitmap_longmode);
3e7c73e9
AK
7808 free_page((unsigned long)vmx_io_bitmap_b);
7809 free_page((unsigned long)vmx_io_bitmap_a);
fdef3ad1 7810
8f536b76
ZY
7811#ifdef CONFIG_KEXEC
7812 rcu_assign_pointer(crash_vmclear_loaded_vmcss, NULL);
7813 synchronize_rcu();
7814#endif
7815
cb498ea2 7816 kvm_exit();
6aa8b732
AK
7817}
7818
7819module_init(vmx_init)
7820module_exit(vmx_exit)