/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */

#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#else
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
#else
	#error Invalid PTTYPE value
#endif
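
/*
 * For orientation: this header is not compiled on its own; mmu.c is
 * expected to include it once per guest pte width.  A minimal sketch of
 * that consumer (the exact lines in mmu.c may differ):
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 * Each inclusion instantiates a paging64_*() or paging32_*() family of
 * functions via the FNAME() macro above.
 */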

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t pte;
	pt_element_t inherited_ar;
	gfn_t gfn;
	u32 error_code;
};

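/*
 * Flow within this file: FNAME(page_fault) calls FNAME(walk_addr) to
 * fill in a guest_walker, then hands it to FNAME(fetch) to materialize
 * the matching shadow ptes; when the walk fails, error_code carries the
 * PFERR_* bits to inject back into the guest.
 */
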
/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr,
			    int write_fault, int user_fault, int fetch_fault)
{
	struct page *page = NULL;
	pt_element_t *table;
	pt_element_t pte;
	gfn_t table_gfn;
	unsigned index;
	gpa_t pte_gpa;

	pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
	walker->level = vcpu->mmu.root_level;
	pte = vcpu->cr3;
#if PTTYPE == 64
	if (!is_long_mode(vcpu)) {
		pte = vcpu->pdptrs[(addr >> 30) & 3];
		if (!is_present_pte(pte))
			goto not_present;
		--walker->level;
	}
#endif
	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->cr3 & CR3_NONPAE_RESERVED_BITS) == 0);

	walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK;

	for (;;) {
		index = PT_INDEX(addr, walker->level);

		table_gfn = (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
		walker->table_gfn[walker->level - 1] = table_gfn;
		pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
			 walker->level - 1, table_gfn);

		page = gfn_to_page(vcpu->kvm, (pte & PT64_BASE_ADDR_MASK)
				   >> PAGE_SHIFT);

		table = kmap_atomic(page, KM_USER0);
		pte = table[index];
		kunmap_atomic(table, KM_USER0);

		if (!is_present_pte(pte))
			goto not_present;

		if (write_fault && !is_writeble_pte(pte))
			if (user_fault || is_write_protection(vcpu))
				goto access_error;

		if (user_fault && !(pte & PT_USER_MASK))
			goto access_error;

#if PTTYPE == 64
		if (fetch_fault && is_nx(vcpu) && (pte & PT64_NX_MASK))
			goto access_error;
#endif

		if (!(pte & PT_ACCESSED_MASK)) {
			mark_page_dirty(vcpu->kvm, table_gfn);
			pte |= PT_ACCESSED_MASK;
			table = kmap_atomic(page, KM_USER0);
			table[index] = pte;
			kunmap_atomic(table, KM_USER0);
		}

		if (walker->level == PT_PAGE_TABLE_LEVEL) {
			walker->gfn = (pte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
			break;
		}

		if (walker->level == PT_DIRECTORY_LEVEL
		    && (pte & PT_PAGE_SIZE_MASK)
		    && (PTTYPE == 64 || is_pse(vcpu))) {
			walker->gfn = (pte & PT_DIR_BASE_ADDR_MASK)
				      >> PAGE_SHIFT;
			walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
			break;
		}

		walker->inherited_ar &= pte;
		--walker->level;
		kvm_release_page(page);
	}

	if (write_fault && !is_dirty_pte(pte)) {
		mark_page_dirty(vcpu->kvm, table_gfn);
		pte |= PT_DIRTY_MASK;
		table = kmap_atomic(page, KM_USER0);
		table[index] = pte;
		kunmap_atomic(table, KM_USER0);
		pte_gpa = table_gfn << PAGE_SHIFT;
		pte_gpa += index * sizeof(pt_element_t);
		kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)&pte, sizeof(pte));
	}

	kvm_release_page(page);
	walker->pte = pte;
	pgprintk("%s: pte %llx\n", __FUNCTION__, (u64)pte);
	return 1;

not_present:
	walker->error_code = 0;
	goto err;

access_error:
	walker->error_code = PFERR_PRESENT_MASK;

err:
	if (write_fault)
		walker->error_code |= PFERR_WRITE_MASK;
	if (user_fault)
		walker->error_code |= PFERR_USER_MASK;
	if (fetch_fault)
		walker->error_code |= PFERR_FETCH_MASK;
	if (page)
		kvm_release_page(page);
	return 0;
}

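/*
 * For reference, the per-level index computed by PT_INDEX() above picks
 * out PT_LEVEL_BITS bits of the virtual address per paging level (this
 * assumes the usual PT64_INDEX()/PT32_INDEX() definitions in mmu.c):
 *
 *	index = (addr >> (PAGE_SHIFT + (level - 1) * PT_LEVEL_BITS))
 *		& ((1 << PT_LEVEL_BITS) - 1);
 *
 * e.g. for PTTYPE == 64, addr == 0x00800000 and level == 2 (a pde):
 * index == (0x00800000 >> 21) & 0x1ff == 4.
 */
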
static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
				  u64 *shadow_pte,
				  gpa_t gaddr,
				  pt_element_t gpte,
				  u64 access_bits,
				  int user_fault,
				  int write_fault,
				  int *ptwrite,
				  struct guest_walker *walker,
				  gfn_t gfn)
{
	hpa_t paddr;
	int dirty = gpte & PT_DIRTY_MASK;
	u64 spte;
	int was_rmapped = is_rmap_pte(*shadow_pte);

	pgprintk("%s: spte %llx gpte %llx access %llx write_fault %d"
		 " user_fault %d gfn %lx\n",
		 __FUNCTION__, *shadow_pte, (u64)gpte, access_bits,
		 write_fault, user_fault, gfn);

	/*
	 * We don't set the accessed bit, since we sometimes want to see
	 * whether the guest actually used the pte (in order to detect
	 * demand paging).
	 */
	spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
	spte |= gpte & PT64_NX_MASK;
	if (!dirty)
		access_bits &= ~PT_WRITABLE_MASK;

	paddr = gpa_to_hpa(vcpu->kvm, gaddr & PT64_BASE_ADDR_MASK);

	spte |= PT_PRESENT_MASK;
	if (access_bits & PT_USER_MASK)
		spte |= PT_USER_MASK;

	if (is_error_hpa(paddr)) {
		set_shadow_pte(shadow_pte,
			       shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
		kvm_release_page(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
					     >> PAGE_SHIFT));
		return;
	}

	spte |= paddr;

	if ((access_bits & PT_WRITABLE_MASK)
	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
		struct kvm_mmu_page *shadow;

		spte |= PT_WRITABLE_MASK;
		if (user_fault) {
			mmu_unshadow(vcpu->kvm, gfn);
			goto unshadowed;
		}

		shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
		if (shadow) {
			pgprintk("%s: found shadow page for %lx, marking ro\n",
				 __FUNCTION__, gfn);
			access_bits &= ~PT_WRITABLE_MASK;
			if (is_writeble_pte(spte)) {
				spte &= ~PT_WRITABLE_MASK;
				kvm_x86_ops->tlb_flush(vcpu);
			}
			if (write_fault)
				*ptwrite = 1;
		}
	}

unshadowed:

	if (access_bits & PT_WRITABLE_MASK)
		mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);

	pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte);
	set_shadow_pte(shadow_pte, spte);
	page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
	if (!was_rmapped) {
		rmap_add(vcpu, shadow_pte, (gaddr & PT64_BASE_ADDR_MASK)
			 >> PAGE_SHIFT);
		if (!is_rmap_pte(*shadow_pte)) {
			struct page *page;

			page = pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
					   >> PAGE_SHIFT);
			kvm_release_page(page);
		}
	} else
		kvm_release_page(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
					     >> PAGE_SHIFT));
	if (!ptwrite || !*ptwrite)
		vcpu->last_pte_updated = shadow_pte;
}

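/*
 * Summary of the spte composed above, for orientation (restating the
 * code, not adding to it): the installed spte is the host physical
 * address plus PT_PRESENT_MASK and PT_DIRTY_MASK, the guest's NX bit,
 * PT_USER_MASK if the guest grants user access, and PT_WRITABLE_MASK
 * unless the page must stay write-protected for shadowing.
 * PT_ACCESSED_MASK is deliberately left clear so that a later hardware
 * walk reveals whether the guest actually touched the page.
 */
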
static void FNAME(set_pte)(struct kvm_vcpu *vcpu, pt_element_t gpte,
			   u64 *shadow_pte, u64 access_bits,
			   int user_fault, int write_fault, int *ptwrite,
			   struct guest_walker *walker, gfn_t gfn)
{
	access_bits &= gpte;
	FNAME(set_pte_common)(vcpu, shadow_pte, gpte & PT_BASE_ADDR_MASK,
			      gpte, access_bits, user_fault, write_fault,
			      ptwrite, walker, gfn);
}

static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
			      u64 *spte, const void *pte, int bytes,
			      int offset_in_pte)
{
	pt_element_t gpte;

	gpte = *(const pt_element_t *)pte;
	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
		if (!offset_in_pte && !is_present_pte(gpte))
			set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
		return;
	}
	if (bytes < sizeof(pt_element_t))
		return;
	pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
	FNAME(set_pte)(vcpu, gpte, spte, PT_USER_MASK | PT_WRITABLE_MASK, 0,
		       0, NULL, NULL,
		       (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT);
}

static void FNAME(set_pde)(struct kvm_vcpu *vcpu, pt_element_t gpde,
			   u64 *shadow_pte, u64 access_bits,
			   int user_fault, int write_fault, int *ptwrite,
			   struct guest_walker *walker, gfn_t gfn)
{
	gpa_t gaddr;

	access_bits &= gpde;
	gaddr = (gpa_t)gfn << PAGE_SHIFT;
	if (PTTYPE == 32 && is_cpuid_PSE36())
		gaddr |= (gpde & PT32_DIR_PSE36_MASK) <<
			(32 - PT32_DIR_PSE36_SHIFT);
	FNAME(set_pte_common)(vcpu, shadow_pte, gaddr,
			      gpde, access_bits, user_fault, write_fault,
			      ptwrite, walker, gfn);
}

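/*
 * The PSE36 adjustment above follows the architectural encoding: with
 * PSE-36, pde bits 13..16 supply bits 32..35 of the 4MB page's physical
 * address, hence the shift by (32 - PT32_DIR_PSE36_SHIFT).  (This
 * assumes PT32_DIR_PSE36_SHIFT == 13, as defined in mmu.c.)
 */
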
/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *walker,
			 int user_fault, int write_fault, int *ptwrite)
{
	hpa_t shadow_addr;
	int level;
	u64 *shadow_ent;
	u64 *prev_shadow_ent = NULL;

	if (!is_present_pte(walker->pte))
		return NULL;

	shadow_addr = vcpu->mmu.root_hpa;
	level = vcpu->mmu.shadow_root_level;
	if (level == PT32E_ROOT_LEVEL) {
		shadow_addr = vcpu->mmu.pae_root[(addr >> 30) & 3];
		shadow_addr &= PT64_BASE_ADDR_MASK;
		--level;
	}

	for (; ; level--) {
		u32 index = SHADOW_PT_INDEX(addr, level);
		struct kvm_mmu_page *shadow_page;
		u64 shadow_pte;
		int metaphysical;
		gfn_t table_gfn;
		unsigned hugepage_access = 0;

		shadow_ent = ((u64 *)__va(shadow_addr)) + index;
		if (is_shadow_present_pte(*shadow_ent)) {
			if (level == PT_PAGE_TABLE_LEVEL)
				break;
			shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
			prev_shadow_ent = shadow_ent;
			continue;
		}

		if (level == PT_PAGE_TABLE_LEVEL)
			break;

		if (level - 1 == PT_PAGE_TABLE_LEVEL
		    && walker->level == PT_DIRECTORY_LEVEL) {
			metaphysical = 1;
			hugepage_access = walker->pte;
			hugepage_access &= PT_USER_MASK | PT_WRITABLE_MASK;
			if (!is_dirty_pte(walker->pte))
				hugepage_access &= ~PT_WRITABLE_MASK;
			hugepage_access >>= PT_WRITABLE_SHIFT;
			if (walker->pte & PT64_NX_MASK)
				hugepage_access |= (1 << 2);
			table_gfn = (walker->pte & PT_BASE_ADDR_MASK)
				    >> PAGE_SHIFT;
		} else {
			metaphysical = 0;
			table_gfn = walker->table_gfn[level - 2];
		}
		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
					       metaphysical, hugepage_access,
					       shadow_ent);
		shadow_addr = __pa(shadow_page->spt);
		shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
			     | PT_WRITABLE_MASK | PT_USER_MASK;
		*shadow_ent = shadow_pte;
		prev_shadow_ent = shadow_ent;
	}

	if (walker->level == PT_DIRECTORY_LEVEL) {
		FNAME(set_pde)(vcpu, walker->pte, shadow_ent,
			       walker->inherited_ar, user_fault, write_fault,
			       ptwrite, walker, walker->gfn);
	} else {
		ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
		FNAME(set_pte)(vcpu, walker->pte, shadow_ent,
			       walker->inherited_ar, user_fault, write_fault,
			       ptwrite, walker, walker->gfn);
	}
	return shadow_ent;
}

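/*
 * A note on "metaphysical" shadow pages, as allocated above: when the
 * guest maps a 4MB page but the shadow hierarchy uses 4KB sptes, the
 * bottom shadow level has no guest page table backing it.  Such pages
 * are flagged metaphysical and keyed by the gfn of the huge page they
 * split, so kvm_mmu_get_page() can find them again.
 */
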
/*
 * Page fault handler.  There are several causes for a page fault:
 * - there is no shadow pte for the guest pte
 * - write access through a shadow pte marked read only so that we can set
 *   the dirty bit
 * - write access to a shadow pte marked read only so we can update the page
 *   dirty bitmap, when userspace requests it
 * - mmio access; in this case we will never install a present shadow pte
 * - normal guest page fault due to the guest pte marked not present, not
 *   writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *          a negative value on error.
 */
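/*
 * The PFERR_* masks tested below follow the x86 page-fault error code
 * layout: bit 0 (PFERR_PRESENT_MASK) set means the fault was a
 * protection violation rather than a missing pte, bit 1
 * (PFERR_WRITE_MASK) marks a write access, bit 2 (PFERR_USER_MASK) a
 * user-mode access, and bit 4 (PFERR_FETCH_MASK) an instruction fetch.
 */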
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			     u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	int fetch_fault = error_code & PFERR_FETCH_MASK;
	struct guest_walker walker;
	u64 *shadow_pte;
	int write_pt = 0;
	int r;

	pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
	kvm_mmu_audit(vcpu, "pre page fault");

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/*
	 * Look up the shadow pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
			     fetch_fault);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __FUNCTION__);
		inject_page_fault(vcpu, addr, walker.error_code);
		vcpu->last_pt_write_count = 0; /* reset fork detector */
		return 0;
	}

	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
				  &write_pt);
	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
		 shadow_pte, *shadow_pte, write_pt);

	if (!write_pt)
		vcpu->last_pt_write_count = 0; /* reset fork detector */

	/*
	 * mmio: emulate if accessible, otherwise it's a guest fault.
	 */
	if (is_io_pte(*shadow_pte))
		return 1;

	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, "post page fault (fixed)");

	return write_pt;
}

static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);

	if (r) {
		gpa = (gpa_t)walker.gfn << PAGE_SHIFT;
		gpa |= vaddr & ~PAGE_MASK;
	}

	return gpa;
}

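/*
 * Typical use, as a hedged sketch (this function is installed as the
 * vcpu->mmu.gva_to_gpa hook; the caller shown here is hypothetical):
 *
 *	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
 *	if (gpa == UNMAPPED_GVA)
 *		(no guest mapping: reflect the fault to the guest)
 *	else
 *		(access guest memory at gpa)
 */
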
static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp)
{
	int i;
	pt_element_t *gpt;
	struct page *page;

	if (sp->role.metaphysical || PTTYPE == 32) {
		nonpaging_prefetch_page(vcpu, sp);
		return;
	}

	page = gfn_to_page(vcpu->kvm, sp->gfn);
	gpt = kmap_atomic(page, KM_USER0);
	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		if (is_present_pte(gpt[i]))
			sp->spt[i] = shadow_trap_nonpresent_pte;
		else
			sp->spt[i] = shadow_notrap_nonpresent_pte;
	kunmap_atomic(gpt, KM_USER0);
	kvm_release_page(page);
}

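/*
 * The two non-present encodings used above differ in behavior (an
 * explanation of the mmu.c values, stated here as background): a spte
 * holding shadow_trap_nonpresent_pte forces a VM exit so kvm can
 * inspect the guest pte, while shadow_notrap_nonpresent_pte lets the
 * guest take the page fault directly, since the guest pte is already
 * known to be not present and the guest's own handler must run anyway.
 */
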
#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef SHADOW_PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_DIR_BASE_ADDR_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS