KVM: MMU: Move nonpaging_prefetch_page()
arch/x86/kvm/mmu.c
6aa8b732
AK
1/*
2 * Kernel-based Virtual Machine driver for Linux
3 *
4 * This module enables machines with Intel VT-x extensions to run virtual
5 * machines without emulation or binary translation.
6 *
7 * MMU support
8 *
9 * Copyright (C) 2006 Qumranet, Inc.
10 *
11 * Authors:
12 * Yaniv Kamay <yaniv@qumranet.com>
13 * Avi Kivity <avi@qumranet.com>
14 *
15 * This work is licensed under the terms of the GNU GPL, version 2. See
16 * the COPYING file in the top-level directory.
17 *
18 */
e495606d
AK
19
20#include "vmx.h"
1d737c8a 21#include "mmu.h"
e495606d 22
edf88417 23#include <linux/kvm_host.h>
6aa8b732
AK
24#include <linux/types.h>
25#include <linux/string.h>
6aa8b732
AK
26#include <linux/mm.h>
27#include <linux/highmem.h>
28#include <linux/module.h>
448353ca 29#include <linux/swap.h>
05da4558 30#include <linux/hugetlb.h>
2f333bcb 31#include <linux/compiler.h>
6aa8b732 32
e495606d
AK
33#include <asm/page.h>
34#include <asm/cmpxchg.h>
4e542370 35#include <asm/io.h>
6aa8b732 36
18552672
JR
37/*
38 * Setting this variable to true enables Two-Dimensional Paging, in which
39 * the hardware walks two page tables:
40 * 1. the guest-virtual to guest-physical table
41 * 2. while doing 1., the guest-physical to host-physical table
42 * If the hardware supports this, shadow paging is not needed.
43 */
2f333bcb 44bool tdp_enabled = false;
18552672 45
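/*
 * Note (illustrative): tdp_enabled only becomes true via kvm_enable_tdp(),
 * exported near the end of this file; the call is expected to come from
 * vendor-specific (VMX/SVM) setup code outside this file.  Once set,
 * init_kvm_mmu() installs the tdp_* callbacks instead of the shadow-paging
 * ones.
 */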
37a7d8b0
AK
46#undef MMU_DEBUG
47
48#undef AUDIT
49
50#ifdef AUDIT
51static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
52#else
53static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
54#endif
55
56#ifdef MMU_DEBUG
57
58#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
59#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
60
61#else
62
63#define pgprintk(x...) do { } while (0)
64#define rmap_printk(x...) do { } while (0)
65
66#endif
67
68#if defined(MMU_DEBUG) || defined(AUDIT)
69static int dbg = 1;
70#endif
6aa8b732 71
d6c69ee9
YD
72#ifndef MMU_DEBUG
73#define ASSERT(x) do { } while (0)
74#else
6aa8b732
AK
75#define ASSERT(x) \
76 if (!(x)) { \
77 printk(KERN_WARNING "assertion failed %s:%d: %s\n", \
78 __FILE__, __LINE__, #x); \
79 }
d6c69ee9 80#endif
6aa8b732 81
6aa8b732
AK
82#define PT_FIRST_AVAIL_BITS_SHIFT 9
83#define PT64_SECOND_AVAIL_BITS_SHIFT 52
84
6aa8b732
AK
85#define VALID_PAGE(x) ((x) != INVALID_PAGE)
86
87#define PT64_LEVEL_BITS 9
88
89#define PT64_LEVEL_SHIFT(level) \
d77c26fc 90 (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
6aa8b732
AK
91
92#define PT64_LEVEL_MASK(level) \
93 (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))
94
95#define PT64_INDEX(address, level)\
96 (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
97
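/*
 * Worked example for the 64-bit layout, assuming PAGE_SHIFT == 12:
 *   PT64_LEVEL_SHIFT(1) == 12, (2) == 21, (3) == 30, (4) == 39
 * so PT64_INDEX(addr, level) extracts the 9-bit table index for that
 * level, e.g. PT64_INDEX(addr, 1) == (addr >> 12) & 511.
 */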
98
99#define PT32_LEVEL_BITS 10
100
101#define PT32_LEVEL_SHIFT(level) \
d77c26fc 102 (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
6aa8b732
AK
103
104#define PT32_LEVEL_MASK(level) \
105 (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
106
107#define PT32_INDEX(address, level)\
108 (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
109
110
27aba766 111#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
6aa8b732
AK
112#define PT64_DIR_BASE_ADDR_MASK \
113 (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
114
115#define PT32_BASE_ADDR_MASK PAGE_MASK
116#define PT32_DIR_BASE_ADDR_MASK \
117 (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
118
79539cec
AK
119#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
120 | PT64_NX_MASK)
6aa8b732
AK
121
122#define PFERR_PRESENT_MASK (1U << 0)
123#define PFERR_WRITE_MASK (1U << 1)
124#define PFERR_USER_MASK (1U << 2)
73b1087e 125#define PFERR_FETCH_MASK (1U << 4)
6aa8b732 126
6aa8b732
AK
127#define PT_DIRECTORY_LEVEL 2
128#define PT_PAGE_TABLE_LEVEL 1
129
cd4a4e53
AK
130#define RMAP_EXT 4
131
fe135d2c
AK
132#define ACC_EXEC_MASK 1
133#define ACC_WRITE_MASK PT_WRITABLE_MASK
134#define ACC_USER_MASK PT_USER_MASK
135#define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
136
2f333bcb
MT
137struct kvm_pv_mmu_op_buffer {
138 void *ptr;
139 unsigned len;
140 unsigned processed;
141 char buf[512] __aligned(sizeof(long));
142};
143
cd4a4e53
AK
144struct kvm_rmap_desc {
145 u64 *shadow_ptes[RMAP_EXT];
146 struct kvm_rmap_desc *more;
147};
148
b5a33a75
AK
149static struct kmem_cache *pte_chain_cache;
150static struct kmem_cache *rmap_desc_cache;
d3d25b04 151static struct kmem_cache *mmu_page_header_cache;
b5a33a75 152
c7addb90
AK
153static u64 __read_mostly shadow_trap_nonpresent_pte;
154static u64 __read_mostly shadow_notrap_nonpresent_pte;
7b52345e
SY
155static u64 __read_mostly shadow_base_present_pte;
156static u64 __read_mostly shadow_nx_mask;
157static u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */
158static u64 __read_mostly shadow_user_mask;
159static u64 __read_mostly shadow_accessed_mask;
160static u64 __read_mostly shadow_dirty_mask;
c7addb90
AK
161
162void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
163{
164 shadow_trap_nonpresent_pte = trap_pte;
165 shadow_notrap_nonpresent_pte = notrap_pte;
166}
167EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
168
7b52345e
SY
169void kvm_mmu_set_base_ptes(u64 base_pte)
170{
171 shadow_base_present_pte = base_pte;
172}
173EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
174
175void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
176 u64 dirty_mask, u64 nx_mask, u64 x_mask)
177{
178 shadow_user_mask = user_mask;
179 shadow_accessed_mask = accessed_mask;
180 shadow_dirty_mask = dirty_mask;
181 shadow_nx_mask = nx_mask;
182 shadow_x_mask = x_mask;
183}
184EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
185
6aa8b732
AK
186static int is_write_protection(struct kvm_vcpu *vcpu)
187{
ad312c7c 188 return vcpu->arch.cr0 & X86_CR0_WP;
6aa8b732
AK
189}
190
191static int is_cpuid_PSE36(void)
192{
193 return 1;
194}
195
73b1087e
AK
196static int is_nx(struct kvm_vcpu *vcpu)
197{
ad312c7c 198 return vcpu->arch.shadow_efer & EFER_NX;
73b1087e
AK
199}
200
6aa8b732
AK
201static int is_present_pte(unsigned long pte)
202{
203 return pte & PT_PRESENT_MASK;
204}
205
c7addb90
AK
206static int is_shadow_present_pte(u64 pte)
207{
c7addb90
AK
208 return pte != shadow_trap_nonpresent_pte
209 && pte != shadow_notrap_nonpresent_pte;
210}
211
05da4558
MT
212static int is_large_pte(u64 pte)
213{
214 return pte & PT_PAGE_SIZE_MASK;
215}
216
6aa8b732
AK
217static int is_writeble_pte(unsigned long pte)
218{
219 return pte & PT_WRITABLE_MASK;
220}
221
e3c5e7ec
AK
222static int is_dirty_pte(unsigned long pte)
223{
7b52345e 224 return pte & shadow_dirty_mask;
e3c5e7ec
AK
225}
226
cd4a4e53
AK
227static int is_rmap_pte(u64 pte)
228{
4b1a80fa 229 return is_shadow_present_pte(pte);
cd4a4e53
AK
230}
231
35149e21 232static pfn_t spte_to_pfn(u64 pte)
0b49ea86 233{
35149e21 234 return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
0b49ea86
AK
235}
236
da928521
AK
237static gfn_t pse36_gfn_delta(u32 gpte)
238{
239 int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
240
241 return (gpte & PT32_DIR_PSE36_MASK) << shift;
242}
243
e663ee64
AK
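/*
 * set_64bit() performs a single atomic 64-bit store on both 32-bit and
 * 64-bit hosts, so a concurrent hardware walk never sees a torn shadow
 * pte; only the pointer cast differs between the two #ifdef arms below.
 */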
244static void set_shadow_pte(u64 *sptep, u64 spte)
245{
246#ifdef CONFIG_X86_64
247 set_64bit((unsigned long *)sptep, spte);
248#else
249 set_64bit((unsigned long long *)sptep, spte);
250#endif
251}
252
e2dec939 253static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
2e3e5882 254 struct kmem_cache *base_cache, int min)
714b93da
AK
255{
256 void *obj;
257
258 if (cache->nobjs >= min)
e2dec939 259 return 0;
714b93da 260 while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
2e3e5882 261 obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
714b93da 262 if (!obj)
e2dec939 263 return -ENOMEM;
714b93da
AK
264 cache->objects[cache->nobjs++] = obj;
265 }
e2dec939 266 return 0;
714b93da
AK
267}
268
269static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
270{
271 while (mc->nobjs)
272 kfree(mc->objects[--mc->nobjs]);
273}
274
c1158e63 275static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
2e3e5882 276 int min)
c1158e63
AK
277{
278 struct page *page;
279
280 if (cache->nobjs >= min)
281 return 0;
282 while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
2e3e5882 283 page = alloc_page(GFP_KERNEL);
c1158e63
AK
284 if (!page)
285 return -ENOMEM;
286 set_page_private(page, 0);
287 cache->objects[cache->nobjs++] = page_address(page);
288 }
289 return 0;
290}
291
292static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
293{
294 while (mc->nobjs)
c4d198d5 295 free_page((unsigned long)mc->objects[--mc->nobjs]);
c1158e63
AK
296}
297
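/*
 * The caches below are topped up with GFP_KERNEL allocations while
 * sleeping is still allowed; the actual consumers (pte chains, rmap
 * descriptors, shadow pages) then draw from them under mmu_lock.  See
 * kvm_mmu_load() and the page fault paths, which call
 * mmu_topup_memory_caches() before taking the spin lock.
 */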
2e3e5882 298static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
714b93da 299{
e2dec939
AK
300 int r;
301
ad312c7c 302 r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
2e3e5882 303 pte_chain_cache, 4);
e2dec939
AK
304 if (r)
305 goto out;
ad312c7c 306 r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
2e3e5882 307 rmap_desc_cache, 1);
d3d25b04
AK
308 if (r)
309 goto out;
ad312c7c 310 r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
d3d25b04
AK
311 if (r)
312 goto out;
ad312c7c 313 r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
2e3e5882 314 mmu_page_header_cache, 4);
e2dec939
AK
315out:
316 return r;
714b93da
AK
317}
318
319static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
320{
ad312c7c
ZX
321 mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
322 mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
323 mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
324 mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
714b93da
AK
325}
326
327static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
328 size_t size)
329{
330 void *p;
331
332 BUG_ON(!mc->nobjs);
333 p = mc->objects[--mc->nobjs];
334 memset(p, 0, size);
335 return p;
336}
337
714b93da
AK
338static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
339{
ad312c7c 340 return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
714b93da
AK
341 sizeof(struct kvm_pte_chain));
342}
343
90cb0529 344static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
714b93da 345{
90cb0529 346 kfree(pc);
714b93da
AK
347}
348
349static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
350{
ad312c7c 351 return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
714b93da
AK
352 sizeof(struct kvm_rmap_desc));
353}
354
90cb0529 355static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
714b93da 356{
90cb0529 357 kfree(rd);
714b93da
AK
358}
359
05da4558
MT
360/*
361 * Return the pointer to the largepage write count for a given
362 * gfn, handling slots that are not large page aligned.
363 */
364static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
365{
366 unsigned long idx;
367
368 idx = (gfn / KVM_PAGES_PER_HPAGE) -
369 (slot->base_gfn / KVM_PAGES_PER_HPAGE);
370 return &slot->lpage_info[idx].write_count;
371}
372
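/*
 * Roughly: write_count records how many gfns inside a potential large
 * page are currently shadowed (and therefore write-protected) as guest
 * page tables.  A nonzero count makes has_wrprotected_page() report the
 * frame as unsuitable for a large mapping.
 */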
373static void account_shadowed(struct kvm *kvm, gfn_t gfn)
374{
375 int *write_count;
376
377 write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
378 *write_count += 1;
05da4558
MT
379}
380
381static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
382{
383 int *write_count;
384
385 write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
386 *write_count -= 1;
387 WARN_ON(*write_count < 0);
388}
389
390static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn)
391{
392 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
393 int *largepage_idx;
394
395 if (slot) {
396 largepage_idx = slot_largepage_idx(gfn, slot);
397 return *largepage_idx;
398 }
399
400 return 1;
401}
402
403static int host_largepage_backed(struct kvm *kvm, gfn_t gfn)
404{
405 struct vm_area_struct *vma;
406 unsigned long addr;
407
408 addr = gfn_to_hva(kvm, gfn);
409 if (kvm_is_error_hva(addr))
410 return 0;
411
412 vma = find_vma(current->mm, addr);
413 if (vma && is_vm_hugetlb_page(vma))
414 return 1;
415
416 return 0;
417}
418
419static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
420{
421 struct kvm_memory_slot *slot;
422
423 if (has_wrprotected_page(vcpu->kvm, large_gfn))
424 return 0;
425
426 if (!host_largepage_backed(vcpu->kvm, large_gfn))
427 return 0;
428
429 slot = gfn_to_memslot(vcpu->kvm, large_gfn);
430 if (slot && slot->dirty_bitmap)
431 return 0;
432
433 return 1;
434}
435
290fc38d
IE
436/*
437 * Take gfn and return the reverse mapping to it.
438 * Note: gfn must be unaliased before this function gets called
439 */
440
05da4558 441static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
290fc38d
IE
442{
443 struct kvm_memory_slot *slot;
05da4558 444 unsigned long idx;
290fc38d
IE
445
446 slot = gfn_to_memslot(kvm, gfn);
05da4558
MT
447 if (!lpage)
448 return &slot->rmap[gfn - slot->base_gfn];
449
450 idx = (gfn / KVM_PAGES_PER_HPAGE) -
451 (slot->base_gfn / KVM_PAGES_PER_HPAGE);
452
453 return &slot->lpage_info[idx].rmap_pde;
290fc38d
IE
454}
455
cd4a4e53
AK
456/*
457 * Reverse mapping data structures:
458 *
290fc38d
IE
459 * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
460 * that points to page_address(page).
cd4a4e53 461 *
290fc38d
IE
462 * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
463 * containing more mappings.
cd4a4e53 464 */
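/*
 * In other words (illustrative sketch of the encoding above):
 *   *rmapp == 0                      no spte maps this gfn
 *   *rmapp == (unsigned long)spte    exactly one spte (bit zero clear)
 *   *rmapp == (desc | 1)             a chain of kvm_rmap_desc, each with
 *                                    up to RMAP_EXT sptes and a 'more' link
 */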
05da4558 465static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
cd4a4e53 466{
4db35314 467 struct kvm_mmu_page *sp;
cd4a4e53 468 struct kvm_rmap_desc *desc;
290fc38d 469 unsigned long *rmapp;
cd4a4e53
AK
470 int i;
471
472 if (!is_rmap_pte(*spte))
473 return;
290fc38d 474 gfn = unalias_gfn(vcpu->kvm, gfn);
4db35314
AK
475 sp = page_header(__pa(spte));
476 sp->gfns[spte - sp->spt] = gfn;
05da4558 477 rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
290fc38d 478 if (!*rmapp) {
cd4a4e53 479 rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
290fc38d
IE
480 *rmapp = (unsigned long)spte;
481 } else if (!(*rmapp & 1)) {
cd4a4e53 482 rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
714b93da 483 desc = mmu_alloc_rmap_desc(vcpu);
290fc38d 484 desc->shadow_ptes[0] = (u64 *)*rmapp;
cd4a4e53 485 desc->shadow_ptes[1] = spte;
290fc38d 486 *rmapp = (unsigned long)desc | 1;
cd4a4e53
AK
487 } else {
488 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
290fc38d 489 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
cd4a4e53
AK
490 while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
491 desc = desc->more;
492 if (desc->shadow_ptes[RMAP_EXT-1]) {
714b93da 493 desc->more = mmu_alloc_rmap_desc(vcpu);
cd4a4e53
AK
494 desc = desc->more;
495 }
496 for (i = 0; desc->shadow_ptes[i]; ++i)
497 ;
498 desc->shadow_ptes[i] = spte;
499 }
500}
501
290fc38d 502static void rmap_desc_remove_entry(unsigned long *rmapp,
cd4a4e53
AK
503 struct kvm_rmap_desc *desc,
504 int i,
505 struct kvm_rmap_desc *prev_desc)
506{
507 int j;
508
509 for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
510 ;
511 desc->shadow_ptes[i] = desc->shadow_ptes[j];
11718b4d 512 desc->shadow_ptes[j] = NULL;
cd4a4e53
AK
513 if (j != 0)
514 return;
515 if (!prev_desc && !desc->more)
290fc38d 516 *rmapp = (unsigned long)desc->shadow_ptes[0];
cd4a4e53
AK
517 else
518 if (prev_desc)
519 prev_desc->more = desc->more;
520 else
290fc38d 521 *rmapp = (unsigned long)desc->more | 1;
90cb0529 522 mmu_free_rmap_desc(desc);
cd4a4e53
AK
523}
524
290fc38d 525static void rmap_remove(struct kvm *kvm, u64 *spte)
cd4a4e53 526{
cd4a4e53
AK
527 struct kvm_rmap_desc *desc;
528 struct kvm_rmap_desc *prev_desc;
4db35314 529 struct kvm_mmu_page *sp;
35149e21 530 pfn_t pfn;
290fc38d 531 unsigned long *rmapp;
cd4a4e53
AK
532 int i;
533
534 if (!is_rmap_pte(*spte))
535 return;
4db35314 536 sp = page_header(__pa(spte));
35149e21 537 pfn = spte_to_pfn(*spte);
7b52345e 538 if (*spte & shadow_accessed_mask)
35149e21 539 kvm_set_pfn_accessed(pfn);
b4231d61 540 if (is_writeble_pte(*spte))
35149e21 541 kvm_release_pfn_dirty(pfn);
b4231d61 542 else
35149e21 543 kvm_release_pfn_clean(pfn);
05da4558 544 rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], is_large_pte(*spte));
290fc38d 545 if (!*rmapp) {
cd4a4e53
AK
546 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
547 BUG();
290fc38d 548 } else if (!(*rmapp & 1)) {
cd4a4e53 549 rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte);
290fc38d 550 if ((u64 *)*rmapp != spte) {
cd4a4e53
AK
551 printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n",
552 spte, *spte);
553 BUG();
554 }
290fc38d 555 *rmapp = 0;
cd4a4e53
AK
556 } else {
557 rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte);
290fc38d 558 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
cd4a4e53
AK
559 prev_desc = NULL;
560 while (desc) {
561 for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
562 if (desc->shadow_ptes[i] == spte) {
290fc38d 563 rmap_desc_remove_entry(rmapp,
714b93da 564 desc, i,
cd4a4e53
AK
565 prev_desc);
566 return;
567 }
568 prev_desc = desc;
569 desc = desc->more;
570 }
571 BUG();
572 }
573}
574
98348e95 575static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
374cbac0 576{
374cbac0 577 struct kvm_rmap_desc *desc;
98348e95
IE
578 struct kvm_rmap_desc *prev_desc;
579 u64 *prev_spte;
580 int i;
581
582 if (!*rmapp)
583 return NULL;
584 else if (!(*rmapp & 1)) {
585 if (!spte)
586 return (u64 *)*rmapp;
587 return NULL;
588 }
589 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
590 prev_desc = NULL;
591 prev_spte = NULL;
592 while (desc) {
593 for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
594 if (prev_spte == spte)
595 return desc->shadow_ptes[i];
596 prev_spte = desc->shadow_ptes[i];
597 }
598 desc = desc->more;
599 }
600 return NULL;
601}
602
603static void rmap_write_protect(struct kvm *kvm, u64 gfn)
604{
290fc38d 605 unsigned long *rmapp;
374cbac0 606 u64 *spte;
caa5b8a5 607 int write_protected = 0;
374cbac0 608
4a4c9924 609 gfn = unalias_gfn(kvm, gfn);
05da4558 610 rmapp = gfn_to_rmap(kvm, gfn, 0);
374cbac0 611
98348e95
IE
612 spte = rmap_next(kvm, rmapp, NULL);
613 while (spte) {
374cbac0 614 BUG_ON(!spte);
374cbac0 615 BUG_ON(!(*spte & PT_PRESENT_MASK));
374cbac0 616 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
caa5b8a5 617 if (is_writeble_pte(*spte)) {
9647c14c 618 set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
caa5b8a5
ED
619 write_protected = 1;
620 }
9647c14c 621 spte = rmap_next(kvm, rmapp, spte);
374cbac0 622 }
855149aa 623 if (write_protected) {
35149e21 624 pfn_t pfn;
855149aa
IE
625
626 spte = rmap_next(kvm, rmapp, NULL);
35149e21
AL
627 pfn = spte_to_pfn(*spte);
628 kvm_set_pfn_dirty(pfn);
855149aa
IE
629 }
630
05da4558
MT
631 /* check for huge page mappings */
632 rmapp = gfn_to_rmap(kvm, gfn, 1);
633 spte = rmap_next(kvm, rmapp, NULL);
634 while (spte) {
635 BUG_ON(!spte);
636 BUG_ON(!(*spte & PT_PRESENT_MASK));
637 BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
638 pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
639 if (is_writeble_pte(*spte)) {
640 rmap_remove(kvm, spte);
641 --kvm->stat.lpages;
642 set_shadow_pte(spte, shadow_trap_nonpresent_pte);
6597ca09 643 spte = NULL;
05da4558
MT
644 write_protected = 1;
645 }
646 spte = rmap_next(kvm, rmapp, spte);
647 }
648
caa5b8a5
ED
649 if (write_protected)
650 kvm_flush_remote_tlbs(kvm);
05da4558
MT
651
652 account_shadowed(kvm, gfn);
374cbac0
AK
653}
654
d6c69ee9 655#ifdef MMU_DEBUG
47ad8e68 656static int is_empty_shadow_page(u64 *spt)
6aa8b732 657{
139bdb2d
AK
658 u64 *pos;
659 u64 *end;
660
47ad8e68 661 for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
3c915510 662 if (is_shadow_present_pte(*pos)) {
b8688d51 663 printk(KERN_ERR "%s: %p %llx\n", __func__,
139bdb2d 664 pos, *pos);
6aa8b732 665 return 0;
139bdb2d 666 }
6aa8b732
AK
667 return 1;
668}
d6c69ee9 669#endif
6aa8b732 670
4db35314 671static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
260746c0 672{
4db35314
AK
673 ASSERT(is_empty_shadow_page(sp->spt));
674 list_del(&sp->link);
675 __free_page(virt_to_page(sp->spt));
676 __free_page(virt_to_page(sp->gfns));
677 kfree(sp);
f05e70ac 678 ++kvm->arch.n_free_mmu_pages;
260746c0
AK
679}
680
cea0f0e7
AK
681static unsigned kvm_page_table_hashfn(gfn_t gfn)
682{
1ae0a13d 683 return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
cea0f0e7
AK
684}
685
25c0de2c
AK
686static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
687 u64 *parent_pte)
6aa8b732 688{
4db35314 689 struct kvm_mmu_page *sp;
6aa8b732 690
ad312c7c
ZX
691 sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
692 sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
693 sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
4db35314 694 set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
f05e70ac 695 list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
4db35314
AK
696 ASSERT(is_empty_shadow_page(sp->spt));
697 sp->slot_bitmap = 0;
698 sp->multimapped = 0;
699 sp->parent_pte = parent_pte;
f05e70ac 700 --vcpu->kvm->arch.n_free_mmu_pages;
4db35314 701 return sp;
6aa8b732
AK
702}
703
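/*
 * A shadow page can be reachable from several parent sptes.  A single
 * parent is kept inline in sp->parent_pte; as soon as a second one
 * appears, sp->multimapped is set and the parents are tracked in a list
 * of kvm_pte_chain, each holding NR_PTE_CHAIN_ENTRIES slots, maintained
 * by the two helpers below.
 */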
714b93da 704static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
4db35314 705 struct kvm_mmu_page *sp, u64 *parent_pte)
cea0f0e7
AK
706{
707 struct kvm_pte_chain *pte_chain;
708 struct hlist_node *node;
709 int i;
710
711 if (!parent_pte)
712 return;
4db35314
AK
713 if (!sp->multimapped) {
714 u64 *old = sp->parent_pte;
cea0f0e7
AK
715
716 if (!old) {
4db35314 717 sp->parent_pte = parent_pte;
cea0f0e7
AK
718 return;
719 }
4db35314 720 sp->multimapped = 1;
714b93da 721 pte_chain = mmu_alloc_pte_chain(vcpu);
4db35314
AK
722 INIT_HLIST_HEAD(&sp->parent_ptes);
723 hlist_add_head(&pte_chain->link, &sp->parent_ptes);
cea0f0e7
AK
724 pte_chain->parent_ptes[0] = old;
725 }
4db35314 726 hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
cea0f0e7
AK
727 if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
728 continue;
729 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
730 if (!pte_chain->parent_ptes[i]) {
731 pte_chain->parent_ptes[i] = parent_pte;
732 return;
733 }
734 }
714b93da 735 pte_chain = mmu_alloc_pte_chain(vcpu);
cea0f0e7 736 BUG_ON(!pte_chain);
4db35314 737 hlist_add_head(&pte_chain->link, &sp->parent_ptes);
cea0f0e7
AK
738 pte_chain->parent_ptes[0] = parent_pte;
739}
740
4db35314 741static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
cea0f0e7
AK
742 u64 *parent_pte)
743{
744 struct kvm_pte_chain *pte_chain;
745 struct hlist_node *node;
746 int i;
747
4db35314
AK
748 if (!sp->multimapped) {
749 BUG_ON(sp->parent_pte != parent_pte);
750 sp->parent_pte = NULL;
cea0f0e7
AK
751 return;
752 }
4db35314 753 hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
cea0f0e7
AK
754 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
755 if (!pte_chain->parent_ptes[i])
756 break;
757 if (pte_chain->parent_ptes[i] != parent_pte)
758 continue;
697fe2e2
AK
759 while (i + 1 < NR_PTE_CHAIN_ENTRIES
760 && pte_chain->parent_ptes[i + 1]) {
cea0f0e7
AK
761 pte_chain->parent_ptes[i]
762 = pte_chain->parent_ptes[i + 1];
763 ++i;
764 }
765 pte_chain->parent_ptes[i] = NULL;
697fe2e2
AK
766 if (i == 0) {
767 hlist_del(&pte_chain->link);
90cb0529 768 mmu_free_pte_chain(pte_chain);
4db35314
AK
769 if (hlist_empty(&sp->parent_ptes)) {
770 sp->multimapped = 0;
771 sp->parent_pte = NULL;
697fe2e2
AK
772 }
773 }
cea0f0e7
AK
774 return;
775 }
776 BUG();
777}
778
d761a501
AK
779static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
780 struct kvm_mmu_page *sp)
781{
782 int i;
783
784 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
785 sp->spt[i] = shadow_trap_nonpresent_pte;
786}
787
4db35314 788static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
cea0f0e7
AK
789{
790 unsigned index;
791 struct hlist_head *bucket;
4db35314 792 struct kvm_mmu_page *sp;
cea0f0e7
AK
793 struct hlist_node *node;
794
b8688d51 795 pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
1ae0a13d 796 index = kvm_page_table_hashfn(gfn);
f05e70ac 797 bucket = &kvm->arch.mmu_page_hash[index];
4db35314 798 hlist_for_each_entry(sp, node, bucket, hash_link)
2e53d63a
MT
799 if (sp->gfn == gfn && !sp->role.metaphysical
800 && !sp->role.invalid) {
cea0f0e7 801 pgprintk("%s: found role %x\n",
b8688d51 802 __func__, sp->role.word);
4db35314 803 return sp;
cea0f0e7
AK
804 }
805 return NULL;
806}
807
808static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
809 gfn_t gfn,
810 gva_t gaddr,
811 unsigned level,
812 int metaphysical,
41074d07 813 unsigned access,
f7d9c7b7 814 u64 *parent_pte)
cea0f0e7
AK
815{
816 union kvm_mmu_page_role role;
817 unsigned index;
818 unsigned quadrant;
819 struct hlist_head *bucket;
4db35314 820 struct kvm_mmu_page *sp;
cea0f0e7
AK
821 struct hlist_node *node;
822
823 role.word = 0;
ad312c7c 824 role.glevels = vcpu->arch.mmu.root_level;
cea0f0e7
AK
825 role.level = level;
826 role.metaphysical = metaphysical;
41074d07 827 role.access = access;
ad312c7c 828 if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
cea0f0e7
AK
829 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
830 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
831 role.quadrant = quadrant;
832 }
b8688d51 833 pgprintk("%s: looking gfn %lx role %x\n", __func__,
cea0f0e7 834 gfn, role.word);
1ae0a13d 835 index = kvm_page_table_hashfn(gfn);
f05e70ac 836 bucket = &vcpu->kvm->arch.mmu_page_hash[index];
4db35314
AK
837 hlist_for_each_entry(sp, node, bucket, hash_link)
838 if (sp->gfn == gfn && sp->role.word == role.word) {
839 mmu_page_add_parent_pte(vcpu, sp, parent_pte);
b8688d51 840 pgprintk("%s: found\n", __func__);
4db35314 841 return sp;
cea0f0e7 842 }
dfc5aa00 843 ++vcpu->kvm->stat.mmu_cache_miss;
4db35314
AK
844 sp = kvm_mmu_alloc_page(vcpu, parent_pte);
845 if (!sp)
846 return sp;
b8688d51 847 pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
4db35314
AK
848 sp->gfn = gfn;
849 sp->role = role;
850 hlist_add_head(&sp->hash_link, bucket);
374cbac0 851 if (!metaphysical)
4a4c9924 852 rmap_write_protect(vcpu->kvm, gfn);
bed1d1df 853 vcpu->arch.mmu.prefetch_page(vcpu, sp);
4db35314 854 return sp;
cea0f0e7
AK
855}
856
90cb0529 857static void kvm_mmu_page_unlink_children(struct kvm *kvm,
4db35314 858 struct kvm_mmu_page *sp)
a436036b 859{
697fe2e2
AK
860 unsigned i;
861 u64 *pt;
862 u64 ent;
863
4db35314 864 pt = sp->spt;
697fe2e2 865
4db35314 866 if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
697fe2e2 867 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
c7addb90 868 if (is_shadow_present_pte(pt[i]))
290fc38d 869 rmap_remove(kvm, &pt[i]);
c7addb90 870 pt[i] = shadow_trap_nonpresent_pte;
697fe2e2 871 }
90cb0529 872 kvm_flush_remote_tlbs(kvm);
697fe2e2
AK
873 return;
874 }
875
876 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
877 ent = pt[i];
878
05da4558
MT
879 if (is_shadow_present_pte(ent)) {
880 if (!is_large_pte(ent)) {
881 ent &= PT64_BASE_ADDR_MASK;
882 mmu_page_remove_parent_pte(page_header(ent),
883 &pt[i]);
884 } else {
885 --kvm->stat.lpages;
886 rmap_remove(kvm, &pt[i]);
887 }
888 }
c7addb90 889 pt[i] = shadow_trap_nonpresent_pte;
697fe2e2 890 }
90cb0529 891 kvm_flush_remote_tlbs(kvm);
a436036b
AK
892}
893
4db35314 894static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
cea0f0e7 895{
4db35314 896 mmu_page_remove_parent_pte(sp, parent_pte);
a436036b
AK
897}
898
12b7d28f
AK
899static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
900{
901 int i;
902
903 for (i = 0; i < KVM_MAX_VCPUS; ++i)
904 if (kvm->vcpus[i])
ad312c7c 905 kvm->vcpus[i]->arch.last_pte_updated = NULL;
12b7d28f
AK
906}
907
4db35314 908static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
a436036b
AK
909{
910 u64 *parent_pte;
911
4cee5764 912 ++kvm->stat.mmu_shadow_zapped;
4db35314
AK
913 while (sp->multimapped || sp->parent_pte) {
914 if (!sp->multimapped)
915 parent_pte = sp->parent_pte;
a436036b
AK
916 else {
917 struct kvm_pte_chain *chain;
918
4db35314 919 chain = container_of(sp->parent_ptes.first,
a436036b
AK
920 struct kvm_pte_chain, link);
921 parent_pte = chain->parent_ptes[0];
922 }
697fe2e2 923 BUG_ON(!parent_pte);
4db35314 924 kvm_mmu_put_page(sp, parent_pte);
c7addb90 925 set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
a436036b 926 }
4db35314
AK
927 kvm_mmu_page_unlink_children(kvm, sp);
928 if (!sp->root_count) {
05da4558
MT
929 if (!sp->role.metaphysical)
930 unaccount_shadowed(kvm, sp->gfn);
4db35314
AK
931 hlist_del(&sp->hash_link);
932 kvm_mmu_free_page(kvm, sp);
2e53d63a 933 } else {
f05e70ac 934 list_move(&sp->link, &kvm->arch.active_mmu_pages);
2e53d63a
MT
935 sp->role.invalid = 1;
936 kvm_reload_remote_mmus(kvm);
937 }
12b7d28f 938 kvm_mmu_reset_last_pte_updated(kvm);
a436036b
AK
939}
940
82ce2c96
IE
941/*
942 * Change the number of mmu pages allocated to the vm.
943 * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock.
944 */
945void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
946{
947 /*
948 * If the new number of mmu pages is smaller than the number of
949 * pages currently in use, we must free some mmu pages before
950 * changing the value.
951 */
952
f05e70ac 953 if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
82ce2c96 954 kvm_nr_mmu_pages) {
f05e70ac
ZX
955 int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
956 - kvm->arch.n_free_mmu_pages;
82ce2c96
IE
957
958 while (n_used_mmu_pages > kvm_nr_mmu_pages) {
959 struct kvm_mmu_page *page;
960
f05e70ac 961 page = container_of(kvm->arch.active_mmu_pages.prev,
82ce2c96
IE
962 struct kvm_mmu_page, link);
963 kvm_mmu_zap_page(kvm, page);
964 n_used_mmu_pages--;
965 }
f05e70ac 966 kvm->arch.n_free_mmu_pages = 0;
82ce2c96
IE
967 }
968 else
f05e70ac
ZX
969 kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
970 - kvm->arch.n_alloc_mmu_pages;
82ce2c96 971
f05e70ac 972 kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
82ce2c96
IE
973}
974
f67a46f4 975static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
a436036b
AK
976{
977 unsigned index;
978 struct hlist_head *bucket;
4db35314 979 struct kvm_mmu_page *sp;
a436036b
AK
980 struct hlist_node *node, *n;
981 int r;
982
b8688d51 983 pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
a436036b 984 r = 0;
1ae0a13d 985 index = kvm_page_table_hashfn(gfn);
f05e70ac 986 bucket = &kvm->arch.mmu_page_hash[index];
4db35314
AK
987 hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
988 if (sp->gfn == gfn && !sp->role.metaphysical) {
b8688d51 989 pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
4db35314
AK
990 sp->role.word);
991 kvm_mmu_zap_page(kvm, sp);
a436036b
AK
992 r = 1;
993 }
994 return r;
cea0f0e7
AK
995}
996
f67a46f4 997static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
97a0a01e 998{
4db35314 999 struct kvm_mmu_page *sp;
97a0a01e 1000
4db35314 1001 while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
b8688d51 1002 pgprintk("%s: zap %lx %x\n", __func__, gfn, sp->role.word);
4db35314 1003 kvm_mmu_zap_page(kvm, sp);
97a0a01e
AK
1004 }
1005}
1006
38c335f1 1007static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
6aa8b732 1008{
38c335f1 1009 int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
4db35314 1010 struct kvm_mmu_page *sp = page_header(__pa(pte));
6aa8b732 1011
4db35314 1012 __set_bit(slot, &sp->slot_bitmap);
6aa8b732
AK
1013}
1014
039576c0
AK
1015struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
1016{
72dc67a6
IE
1017 struct page *page;
1018
ad312c7c 1019 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
039576c0
AK
1020
1021 if (gpa == UNMAPPED_GVA)
1022 return NULL;
72dc67a6
IE
1023
1024 down_read(&current->mm->mmap_sem);
1025 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
1026 up_read(&current->mm->mmap_sem);
1027
1028 return page;
039576c0
AK
1029}
1030
1c4f1fd6
AK
1031static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
1032 unsigned pt_access, unsigned pte_access,
1033 int user_fault, int write_fault, int dirty,
05da4558 1034 int *ptwrite, int largepage, gfn_t gfn,
35149e21 1035 pfn_t pfn, bool speculative)
1c4f1fd6
AK
1036{
1037 u64 spte;
15aaa819 1038 int was_rmapped = 0;
75e68e60 1039 int was_writeble = is_writeble_pte(*shadow_pte);
1c4f1fd6 1040
bc750ba8 1041 pgprintk("%s: spte %llx access %x write_fault %d"
1c4f1fd6 1042 " user_fault %d gfn %lx\n",
b8688d51 1043 __func__, *shadow_pte, pt_access,
1c4f1fd6
AK
1044 write_fault, user_fault, gfn);
1045
15aaa819 1046 if (is_rmap_pte(*shadow_pte)) {
05da4558
MT
1047 /*
1048 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
1049 * the parent of the now unreachable PTE.
1050 */
1051 if (largepage && !is_large_pte(*shadow_pte)) {
1052 struct kvm_mmu_page *child;
1053 u64 pte = *shadow_pte;
1054
1055 child = page_header(pte & PT64_BASE_ADDR_MASK);
1056 mmu_page_remove_parent_pte(child, shadow_pte);
35149e21 1057 } else if (pfn != spte_to_pfn(*shadow_pte)) {
15aaa819 1058 pgprintk("hfn old %lx new %lx\n",
35149e21 1059 spte_to_pfn(*shadow_pte), pfn);
15aaa819 1060 rmap_remove(vcpu->kvm, shadow_pte);
05da4558
MT
1061 } else {
1062 if (largepage)
1063 was_rmapped = is_large_pte(*shadow_pte);
1064 else
1065 was_rmapped = 1;
15aaa819 1066 }
15aaa819
MT
1067 }
1068
1c4f1fd6
AK
1069 /*
1070 * We don't set the accessed bit, since we sometimes want to see
1071 * whether the guest actually used the pte (in order to detect
1072 * demand paging).
1073 */
7b52345e 1074 spte = shadow_base_present_pte | shadow_dirty_mask;
947da538
AK
1075 if (!speculative)
1076 pte_access |= PT_ACCESSED_MASK;
1c4f1fd6
AK
1077 if (!dirty)
1078 pte_access &= ~ACC_WRITE_MASK;
7b52345e
SY
1079 if (pte_access & ACC_EXEC_MASK)
1080 spte |= shadow_x_mask;
1081 else
1082 spte |= shadow_nx_mask;
1c4f1fd6 1083 if (pte_access & ACC_USER_MASK)
7b52345e 1084 spte |= shadow_user_mask;
05da4558
MT
1085 if (largepage)
1086 spte |= PT_PAGE_SIZE_MASK;
1c4f1fd6 1087
35149e21 1088 spte |= (u64)pfn << PAGE_SHIFT;
1c4f1fd6
AK
1089
1090 if ((pte_access & ACC_WRITE_MASK)
1091 || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
1092 struct kvm_mmu_page *shadow;
1093
1094 spte |= PT_WRITABLE_MASK;
1c4f1fd6
AK
1095
1096 shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
05da4558
MT
1097 if (shadow ||
1098 (largepage && has_wrprotected_page(vcpu->kvm, gfn))) {
1c4f1fd6 1099 pgprintk("%s: found shadow page for %lx, marking ro\n",
b8688d51 1100 __func__, gfn);
1c4f1fd6
AK
1101 pte_access &= ~ACC_WRITE_MASK;
1102 if (is_writeble_pte(spte)) {
1103 spte &= ~PT_WRITABLE_MASK;
1104 kvm_x86_ops->tlb_flush(vcpu);
1105 }
1106 if (write_fault)
1107 *ptwrite = 1;
1108 }
1109 }
1110
1c4f1fd6
AK
1111 if (pte_access & ACC_WRITE_MASK)
1112 mark_page_dirty(vcpu->kvm, gfn);
1113
b8688d51 1114 pgprintk("%s: setting spte %llx\n", __func__, spte);
05da4558
MT
1115 pgprintk("instantiating %s PTE (%s) at %d (%llx) addr %llx\n",
1116 (spte&PT_PAGE_SIZE_MASK)? "2MB" : "4kB",
1117 (spte&PT_WRITABLE_MASK)?"RW":"R", gfn, spte, shadow_pte);
1c4f1fd6 1118 set_shadow_pte(shadow_pte, spte);
05da4558
MT
1119 if (!was_rmapped && (spte & PT_PAGE_SIZE_MASK)
1120 && (spte & PT_PRESENT_MASK))
1121 ++vcpu->kvm->stat.lpages;
1122
1c4f1fd6
AK
1123 page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
1124 if (!was_rmapped) {
05da4558 1125 rmap_add(vcpu, shadow_pte, gfn, largepage);
1c4f1fd6 1126 if (!is_rmap_pte(*shadow_pte))
35149e21 1127 kvm_release_pfn_clean(pfn);
75e68e60
IE
1128 } else {
1129 if (was_writeble)
35149e21 1130 kvm_release_pfn_dirty(pfn);
75e68e60 1131 else
35149e21 1132 kvm_release_pfn_clean(pfn);
1c4f1fd6 1133 }
1b7fcd32 1134 if (speculative) {
ad312c7c 1135 vcpu->arch.last_pte_updated = shadow_pte;
1b7fcd32
AK
1136 vcpu->arch.last_pte_gfn = gfn;
1137 }
1c4f1fd6
AK
1138}
1139
6aa8b732
AK
1140static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
1141{
1142}
1143
4d9976bb 1144static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
35149e21 1145 int largepage, gfn_t gfn, pfn_t pfn,
05da4558 1146 int level)
6aa8b732 1147{
ad312c7c 1148 hpa_t table_addr = vcpu->arch.mmu.root_hpa;
e833240f 1149 int pt_write = 0;
6aa8b732
AK
1150
1151 for (; ; level--) {
1152 u32 index = PT64_INDEX(v, level);
1153 u64 *table;
1154
1155 ASSERT(VALID_PAGE(table_addr));
1156 table = __va(table_addr);
1157
1158 if (level == 1) {
e833240f 1159 mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
35149e21 1160 0, write, 1, &pt_write, 0, gfn, pfn, false);
05da4558
MT
1161 return pt_write;
1162 }
1163
1164 if (largepage && level == 2) {
1165 mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
35149e21 1166 0, write, 1, &pt_write, 1, gfn, pfn, false);
d196e343 1167 return pt_write;
6aa8b732
AK
1168 }
1169
c7addb90 1170 if (table[index] == shadow_trap_nonpresent_pte) {
25c0de2c 1171 struct kvm_mmu_page *new_table;
cea0f0e7 1172 gfn_t pseudo_gfn;
6aa8b732 1173
cea0f0e7
AK
1174 pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
1175 >> PAGE_SHIFT;
1176 new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
1177 v, level - 1,
f7d9c7b7 1178 1, ACC_ALL, &table[index]);
25c0de2c 1179 if (!new_table) {
6aa8b732 1180 pgprintk("nonpaging_map: ENOMEM\n");
35149e21 1181 kvm_release_pfn_clean(pfn);
6aa8b732
AK
1182 return -ENOMEM;
1183 }
1184
1439442c
SY
1185 table[index] = __pa(new_table->spt)
1186 | PT_PRESENT_MASK | PT_WRITABLE_MASK
1187 | shadow_user_mask | shadow_x_mask;
6aa8b732
AK
1188 }
1189 table_addr = table[index] & PT64_BASE_ADDR_MASK;
1190 }
1191}
1192
10589a46
MT
1193static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
1194{
1195 int r;
05da4558 1196 int largepage = 0;
35149e21 1197 pfn_t pfn;
aaee2c94
MT
1198
1199 down_read(&current->mm->mmap_sem);
05da4558
MT
1200 if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
1201 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1202 largepage = 1;
1203 }
1204
35149e21 1205 pfn = gfn_to_pfn(vcpu->kvm, gfn);
72dc67a6 1206 up_read(&current->mm->mmap_sem);
aaee2c94 1207
d196e343 1208 /* mmio */
35149e21
AL
1209 if (is_error_pfn(pfn)) {
1210 kvm_release_pfn_clean(pfn);
d196e343
AK
1211 return 1;
1212 }
1213
aaee2c94 1214 spin_lock(&vcpu->kvm->mmu_lock);
eb787d10 1215 kvm_mmu_free_some_pages(vcpu);
35149e21 1216 r = __direct_map(vcpu, v, write, largepage, gfn, pfn,
05da4558 1217 PT32E_ROOT_LEVEL);
aaee2c94
MT
1218 spin_unlock(&vcpu->kvm->mmu_lock);
1219
aaee2c94 1220
10589a46
MT
1221 return r;
1222}
1223
1224
17ac10ad
AK
1225static void mmu_free_roots(struct kvm_vcpu *vcpu)
1226{
1227 int i;
4db35314 1228 struct kvm_mmu_page *sp;
17ac10ad 1229
ad312c7c 1230 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
7b53aa56 1231 return;
aaee2c94 1232 spin_lock(&vcpu->kvm->mmu_lock);
ad312c7c
ZX
1233 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
1234 hpa_t root = vcpu->arch.mmu.root_hpa;
17ac10ad 1235
4db35314
AK
1236 sp = page_header(root);
1237 --sp->root_count;
2e53d63a
MT
1238 if (!sp->root_count && sp->role.invalid)
1239 kvm_mmu_zap_page(vcpu->kvm, sp);
ad312c7c 1240 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
aaee2c94 1241 spin_unlock(&vcpu->kvm->mmu_lock);
17ac10ad
AK
1242 return;
1243 }
17ac10ad 1244 for (i = 0; i < 4; ++i) {
ad312c7c 1245 hpa_t root = vcpu->arch.mmu.pae_root[i];
17ac10ad 1246
417726a3 1247 if (root) {
417726a3 1248 root &= PT64_BASE_ADDR_MASK;
4db35314
AK
1249 sp = page_header(root);
1250 --sp->root_count;
2e53d63a
MT
1251 if (!sp->root_count && sp->role.invalid)
1252 kvm_mmu_zap_page(vcpu->kvm, sp);
417726a3 1253 }
ad312c7c 1254 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
17ac10ad 1255 }
aaee2c94 1256 spin_unlock(&vcpu->kvm->mmu_lock);
ad312c7c 1257 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
17ac10ad
AK
1258}
1259
1260static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
1261{
1262 int i;
cea0f0e7 1263 gfn_t root_gfn;
4db35314 1264 struct kvm_mmu_page *sp;
fb72d167 1265 int metaphysical = 0;
3bb65a22 1266
ad312c7c 1267 root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
17ac10ad 1268
ad312c7c
ZX
1269 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
1270 hpa_t root = vcpu->arch.mmu.root_hpa;
17ac10ad
AK
1271
1272 ASSERT(!VALID_PAGE(root));
fb72d167
JR
1273 if (tdp_enabled)
1274 metaphysical = 1;
4db35314 1275 sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
fb72d167
JR
1276 PT64_ROOT_LEVEL, metaphysical,
1277 ACC_ALL, NULL);
4db35314
AK
1278 root = __pa(sp->spt);
1279 ++sp->root_count;
ad312c7c 1280 vcpu->arch.mmu.root_hpa = root;
17ac10ad
AK
1281 return;
1282 }
fb72d167
JR
1283 metaphysical = !is_paging(vcpu);
1284 if (tdp_enabled)
1285 metaphysical = 1;
17ac10ad 1286 for (i = 0; i < 4; ++i) {
ad312c7c 1287 hpa_t root = vcpu->arch.mmu.pae_root[i];
17ac10ad
AK
1288
1289 ASSERT(!VALID_PAGE(root));
ad312c7c
ZX
1290 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
1291 if (!is_present_pte(vcpu->arch.pdptrs[i])) {
1292 vcpu->arch.mmu.pae_root[i] = 0;
417726a3
AK
1293 continue;
1294 }
ad312c7c
ZX
1295 root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
1296 } else if (vcpu->arch.mmu.root_level == 0)
cea0f0e7 1297 root_gfn = 0;
4db35314 1298 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
fb72d167 1299 PT32_ROOT_LEVEL, metaphysical,
f7d9c7b7 1300 ACC_ALL, NULL);
4db35314
AK
1301 root = __pa(sp->spt);
1302 ++sp->root_count;
ad312c7c 1303 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
17ac10ad 1304 }
ad312c7c 1305 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
17ac10ad
AK
1306}
1307
6aa8b732
AK
1308static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
1309{
1310 return vaddr;
1311}
1312
1313static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
3f3e7124 1314 u32 error_code)
6aa8b732 1315{
e833240f 1316 gfn_t gfn;
e2dec939 1317 int r;
6aa8b732 1318
b8688d51 1319 pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
e2dec939
AK
1320 r = mmu_topup_memory_caches(vcpu);
1321 if (r)
1322 return r;
714b93da 1323
6aa8b732 1324 ASSERT(vcpu);
ad312c7c 1325 ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
6aa8b732 1326
e833240f 1327 gfn = gva >> PAGE_SHIFT;
6aa8b732 1328
e833240f
AK
1329 return nonpaging_map(vcpu, gva & PAGE_MASK,
1330 error_code & PFERR_WRITE_MASK, gfn);
6aa8b732
AK
1331}
1332
fb72d167
JR
1333static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
1334 u32 error_code)
1335{
35149e21 1336 pfn_t pfn;
fb72d167 1337 int r;
05da4558
MT
1338 int largepage = 0;
1339 gfn_t gfn = gpa >> PAGE_SHIFT;
fb72d167
JR
1340
1341 ASSERT(vcpu);
1342 ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
1343
1344 r = mmu_topup_memory_caches(vcpu);
1345 if (r)
1346 return r;
1347
1348 down_read(&current->mm->mmap_sem);
05da4558
MT
1349 if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
1350 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1351 largepage = 1;
1352 }
35149e21 1353 pfn = gfn_to_pfn(vcpu->kvm, gfn);
3200f405 1354 up_read(&current->mm->mmap_sem);
35149e21
AL
1355 if (is_error_pfn(pfn)) {
1356 kvm_release_pfn_clean(pfn);
fb72d167
JR
1357 return 1;
1358 }
1359 spin_lock(&vcpu->kvm->mmu_lock);
1360 kvm_mmu_free_some_pages(vcpu);
1361 r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
67253af5 1362 largepage, gfn, pfn, kvm_x86_ops->get_tdp_level());
fb72d167 1363 spin_unlock(&vcpu->kvm->mmu_lock);
fb72d167
JR
1364
1365 return r;
1366}
1367
6aa8b732
AK
1368static void nonpaging_free(struct kvm_vcpu *vcpu)
1369{
17ac10ad 1370 mmu_free_roots(vcpu);
6aa8b732
AK
1371}
1372
1373static int nonpaging_init_context(struct kvm_vcpu *vcpu)
1374{
ad312c7c 1375 struct kvm_mmu *context = &vcpu->arch.mmu;
6aa8b732
AK
1376
1377 context->new_cr3 = nonpaging_new_cr3;
1378 context->page_fault = nonpaging_page_fault;
6aa8b732
AK
1379 context->gva_to_gpa = nonpaging_gva_to_gpa;
1380 context->free = nonpaging_free;
c7addb90 1381 context->prefetch_page = nonpaging_prefetch_page;
cea0f0e7 1382 context->root_level = 0;
6aa8b732 1383 context->shadow_root_level = PT32E_ROOT_LEVEL;
17c3ba9d 1384 context->root_hpa = INVALID_PAGE;
6aa8b732
AK
1385 return 0;
1386}
1387
d835dfec 1388void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
6aa8b732 1389{
1165f5fe 1390 ++vcpu->stat.tlb_flush;
cbdd1bea 1391 kvm_x86_ops->tlb_flush(vcpu);
6aa8b732
AK
1392}
1393
1394static void paging_new_cr3(struct kvm_vcpu *vcpu)
1395{
b8688d51 1396 pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
cea0f0e7 1397 mmu_free_roots(vcpu);
6aa8b732
AK
1398}
1399
6aa8b732
AK
1400static void inject_page_fault(struct kvm_vcpu *vcpu,
1401 u64 addr,
1402 u32 err_code)
1403{
c3c91fee 1404 kvm_inject_page_fault(vcpu, addr, err_code);
6aa8b732
AK
1405}
1406
6aa8b732
AK
1407static void paging_free(struct kvm_vcpu *vcpu)
1408{
1409 nonpaging_free(vcpu);
1410}
1411
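/*
 * paging_tmpl.h is included twice; each pass instantiates the guest
 * walker and fault handling code for one guest pte width, generating the
 * paging64_* (also used for PAE) and paging32_* functions referenced by
 * the init_context routines below.
 */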
1412#define PTTYPE 64
1413#include "paging_tmpl.h"
1414#undef PTTYPE
1415
1416#define PTTYPE 32
1417#include "paging_tmpl.h"
1418#undef PTTYPE
1419
17ac10ad 1420static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
6aa8b732 1421{
ad312c7c 1422 struct kvm_mmu *context = &vcpu->arch.mmu;
6aa8b732
AK
1423
1424 ASSERT(is_pae(vcpu));
1425 context->new_cr3 = paging_new_cr3;
1426 context->page_fault = paging64_page_fault;
6aa8b732 1427 context->gva_to_gpa = paging64_gva_to_gpa;
c7addb90 1428 context->prefetch_page = paging64_prefetch_page;
6aa8b732 1429 context->free = paging_free;
17ac10ad
AK
1430 context->root_level = level;
1431 context->shadow_root_level = level;
17c3ba9d 1432 context->root_hpa = INVALID_PAGE;
6aa8b732
AK
1433 return 0;
1434}
1435
17ac10ad
AK
1436static int paging64_init_context(struct kvm_vcpu *vcpu)
1437{
1438 return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
1439}
1440
6aa8b732
AK
1441static int paging32_init_context(struct kvm_vcpu *vcpu)
1442{
ad312c7c 1443 struct kvm_mmu *context = &vcpu->arch.mmu;
6aa8b732
AK
1444
1445 context->new_cr3 = paging_new_cr3;
1446 context->page_fault = paging32_page_fault;
6aa8b732
AK
1447 context->gva_to_gpa = paging32_gva_to_gpa;
1448 context->free = paging_free;
c7addb90 1449 context->prefetch_page = paging32_prefetch_page;
6aa8b732
AK
1450 context->root_level = PT32_ROOT_LEVEL;
1451 context->shadow_root_level = PT32E_ROOT_LEVEL;
17c3ba9d 1452 context->root_hpa = INVALID_PAGE;
6aa8b732
AK
1453 return 0;
1454}
1455
1456static int paging32E_init_context(struct kvm_vcpu *vcpu)
1457{
17ac10ad 1458 return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
6aa8b732
AK
1459}
1460
fb72d167
JR
1461static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
1462{
1463 struct kvm_mmu *context = &vcpu->arch.mmu;
1464
1465 context->new_cr3 = nonpaging_new_cr3;
1466 context->page_fault = tdp_page_fault;
1467 context->free = nonpaging_free;
1468 context->prefetch_page = nonpaging_prefetch_page;
67253af5 1469 context->shadow_root_level = kvm_x86_ops->get_tdp_level();
fb72d167
JR
1470 context->root_hpa = INVALID_PAGE;
1471
1472 if (!is_paging(vcpu)) {
1473 context->gva_to_gpa = nonpaging_gva_to_gpa;
1474 context->root_level = 0;
1475 } else if (is_long_mode(vcpu)) {
1476 context->gva_to_gpa = paging64_gva_to_gpa;
1477 context->root_level = PT64_ROOT_LEVEL;
1478 } else if (is_pae(vcpu)) {
1479 context->gva_to_gpa = paging64_gva_to_gpa;
1480 context->root_level = PT32E_ROOT_LEVEL;
1481 } else {
1482 context->gva_to_gpa = paging32_gva_to_gpa;
1483 context->root_level = PT32_ROOT_LEVEL;
1484 }
1485
1486 return 0;
1487}
1488
1489static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
6aa8b732
AK
1490{
1491 ASSERT(vcpu);
ad312c7c 1492 ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
6aa8b732
AK
1493
1494 if (!is_paging(vcpu))
1495 return nonpaging_init_context(vcpu);
a9058ecd 1496 else if (is_long_mode(vcpu))
6aa8b732
AK
1497 return paging64_init_context(vcpu);
1498 else if (is_pae(vcpu))
1499 return paging32E_init_context(vcpu);
1500 else
1501 return paging32_init_context(vcpu);
1502}
1503
fb72d167
JR
1504static int init_kvm_mmu(struct kvm_vcpu *vcpu)
1505{
35149e21
AL
1506 vcpu->arch.update_pte.pfn = bad_pfn;
1507
fb72d167
JR
1508 if (tdp_enabled)
1509 return init_kvm_tdp_mmu(vcpu);
1510 else
1511 return init_kvm_softmmu(vcpu);
1512}
1513
6aa8b732
AK
1514static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
1515{
1516 ASSERT(vcpu);
ad312c7c
ZX
1517 if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
1518 vcpu->arch.mmu.free(vcpu);
1519 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
6aa8b732
AK
1520 }
1521}
1522
1523int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
17c3ba9d
AK
1524{
1525 destroy_kvm_mmu(vcpu);
1526 return init_kvm_mmu(vcpu);
1527}
8668a3c4 1528EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
17c3ba9d
AK
1529
1530int kvm_mmu_load(struct kvm_vcpu *vcpu)
6aa8b732 1531{
714b93da
AK
1532 int r;
1533
e2dec939 1534 r = mmu_topup_memory_caches(vcpu);
17c3ba9d
AK
1535 if (r)
1536 goto out;
aaee2c94 1537 spin_lock(&vcpu->kvm->mmu_lock);
eb787d10 1538 kvm_mmu_free_some_pages(vcpu);
17c3ba9d 1539 mmu_alloc_roots(vcpu);
aaee2c94 1540 spin_unlock(&vcpu->kvm->mmu_lock);
ad312c7c 1541 kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
17c3ba9d 1542 kvm_mmu_flush_tlb(vcpu);
714b93da
AK
1543out:
1544 return r;
6aa8b732 1545}
17c3ba9d
AK
1546EXPORT_SYMBOL_GPL(kvm_mmu_load);
1547
1548void kvm_mmu_unload(struct kvm_vcpu *vcpu)
1549{
1550 mmu_free_roots(vcpu);
1551}
6aa8b732 1552
09072daf 1553static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
4db35314 1554 struct kvm_mmu_page *sp,
ac1b714e
AK
1555 u64 *spte)
1556{
1557 u64 pte;
1558 struct kvm_mmu_page *child;
1559
1560 pte = *spte;
c7addb90 1561 if (is_shadow_present_pte(pte)) {
05da4558
MT
1562 if (sp->role.level == PT_PAGE_TABLE_LEVEL ||
1563 is_large_pte(pte))
290fc38d 1564 rmap_remove(vcpu->kvm, spte);
ac1b714e
AK
1565 else {
1566 child = page_header(pte & PT64_BASE_ADDR_MASK);
90cb0529 1567 mmu_page_remove_parent_pte(child, spte);
ac1b714e
AK
1568 }
1569 }
c7addb90 1570 set_shadow_pte(spte, shadow_trap_nonpresent_pte);
05da4558
MT
1571 if (is_large_pte(pte))
1572 --vcpu->kvm->stat.lpages;
ac1b714e
AK
1573}
1574
0028425f 1575static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
4db35314 1576 struct kvm_mmu_page *sp,
0028425f 1577 u64 *spte,
489f1d65 1578 const void *new)
0028425f 1579{
30945387
MT
1580 if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
1581 if (!vcpu->arch.update_pte.largepage ||
1582 sp->role.glevels == PT32_ROOT_LEVEL) {
1583 ++vcpu->kvm->stat.mmu_pde_zapped;
1584 return;
1585 }
1586 }
0028425f 1587
4cee5764 1588 ++vcpu->kvm->stat.mmu_pte_updated;
4db35314 1589 if (sp->role.glevels == PT32_ROOT_LEVEL)
489f1d65 1590 paging32_update_pte(vcpu, sp, spte, new);
0028425f 1591 else
489f1d65 1592 paging64_update_pte(vcpu, sp, spte, new);
0028425f
AK
1593}
1594
79539cec
AK
1595static bool need_remote_flush(u64 old, u64 new)
1596{
1597 if (!is_shadow_present_pte(old))
1598 return false;
1599 if (!is_shadow_present_pte(new))
1600 return true;
1601 if ((old ^ new) & PT64_BASE_ADDR_MASK)
1602 return true;
1603 old ^= PT64_NX_MASK;
1604 new ^= PT64_NX_MASK;
1605 return (old & ~new & PT64_PERM_MASK) != 0;
1606}
1607
1608static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
1609{
1610 if (need_remote_flush(old, new))
1611 kvm_flush_remote_tlbs(vcpu->kvm);
1612 else
1613 kvm_mmu_flush_tlb(vcpu);
1614}
1615
12b7d28f
AK
1616static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
1617{
ad312c7c 1618 u64 *spte = vcpu->arch.last_pte_updated;
12b7d28f 1619
7b52345e 1620 return !!(spte && (*spte & shadow_accessed_mask));
12b7d28f
AK
1621}
1622
d7824fff
AK
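/*
 * Peek at the guest pte being written and, if it looks present, resolve
 * its pfn here, before kvm_mmu_pte_write() takes mmu_lock: gfn_to_pfn()
 * may sleep, so it cannot run under the spin lock.  The result is stashed
 * in vcpu->arch.update_pte and dropped at the end of kvm_mmu_pte_write().
 */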
1623static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1624 const u8 *new, int bytes)
1625{
1626 gfn_t gfn;
1627 int r;
1628 u64 gpte = 0;
35149e21 1629 pfn_t pfn;
d7824fff 1630
05da4558
MT
1631 vcpu->arch.update_pte.largepage = 0;
1632
d7824fff
AK
1633 if (bytes != 4 && bytes != 8)
1634 return;
1635
1636 /*
1637 * Assume that the pte write is on a page table of the same type
1638 * as the current vcpu paging mode. This is nearly always true
1639 * (might be false while changing modes). Note it is verified later
1640 * by update_pte().
1641 */
1642 if (is_pae(vcpu)) {
1643 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
1644 if ((bytes == 4) && (gpa % 4 == 0)) {
1645 r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
1646 if (r)
1647 return;
1648 memcpy((void *)&gpte + (gpa % 8), new, 4);
1649 } else if ((bytes == 8) && (gpa % 8 == 0)) {
1650 memcpy((void *)&gpte, new, 8);
1651 }
1652 } else {
1653 if ((bytes == 4) && (gpa % 4 == 0))
1654 memcpy((void *)&gpte, new, 4);
1655 }
1656 if (!is_present_pte(gpte))
1657 return;
1658 gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
72dc67a6 1659
05da4558
MT
1660 down_read(&current->mm->mmap_sem);
1661 if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
1662 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1663 vcpu->arch.update_pte.largepage = 1;
1664 }
35149e21 1665 pfn = gfn_to_pfn(vcpu->kvm, gfn);
05da4558 1666 up_read(&current->mm->mmap_sem);
72dc67a6 1667
35149e21
AL
1668 if (is_error_pfn(pfn)) {
1669 kvm_release_pfn_clean(pfn);
d196e343
AK
1670 return;
1671 }
d7824fff 1672 vcpu->arch.update_pte.gfn = gfn;
35149e21 1673 vcpu->arch.update_pte.pfn = pfn;
d7824fff
AK
1674}
1675
1b7fcd32
AK
1676static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
1677{
1678 u64 *spte = vcpu->arch.last_pte_updated;
1679
1680 if (spte
1681 && vcpu->arch.last_pte_gfn == gfn
1682 && shadow_accessed_mask
1683 && !(*spte & shadow_accessed_mask)
1684 && is_shadow_present_pte(*spte))
1685 set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
1686}
1687
09072daf 1688void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
fe551881 1689 const u8 *new, int bytes)
da4a00f0 1690{
9b7a0325 1691 gfn_t gfn = gpa >> PAGE_SHIFT;
4db35314 1692 struct kvm_mmu_page *sp;
0e7bc4b9 1693 struct hlist_node *node, *n;
9b7a0325
AK
1694 struct hlist_head *bucket;
1695 unsigned index;
489f1d65 1696 u64 entry, gentry;
9b7a0325 1697 u64 *spte;
9b7a0325 1698 unsigned offset = offset_in_page(gpa);
0e7bc4b9 1699 unsigned pte_size;
9b7a0325 1700 unsigned page_offset;
0e7bc4b9 1701 unsigned misaligned;
fce0657f 1702 unsigned quadrant;
9b7a0325 1703 int level;
86a5ba02 1704 int flooded = 0;
ac1b714e 1705 int npte;
489f1d65 1706 int r;
9b7a0325 1707
b8688d51 1708 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
d7824fff 1709 mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
aaee2c94 1710 spin_lock(&vcpu->kvm->mmu_lock);
1b7fcd32 1711 kvm_mmu_access_page(vcpu, gfn);
eb787d10 1712 kvm_mmu_free_some_pages(vcpu);
4cee5764 1713 ++vcpu->kvm->stat.mmu_pte_write;
c7addb90 1714 kvm_mmu_audit(vcpu, "pre pte write");
ad312c7c 1715 if (gfn == vcpu->arch.last_pt_write_gfn
12b7d28f 1716 && !last_updated_pte_accessed(vcpu)) {
ad312c7c
ZX
1717 ++vcpu->arch.last_pt_write_count;
1718 if (vcpu->arch.last_pt_write_count >= 3)
86a5ba02
AK
1719 flooded = 1;
1720 } else {
ad312c7c
ZX
1721 vcpu->arch.last_pt_write_gfn = gfn;
1722 vcpu->arch.last_pt_write_count = 1;
1723 vcpu->arch.last_pte_updated = NULL;
86a5ba02 1724 }
1ae0a13d 1725 index = kvm_page_table_hashfn(gfn);
f05e70ac 1726 bucket = &vcpu->kvm->arch.mmu_page_hash[index];
4db35314
AK
1727 hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
1728 if (sp->gfn != gfn || sp->role.metaphysical)
9b7a0325 1729 continue;
4db35314 1730 pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
0e7bc4b9 1731 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
e925c5ba 1732 misaligned |= bytes < 4;
86a5ba02 1733 if (misaligned || flooded) {
0e7bc4b9
AK
1734 /*
1735 * Misaligned accesses are too much trouble to fix
1736 * up; also, they usually indicate a page is not used
1737 * as a page table.
86a5ba02
AK
1738 *
1739 * If we're seeing too many writes to a page,
1740 * it may no longer be a page table, or we may be
1741 * forking, in which case it is better to unmap the
1742 * page.
0e7bc4b9
AK
1743 */
1744 pgprintk("misaligned: gpa %llx bytes %d role %x\n",
4db35314
AK
1745 gpa, bytes, sp->role.word);
1746 kvm_mmu_zap_page(vcpu->kvm, sp);
4cee5764 1747 ++vcpu->kvm->stat.mmu_flooded;
0e7bc4b9
AK
1748 continue;
1749 }
9b7a0325 1750 page_offset = offset;
4db35314 1751 level = sp->role.level;
ac1b714e 1752 npte = 1;
4db35314 1753 if (sp->role.glevels == PT32_ROOT_LEVEL) {
ac1b714e
AK
1754 page_offset <<= 1; /* 32->64 */
1755 /*
1756 * A 32-bit pde maps 4MB while the shadow pdes map
1757 * only 2MB. So we need to double the offset again
1758 * and zap two pdes instead of one.
1759 */
1760 if (level == PT32_ROOT_LEVEL) {
6b8d0f9b 1761 page_offset &= ~7; /* kill rounding error */
ac1b714e
AK
1762 page_offset <<= 1;
1763 npte = 2;
1764 }
fce0657f 1765 quadrant = page_offset >> PAGE_SHIFT;
9b7a0325 1766 page_offset &= ~PAGE_MASK;
4db35314 1767 if (quadrant != sp->role.quadrant)
fce0657f 1768 continue;
9b7a0325 1769 }
4db35314 1770 spte = &sp->spt[page_offset / sizeof(*spte)];
489f1d65
DE
1771 if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
1772 gentry = 0;
1773 r = kvm_read_guest_atomic(vcpu->kvm,
1774 gpa & ~(u64)(pte_size - 1),
1775 &gentry, pte_size);
1776 new = (const void *)&gentry;
1777 if (r < 0)
1778 new = NULL;
1779 }
ac1b714e 1780 while (npte--) {
79539cec 1781 entry = *spte;
4db35314 1782 mmu_pte_write_zap_pte(vcpu, sp, spte);
489f1d65
DE
1783 if (new)
1784 mmu_pte_write_new_pte(vcpu, sp, spte, new);
79539cec 1785 mmu_pte_write_flush_tlb(vcpu, entry, *spte);
ac1b714e 1786 ++spte;
9b7a0325 1787 }
9b7a0325 1788 }
c7addb90 1789 kvm_mmu_audit(vcpu, "post pte write");
aaee2c94 1790 spin_unlock(&vcpu->kvm->mmu_lock);
35149e21
AL
1791 if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
1792 kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
1793 vcpu->arch.update_pte.pfn = bad_pfn;
d7824fff 1794 }
da4a00f0
AK
1795}
1796
a436036b
AK
1797int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
1798{
10589a46
MT
1799 gpa_t gpa;
1800 int r;
a436036b 1801
10589a46 1802 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
10589a46 1803
aaee2c94 1804 spin_lock(&vcpu->kvm->mmu_lock);
10589a46 1805 r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
aaee2c94 1806 spin_unlock(&vcpu->kvm->mmu_lock);
10589a46 1807 return r;
a436036b
AK
1808}
1809
22d95b12 1810void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
ebeace86 1811{
f05e70ac 1812 while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
4db35314 1813 struct kvm_mmu_page *sp;
ebeace86 1814
f05e70ac 1815 sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
4db35314
AK
1816 struct kvm_mmu_page, link);
1817 kvm_mmu_zap_page(vcpu->kvm, sp);
4cee5764 1818 ++vcpu->kvm->stat.mmu_recycled;
ebeace86
AK
1819 }
1820}
ebeace86 1821
3067714c
AK
1822int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
1823{
1824 int r;
1825 enum emulation_result er;
1826
ad312c7c 1827 r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
3067714c
AK
1828 if (r < 0)
1829 goto out;
1830
1831 if (!r) {
1832 r = 1;
1833 goto out;
1834 }
1835
b733bfb5
AK
1836 r = mmu_topup_memory_caches(vcpu);
1837 if (r)
1838 goto out;
1839
3067714c 1840 er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
3067714c
AK
1841
1842 switch (er) {
1843 case EMULATE_DONE:
1844 return 1;
1845 case EMULATE_DO_MMIO:
1846 ++vcpu->stat.mmio_exits;
1847 return 0;
1848 case EMULATE_FAIL:
1849 kvm_report_emulation_failure(vcpu, "pagetable");
1850 return 1;
1851 default:
1852 BUG();
1853 }
1854out:
3067714c
AK
1855 return r;
1856}
1857EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
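/*
 * Return convention as implemented above: < 0 is an error, 0 asks the
 * caller to exit to userspace (mmio emulation), 1 means the fault was
 * handled and the guest can be resumed.  A hypothetical exit handler
 * (sketch only, not copied from vmx.c or svm.c) would use it roughly as:
 *
 *	r = kvm_mmu_page_fault(vcpu, fault_address, error_code);
 *	if (r < 0)
 *		return r;
 *	if (r == 0)
 *		return exit_to_userspace(vcpu);	// hypothetical helper
 *	return 1;				// re-enter the guest
 */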
1858
18552672
JR
1859void kvm_enable_tdp(void)
1860{
1861 tdp_enabled = true;
1862}
1863EXPORT_SYMBOL_GPL(kvm_enable_tdp);
1864
6aa8b732
AK
1865static void free_mmu_pages(struct kvm_vcpu *vcpu)
1866{
4db35314 1867 struct kvm_mmu_page *sp;
6aa8b732 1868
f05e70ac
ZX
1869 while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
1870 sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
4db35314
AK
1871 struct kvm_mmu_page, link);
1872 kvm_mmu_zap_page(vcpu->kvm, sp);
8d2d73b9 1873 cond_resched();
f51234c2 1874 }
ad312c7c 1875 free_page((unsigned long)vcpu->arch.mmu.pae_root);
6aa8b732
AK
1876}
1877
1878static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
1879{
17ac10ad 1880 struct page *page;
6aa8b732
AK
1881 int i;
1882
1883 ASSERT(vcpu);
1884
f05e70ac
ZX
1885 if (vcpu->kvm->arch.n_requested_mmu_pages)
1886 vcpu->kvm->arch.n_free_mmu_pages =
1887 vcpu->kvm->arch.n_requested_mmu_pages;
82ce2c96 1888 else
f05e70ac
ZX
1889 vcpu->kvm->arch.n_free_mmu_pages =
1890 vcpu->kvm->arch.n_alloc_mmu_pages;
17ac10ad
AK
1891 /*
1892 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
1893 * Therefore we need to allocate shadow page tables in the first
1894 * 4GB of memory, which happens to fit the DMA32 zone.
1895 */
1896 page = alloc_page(GFP_KERNEL | __GFP_DMA32);
1897 if (!page)
1898 goto error_1;
ad312c7c 1899 vcpu->arch.mmu.pae_root = page_address(page);
17ac10ad 1900 for (i = 0; i < 4; ++i)
ad312c7c 1901 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
17ac10ad 1902
6aa8b732
AK
1903 return 0;
1904
1905error_1:
1906 free_mmu_pages(vcpu);
1907 return -ENOMEM;
1908}
1909
8018c27b 1910int kvm_mmu_create(struct kvm_vcpu *vcpu)
6aa8b732 1911{
6aa8b732 1912 ASSERT(vcpu);
ad312c7c 1913 ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
6aa8b732 1914
8018c27b
IM
1915 return alloc_mmu_pages(vcpu);
1916}
6aa8b732 1917
8018c27b
IM
1918int kvm_mmu_setup(struct kvm_vcpu *vcpu)
1919{
1920 ASSERT(vcpu);
ad312c7c 1921 ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2c264957 1922
8018c27b 1923 return init_kvm_mmu(vcpu);
6aa8b732
AK
1924}
1925
1926void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
1927{
1928 ASSERT(vcpu);
1929
1930 destroy_kvm_mmu(vcpu);
1931 free_mmu_pages(vcpu);
714b93da 1932 mmu_free_memory_caches(vcpu);
6aa8b732
AK
1933}
1934
90cb0529 1935void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
6aa8b732 1936{
4db35314 1937 struct kvm_mmu_page *sp;
6aa8b732 1938
f05e70ac 1939 list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
6aa8b732
AK
1940 int i;
1941 u64 *pt;
1942
4db35314 1943 if (!test_bit(slot, &sp->slot_bitmap))
6aa8b732
AK
1944 continue;
1945
4db35314 1946 pt = sp->spt;
6aa8b732
AK
1947 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
1948 /* avoid RMW */
9647c14c 1949 if (pt[i] & PT_WRITABLE_MASK)
6aa8b732 1950 pt[i] &= ~PT_WRITABLE_MASK;
6aa8b732
AK
1951 }
1952}
37a7d8b0 1953
90cb0529 1954void kvm_mmu_zap_all(struct kvm *kvm)
e0fa826f 1955{
4db35314 1956 struct kvm_mmu_page *sp, *node;
e0fa826f 1957
aaee2c94 1958 spin_lock(&kvm->mmu_lock);
f05e70ac 1959 list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
4db35314 1960 kvm_mmu_zap_page(kvm, sp);
aaee2c94 1961 spin_unlock(&kvm->mmu_lock);
e0fa826f 1962
90cb0529 1963 kvm_flush_remote_tlbs(kvm);
e0fa826f
DL
1964}
1965
8b2cf73c 1966static void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
3ee16c81
IE
1967{
1968 struct kvm_mmu_page *page;
1969
1970 page = container_of(kvm->arch.active_mmu_pages.prev,
1971 struct kvm_mmu_page, link);
1972 kvm_mmu_zap_page(kvm, page);
1973}
1974
1975static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
1976{
1977 struct kvm *kvm;
1978 struct kvm *kvm_freed = NULL;
1979 int cache_count = 0;
1980
1981 spin_lock(&kvm_lock);
1982
1983 list_for_each_entry(kvm, &vm_list, vm_list) {
1984 int npages;
1985
1986 spin_lock(&kvm->mmu_lock);
1987 npages = kvm->arch.n_alloc_mmu_pages -
1988 kvm->arch.n_free_mmu_pages;
1989 cache_count += npages;
1990 if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
1991 kvm_mmu_remove_one_alloc_mmu_page(kvm);
1992 cache_count--;
1993 kvm_freed = kvm;
1994 }
1995 nr_to_scan--;
1996
1997 spin_unlock(&kvm->mmu_lock);
1998 }
1999 if (kvm_freed)
2000 list_move_tail(&kvm_freed->vm_list, &vm_list);
2001
2002 spin_unlock(&kvm_lock);
2003
2004 return cache_count;
2005}
2006
2007static struct shrinker mmu_shrinker = {
2008 .shrink = mmu_shrink,
2009 .seeks = DEFAULT_SEEKS * 10,
2010};
2011
2ddfd20e 2012static void mmu_destroy_caches(void)
b5a33a75
AK
2013{
2014 if (pte_chain_cache)
2015 kmem_cache_destroy(pte_chain_cache);
2016 if (rmap_desc_cache)
2017 kmem_cache_destroy(rmap_desc_cache);
d3d25b04
AK
2018 if (mmu_page_header_cache)
2019 kmem_cache_destroy(mmu_page_header_cache);
b5a33a75
AK
2020}
2021
3ee16c81
IE
2022void kvm_mmu_module_exit(void)
2023{
2024 mmu_destroy_caches();
2025 unregister_shrinker(&mmu_shrinker);
2026}
2027
b5a33a75
AK
2028int kvm_mmu_module_init(void)
2029{
2030 pte_chain_cache = kmem_cache_create("kvm_pte_chain",
2031 sizeof(struct kvm_pte_chain),
20c2df83 2032 0, 0, NULL);
b5a33a75
AK
2033 if (!pte_chain_cache)
2034 goto nomem;
2035 rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
2036 sizeof(struct kvm_rmap_desc),
20c2df83 2037 0, 0, NULL);
b5a33a75
AK
2038 if (!rmap_desc_cache)
2039 goto nomem;
2040
d3d25b04
AK
2041 mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
2042 sizeof(struct kvm_mmu_page),
20c2df83 2043 0, 0, NULL);
d3d25b04
AK
2044 if (!mmu_page_header_cache)
2045 goto nomem;
2046
3ee16c81
IE
2047 register_shrinker(&mmu_shrinker);
2048
b5a33a75
AK
2049 return 0;
2050
2051nomem:
3ee16c81 2052 mmu_destroy_caches();
b5a33a75
AK
2053 return -ENOMEM;
2054}
2055
3ad82a7e
ZX
2056/*
2057 * Calculate mmu pages needed for kvm.
2058 */
2059unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
2060{
2061 int i;
2062 unsigned int nr_mmu_pages;
2063 unsigned int nr_pages = 0;
2064
2065 for (i = 0; i < kvm->nmemslots; i++)
2066 nr_pages += kvm->memslots[i].npages;
2067
2068 nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
2069 nr_mmu_pages = max(nr_mmu_pages,
2070 (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
2071
2072 return nr_mmu_pages;
2073}
2074
2f333bcb
MT
2075static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
2076 unsigned len)
2077{
2078 if (len > buffer->len)
2079 return NULL;
2080 return buffer->ptr;
2081}
2082
2083static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
2084 unsigned len)
2085{
2086 void *ret;
2087
2088 ret = pv_mmu_peek_buffer(buffer, len);
2089 if (!ret)
2090 return ret;
2091 buffer->ptr += len;
2092 buffer->len -= len;
2093 buffer->processed += len;
2094 return ret;
2095}
2096
2097static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
2098 gpa_t addr, gpa_t value)
2099{
2100 int bytes = 8;
2101 int r;
2102
2103 if (!is_long_mode(vcpu) && !is_pae(vcpu))
2104 bytes = 4;
2105
2106 r = mmu_topup_memory_caches(vcpu);
2107 if (r)
2108 return r;
2109
3200f405 2110 if (!emulator_write_phys(vcpu, addr, &value, bytes))
2f333bcb
MT
2111 return -EFAULT;
2112
2113 return 1;
2114}
2115
2116static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
2117{
2118 kvm_x86_ops->tlb_flush(vcpu);
2119 return 1;
2120}
2121
2122static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
2123{
2124 spin_lock(&vcpu->kvm->mmu_lock);
2125 mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
2126 spin_unlock(&vcpu->kvm->mmu_lock);
2127 return 1;
2128}
2129
2130static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
2131 struct kvm_pv_mmu_op_buffer *buffer)
2132{
2133 struct kvm_mmu_op_header *header;
2134
2135 header = pv_mmu_peek_buffer(buffer, sizeof *header);
2136 if (!header)
2137 return 0;
2138 switch (header->op) {
2139 case KVM_MMU_OP_WRITE_PTE: {
2140 struct kvm_mmu_op_write_pte *wpte;
2141
2142 wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
2143 if (!wpte)
2144 return 0;
2145 return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
2146 wpte->pte_val);
2147 }
2148 case KVM_MMU_OP_FLUSH_TLB: {
2149 struct kvm_mmu_op_flush_tlb *ftlb;
2150
2151 ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
2152 if (!ftlb)
2153 return 0;
2154 return kvm_pv_mmu_flush_tlb(vcpu);
2155 }
2156 case KVM_MMU_OP_RELEASE_PT: {
2157 struct kvm_mmu_op_release_pt *rpt;
2158
2159 rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
2160 if (!rpt)
2161 return 0;
2162 return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
2163 }
2164 default: return 0;
2165 }
2166}
2167
2168int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
2169 gpa_t addr, unsigned long *ret)
2170{
2171 int r;
2172 struct kvm_pv_mmu_op_buffer buffer;
2173
2f333bcb
MT
2174 buffer.ptr = buffer.buf;
2175 buffer.len = min_t(unsigned long, bytes, sizeof buffer.buf);
2176 buffer.processed = 0;
2177
2178 r = kvm_read_guest(vcpu->kvm, addr, buffer.buf, buffer.len);
2179 if (r)
2180 goto out;
2181
2182 while (buffer.len) {
2183 r = kvm_pv_mmu_op_one(vcpu, &buffer);
2184 if (r < 0)
2185 goto out;
2186 if (r == 0)
2187 break;
2188 }
2189
2190 r = 1;
2191out:
2192 *ret = buffer.processed;
2f333bcb
MT
2193 return r;
2194}
2195
37a7d8b0
AK
2196#ifdef AUDIT
2197
2198static const char *audit_msg;
2199
2200static gva_t canonicalize(gva_t gva)
2201{
2202#ifdef CONFIG_X86_64
2203 gva = (long long)(gva << 16) >> 16;
2204#endif
2205 return gva;
2206}
2207
2208static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
2209 gva_t va, int level)
2210{
2211 u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
2212 int i;
2213 gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
2214
2215 for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
2216 u64 ent = pt[i];
2217
c7addb90 2218 if (ent == shadow_trap_nonpresent_pte)
37a7d8b0
AK
2219 continue;
2220
2221 va = canonicalize(va);
c7addb90
AK
2222 if (level > 1) {
2223 if (ent == shadow_notrap_nonpresent_pte)
2224 printk(KERN_ERR "audit: (%s) nontrapping pte"
2225 " in nonleaf level: levels %d gva %lx"
2226 " level %d pte %llx\n", audit_msg,
ad312c7c 2227 vcpu->arch.mmu.root_level, va, level, ent);
c7addb90 2228
37a7d8b0 2229 audit_mappings_page(vcpu, ent, va, level - 1);
c7addb90 2230 } else {
ad312c7c 2231 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
35149e21 2232 pfn_t pfn = gpa_to_pfn(vcpu, gpa);
 hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
37a7d8b0 2233
c7addb90 2234 if (is_shadow_present_pte(ent)
37a7d8b0 2235 && (ent & PT64_BASE_ADDR_MASK) != hpa)
c7addb90
AK
2236 printk(KERN_ERR "xx audit error: (%s) levels %d"
2237 " gva %lx gpa %llx hpa %llx ent %llx %d\n",
ad312c7c 2238 audit_msg, vcpu->arch.mmu.root_level,
d77c26fc
MD
2239 va, gpa, hpa, ent,
2240 is_shadow_present_pte(ent));
c7addb90
AK
2241 else if (ent == shadow_notrap_nonpresent_pte
2242 && !is_error_hpa(hpa))
2243 printk(KERN_ERR "audit: (%s) notrap shadow,"
2244 " valid guest gva %lx\n", audit_msg, va);
35149e21 2245 kvm_release_pfn_clean(pfn);
c7addb90 2246
37a7d8b0
AK
2247 }
2248 }
2249}
2250
2251static void audit_mappings(struct kvm_vcpu *vcpu)
2252{
1ea252af 2253 unsigned i;
37a7d8b0 2254
ad312c7c
ZX
2255 if (vcpu->arch.mmu.root_level == 4)
2256 audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
37a7d8b0
AK
2257 else
2258 for (i = 0; i < 4; ++i)
ad312c7c 2259 if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
37a7d8b0 2260 audit_mappings_page(vcpu,
ad312c7c 2261 vcpu->arch.mmu.pae_root[i],
37a7d8b0
AK
2262 i << 30,
2263 2);
2264}
2265
2266static int count_rmaps(struct kvm_vcpu *vcpu)
2267{
2268 int nmaps = 0;
2269 int i, j, k;
2270
2271 for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
2272 struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
2273 struct kvm_rmap_desc *d;
2274
2275 for (j = 0; j < m->npages; ++j) {
290fc38d 2276 unsigned long *rmapp = &m->rmap[j];
37a7d8b0 2277
290fc38d 2278 if (!*rmapp)
37a7d8b0 2279 continue;
290fc38d 2280 if (!(*rmapp & 1)) {
37a7d8b0
AK
2281 ++nmaps;
2282 continue;
2283 }
290fc38d 2284 d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
37a7d8b0
AK
2285 while (d) {
2286 for (k = 0; k < RMAP_EXT; ++k)
2287 if (d->shadow_ptes[k])
2288 ++nmaps;
2289 else
2290 break;
2291 d = d->more;
2292 }
2293 }
2294 }
2295 return nmaps;
2296}
2297
2298static int count_writable_mappings(struct kvm_vcpu *vcpu)
2299{
2300 int nmaps = 0;
4db35314 2301 struct kvm_mmu_page *sp;
37a7d8b0
AK
2302 int i;
2303
f05e70ac 2304 list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
4db35314 2305 u64 *pt = sp->spt;
37a7d8b0 2306
4db35314 2307 if (sp->role.level != PT_PAGE_TABLE_LEVEL)
37a7d8b0
AK
2308 continue;
2309
2310 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
2311 u64 ent = pt[i];
2312
2313 if (!(ent & PT_PRESENT_MASK))
2314 continue;
2315 if (!(ent & PT_WRITABLE_MASK))
2316 continue;
2317 ++nmaps;
2318 }
2319 }
2320 return nmaps;
2321}
2322
2323static void audit_rmap(struct kvm_vcpu *vcpu)
2324{
2325 int n_rmap = count_rmaps(vcpu);
2326 int n_actual = count_writable_mappings(vcpu);
2327
2328 if (n_rmap != n_actual)
2329 printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
b8688d51 2330 __func__, audit_msg, n_rmap, n_actual);
37a7d8b0
AK
2331}
2332
2333static void audit_write_protection(struct kvm_vcpu *vcpu)
2334{
4db35314 2335 struct kvm_mmu_page *sp;
290fc38d
IE
2336 struct kvm_memory_slot *slot;
2337 unsigned long *rmapp;
2338 gfn_t gfn;
37a7d8b0 2339
f05e70ac 2340 list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
4db35314 2341 if (sp->role.metaphysical)
37a7d8b0
AK
2342 continue;
2343
4db35314
AK
2344 slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
2345 gfn = unalias_gfn(vcpu->kvm, sp->gfn);
290fc38d
IE
2346 rmapp = &slot->rmap[gfn - slot->base_gfn];
2347 if (*rmapp)
37a7d8b0
AK
2348 printk(KERN_ERR "%s: (%s) shadow page has writable"
2349 " mappings: gfn %lx role %x\n",
b8688d51 2350 __func__, audit_msg, sp->gfn,
4db35314 2351 sp->role.word);
37a7d8b0
AK
2352 }
2353}
2354
2355static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
2356{
2357 int olddbg = dbg;
2358
2359 dbg = 0;
2360 audit_msg = msg;
2361 audit_rmap(vcpu);
2362 audit_write_protection(vcpu);
2363 audit_mappings(vcpu);
2364 dbg = olddbg;
2365}
2366
2367#endif