/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *	Yaniv Kamay  <yaniv@qumranet.com>
 *	Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "vmx.h"
#include "mmu.h"

#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>

#include <asm/page.h>
#include <asm/cmpxchg.h>
#include <asm/io.h>

/*
 * Setting this variable to true enables Two-Dimensional Paging, where
 * the hardware walks two page tables:
 * 1. the guest-virtual to guest-physical table
 * 2. while doing 1., the guest-physical to host-physical table
 * If the hardware supports that, we don't need to do shadow paging.
 */
bool tdp_enabled = false;

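/*
 * Note (added comment): architecture setup code is expected to flip
 * tdp_enabled by calling kvm_enable_tdp(), defined near the end of this
 * file, once it detects hardware support for nested/extended paging.
 */
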
#undef MMU_DEBUG

#undef AUDIT

#ifdef AUDIT
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
#endif

#ifdef MMU_DEBUG

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)

#else

#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)

#endif

#if defined(MMU_DEBUG) || defined(AUDIT)
static int dbg = 0;
module_param(dbg, bool, 0644);
#endif

#ifndef MMU_DEBUG
#define ASSERT(x) do { } while (0)
#else
#define ASSERT(x)							\
	if (!(x)) {							\
		printk(KERN_WARNING "assertion failed %s:%d: %s\n",	\
		       __FILE__, __LINE__, #x);				\
	}
#endif

#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_LEVEL_MASK(level) \
		(((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))


#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LEVEL_MASK(level) \
		(((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))


#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#define PT64_DIR_BASE_ADDR_MASK \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))

#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
			| PT64_NX_MASK)

#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)
#define PFERR_FETCH_MASK (1U << 4)

#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

#define RMAP_EXT 4

#define ACC_EXEC_MASK    1
#define ACC_WRITE_MASK   PT_WRITABLE_MASK
#define ACC_USER_MASK    PT_USER_MASK
#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

struct kvm_pv_mmu_op_buffer {
	void *ptr;
	unsigned len;
	unsigned processed;
	char buf[512] __aligned(sizeof(long));
};

struct kvm_rmap_desc {
	u64 *shadow_ptes[RMAP_EXT];
	struct kvm_rmap_desc *more;
};

static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
static struct kmem_cache *mmu_page_header_cache;

static u64 __read_mostly shadow_trap_nonpresent_pte;
static u64 __read_mostly shadow_notrap_nonpresent_pte;
static u64 __read_mostly shadow_base_present_pte;
static u64 __read_mostly shadow_nx_mask;
static u64 __read_mostly shadow_x_mask;	/* mutually exclusive with nx_mask */
static u64 __read_mostly shadow_user_mask;
static u64 __read_mostly shadow_accessed_mask;
static u64 __read_mostly shadow_dirty_mask;

void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
{
	shadow_trap_nonpresent_pte = trap_pte;
	shadow_notrap_nonpresent_pte = notrap_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);

void kvm_mmu_set_base_ptes(u64 base_pte)
{
	shadow_base_present_pte = base_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);

void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask)
{
	shadow_user_mask = user_mask;
	shadow_accessed_mask = accessed_mask;
	shadow_dirty_mask = dirty_mask;
	shadow_nx_mask = nx_mask;
	shadow_x_mask = x_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);

static int is_write_protection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.cr0 & X86_CR0_WP;
}

static int is_cpuid_PSE36(void)
{
	return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.shadow_efer & EFER_NX;
}

static int is_present_pte(unsigned long pte)
{
	return pte & PT_PRESENT_MASK;
}

static int is_shadow_present_pte(u64 pte)
{
	return pte != shadow_trap_nonpresent_pte
		&& pte != shadow_notrap_nonpresent_pte;
}

static int is_large_pte(u64 pte)
{
	return pte & PT_PAGE_SIZE_MASK;
}

static int is_writeble_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static int is_dirty_pte(unsigned long pte)
{
	return pte & shadow_dirty_mask;
}

static int is_rmap_pte(u64 pte)
{
	return is_shadow_present_pte(pte);
}

static pfn_t spte_to_pfn(u64 pte)
{
	return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

static gfn_t pse36_gfn_delta(u32 gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}

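/*
 * Worked example (added comment): assuming PT32_DIR_PSE36_SHIFT == 13 and
 * PAGE_SHIFT == 12, shift is 32 - 13 - 12 = 7, so the PSE-36 bits 13..16
 * of a pde land at gfn bits 20..23, i.e. physical address bits 32..35
 * once the gfn is scaled by the page size.
 */
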
static void set_shadow_pte(u64 *sptep, u64 spte)
{
#ifdef CONFIG_X86_64
	set_64bit((unsigned long *)sptep, spte);
#else
	set_64bit((unsigned long long *)sptep, spte);
#endif
}

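/*
 * Note (added comment): both branches above perform a single atomic
 * 64-bit store (cmpxchg8b-based on 32-bit hosts), so a concurrent
 * hardware page-table walk never observes a half-written spte.
 */
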
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  struct kmem_cache *base_cache, int min)
{
	void *obj;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
		if (!obj)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = obj;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		kfree(mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
				       int min)
{
	struct page *page;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		page = alloc_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		set_page_private(page, 0);
		cache->objects[cache->nobjs++] = page_address(page);
	}
	return 0;
}

static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
				   pte_chain_cache, 4);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
				   rmap_desc_cache, 1);
	if (r)
		goto out;
	r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
				   mmu_page_header_cache, 4);
out:
	return r;
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
	mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
				    size_t size)
{
	void *p;

	BUG_ON(!mc->nobjs);
	p = mc->objects[--mc->nobjs];
	memset(p, 0, size);
	return p;
}

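/*
 * Note (added comment): the per-vcpu caches are deliberately topped up
 * before mmu_lock is taken; the fault and pte-write paths then draw from
 * them via mmu_memory_cache_alloc(), which must not sleep or fail while
 * the spinlock is held.
 */
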
static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
				      sizeof(struct kvm_pte_chain));
}

static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
{
	kfree(pc);
}

static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
				      sizeof(struct kvm_rmap_desc));
}

static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
{
	kfree(rd);
}

/*
 * Return the pointer to the largepage write count for a given
 * gfn, handling slots that are not large page aligned.
 */
static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
{
	unsigned long idx;

	idx = (gfn / KVM_PAGES_PER_HPAGE) -
	      (slot->base_gfn / KVM_PAGES_PER_HPAGE);
	return &slot->lpage_info[idx].write_count;
}

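/*
 * Worked example (added comment): with KVM_PAGES_PER_HPAGE == 512 (4KB
 * pages, 2MB huge pages), a slot with base_gfn 0x300 and gfn 0x9ff gives
 * idx = (0x9ff / 512) - (0x300 / 512) = 4 - 1 = 3: the gfn falls in the
 * fourth large-page region overlapped by the slot, even though base_gfn
 * is not large-page aligned.
 */
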
static void account_shadowed(struct kvm *kvm, gfn_t gfn)
{
	int *write_count;

	write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
	*write_count += 1;
}

static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
{
	int *write_count;

	write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
	*write_count -= 1;
	WARN_ON(*write_count < 0);
}

static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
	int *largepage_idx;

	if (slot) {
		largepage_idx = slot_largepage_idx(gfn, slot);
		return *largepage_idx;
	}

	return 1;
}

static int host_largepage_backed(struct kvm *kvm, gfn_t gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return 0;

	vma = find_vma(current->mm, addr);
	if (vma && is_vm_hugetlb_page(vma))
		return 1;

	return 0;
}

static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
{
	struct kvm_memory_slot *slot;

	if (has_wrprotected_page(vcpu->kvm, large_gfn))
		return 0;

	if (!host_largepage_backed(vcpu->kvm, large_gfn))
		return 0;

	slot = gfn_to_memslot(vcpu->kvm, large_gfn);
	if (slot && slot->dirty_bitmap)
		return 0;

	return 1;
}

/*
 * Take gfn and return the reverse mapping to it.
 * Note: gfn must be unaliased before this function gets called.
 */
static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
{
	struct kvm_memory_slot *slot;
	unsigned long idx;

	slot = gfn_to_memslot(kvm, gfn);
	if (!lpage)
		return &slot->rmap[gfn - slot->base_gfn];

	idx = (gfn / KVM_PAGES_PER_HPAGE) -
	      (slot->base_gfn / KVM_PAGES_PER_HPAGE);

	return &slot->lpage_info[idx].rmap_pde;
}

/*
 * Reverse mapping data structures:
 *
 * If rmapp bit zero is zero, then rmapp points to the shadow page table
 * entry that points to page_address(page).
 *
 * If rmapp bit zero is one, then (rmapp & ~1) points to a struct
 * kvm_rmap_desc containing more mappings.
 */
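/*
 * Illustrative sketch (added, hypothetical helper): decoding an rmapp
 * word according to the scheme described above; this mirrors the logic
 * in rmap_next() below.
 */
#if 0
static u64 *rmap_first_spte(unsigned long rmapp)
{
	if (!rmapp)
		return NULL;			/* no mappings */
	if (!(rmapp & 1))
		return (u64 *)rmapp;		/* single spte, stored directly */
	/* descriptor chain: first slot of the first kvm_rmap_desc */
	return ((struct kvm_rmap_desc *)(rmapp & ~1ul))->shadow_ptes[0];
}
#endif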
static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
{
	struct kvm_mmu_page *sp;
	struct kvm_rmap_desc *desc;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	gfn = unalias_gfn(vcpu->kvm, gfn);
	sp = page_header(__pa(spte));
	sp->gfns[spte - sp->spt] = gfn;
	rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
	if (!*rmapp) {
		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
		*rmapp = (unsigned long)spte;
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
		desc = mmu_alloc_rmap_desc(vcpu);
		desc->shadow_ptes[0] = (u64 *)*rmapp;
		desc->shadow_ptes[1] = spte;
		*rmapp = (unsigned long)desc | 1;
	} else {
		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
			desc = desc->more;
		if (desc->shadow_ptes[RMAP_EXT-1]) {
			desc->more = mmu_alloc_rmap_desc(vcpu);
			desc = desc->more;
		}
		for (i = 0; desc->shadow_ptes[i]; ++i)
			;
		desc->shadow_ptes[i] = spte;
	}
}

static void rmap_desc_remove_entry(unsigned long *rmapp,
				   struct kvm_rmap_desc *desc,
				   int i,
				   struct kvm_rmap_desc *prev_desc)
{
	int j;

	for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
		;
	desc->shadow_ptes[i] = desc->shadow_ptes[j];
	desc->shadow_ptes[j] = NULL;
	if (j != 0)
		return;
	if (!prev_desc && !desc->more)
		*rmapp = (unsigned long)desc->shadow_ptes[0];
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
			*rmapp = (unsigned long)desc->more | 1;
	mmu_free_rmap_desc(desc);
}

static void rmap_remove(struct kvm *kvm, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	struct kvm_mmu_page *sp;
	pfn_t pfn;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	sp = page_header(__pa(spte));
	pfn = spte_to_pfn(*spte);
	if (*spte & shadow_accessed_mask)
		kvm_set_pfn_accessed(pfn);
	if (is_writeble_pte(*spte))
		kvm_release_pfn_dirty(pfn);
	else
		kvm_release_pfn_clean(pfn);
	rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], is_large_pte(*spte));
	if (!*rmapp) {
		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
		BUG();
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
		if ((u64 *)*rmapp != spte) {
			printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
			       spte, *spte);
			BUG();
		}
		*rmapp = 0;
	} else {
		rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		prev_desc = NULL;
		while (desc) {
			for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
				if (desc->shadow_ptes[i] == spte) {
					rmap_desc_remove_entry(rmapp,
							       desc, i,
							       prev_desc);
					return;
				}
			prev_desc = desc;
			desc = desc->more;
		}
		BUG();
	}
}

static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	u64 *prev_spte;
	int i;

	if (!*rmapp)
		return NULL;
	else if (!(*rmapp & 1)) {
		if (!spte)
			return (u64 *)*rmapp;
		return NULL;
	}
	desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
	prev_desc = NULL;
	prev_spte = NULL;
	while (desc) {
		for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
			if (prev_spte == spte)
				return desc->shadow_ptes[i];
			prev_spte = desc->shadow_ptes[i];
		}
		desc = desc->more;
	}
	return NULL;
}

static void rmap_write_protect(struct kvm *kvm, u64 gfn)
{
	unsigned long *rmapp;
	u64 *spte;
	int write_protected = 0;

	gfn = unalias_gfn(kvm, gfn);
	rmapp = gfn_to_rmap(kvm, gfn, 0);

	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		BUG_ON(!spte);
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
		if (is_writeble_pte(*spte)) {
			set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
			write_protected = 1;
		}
		spte = rmap_next(kvm, rmapp, spte);
	}
	if (write_protected) {
		pfn_t pfn;

		spte = rmap_next(kvm, rmapp, NULL);
		pfn = spte_to_pfn(*spte);
		kvm_set_pfn_dirty(pfn);
	}

	/* check for huge page mappings */
	rmapp = gfn_to_rmap(kvm, gfn, 1);
	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		BUG_ON(!spte);
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
		pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
		if (is_writeble_pte(*spte)) {
			rmap_remove(kvm, spte);
			--kvm->stat.lpages;
			set_shadow_pte(spte, shadow_trap_nonpresent_pte);
			spte = NULL;
			write_protected = 1;
		}
		spte = rmap_next(kvm, rmapp, spte);
	}

	if (write_protected)
		kvm_flush_remote_tlbs(kvm);

	account_shadowed(kvm, gfn);
}

#ifdef MMU_DEBUG
static int is_empty_shadow_page(u64 *spt)
{
	u64 *pos;
	u64 *end;

	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
		if (is_shadow_present_pte(*pos)) {
			printk(KERN_ERR "%s: %p %llx\n", __func__,
			       pos, *pos);
			return 0;
		}
	return 1;
}
#endif

static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	ASSERT(is_empty_shadow_page(sp->spt));
	list_del(&sp->link);
	__free_page(virt_to_page(sp->spt));
	__free_page(virt_to_page(sp->gfns));
	kfree(sp);
	++kvm->arch.n_free_mmu_pages;
}

static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
	return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
}

static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
					       u64 *parent_pte)
{
	struct kvm_mmu_page *sp;

	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
	sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
	ASSERT(is_empty_shadow_page(sp->spt));
	sp->slot_bitmap = 0;
	sp->multimapped = 0;
	sp->parent_pte = parent_pte;
	--vcpu->kvm->arch.n_free_mmu_pages;
	return sp;
}

static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp, u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!parent_pte)
		return;
	if (!sp->multimapped) {
		u64 *old = sp->parent_pte;

		if (!old) {
			sp->parent_pte = parent_pte;
			return;
		}
		sp->multimapped = 1;
		pte_chain = mmu_alloc_pte_chain(vcpu);
		INIT_HLIST_HEAD(&sp->parent_ptes);
		hlist_add_head(&pte_chain->link, &sp->parent_ptes);
		pte_chain->parent_ptes[0] = old;
	}
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
		if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
			continue;
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
			if (!pte_chain->parent_ptes[i]) {
				pte_chain->parent_ptes[i] = parent_pte;
				return;
			}
	}
	pte_chain = mmu_alloc_pte_chain(vcpu);
	BUG_ON(!pte_chain);
	hlist_add_head(&pte_chain->link, &sp->parent_ptes);
	pte_chain->parent_ptes[0] = parent_pte;
}

static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
				       u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!sp->multimapped) {
		BUG_ON(sp->parent_pte != parent_pte);
		sp->parent_pte = NULL;
		return;
	}
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
			if (!pte_chain->parent_ptes[i])
				break;
			if (pte_chain->parent_ptes[i] != parent_pte)
				continue;
			while (i + 1 < NR_PTE_CHAIN_ENTRIES
				&& pte_chain->parent_ptes[i + 1]) {
				pte_chain->parent_ptes[i]
					= pte_chain->parent_ptes[i + 1];
				++i;
			}
			pte_chain->parent_ptes[i] = NULL;
			if (i == 0) {
				hlist_del(&pte_chain->link);
				mmu_free_pte_chain(pte_chain);
				if (hlist_empty(&sp->parent_ptes)) {
					sp->multimapped = 0;
					sp->parent_pte = NULL;
				}
			}
			return;
		}
	BUG();
}

static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp)
{
	int i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		sp->spt[i] = shadow_trap_nonpresent_pte;
}

static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node;

	pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
	index = kvm_page_table_hashfn(gfn);
	bucket = &kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry(sp, node, bucket, hash_link)
		if (sp->gfn == gfn && !sp->role.metaphysical
		    && !sp->role.invalid) {
			pgprintk("%s: found role %x\n",
				 __func__, sp->role.word);
			return sp;
		}
	return NULL;
}

static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
					     int metaphysical,
					     unsigned access,
					     u64 *parent_pte)
{
	union kvm_mmu_page_role role;
	unsigned index;
	unsigned quadrant;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node;

	role.word = 0;
	role.glevels = vcpu->arch.mmu.root_level;
	role.level = level;
	role.metaphysical = metaphysical;
	role.access = access;
	if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}
	pgprintk("%s: looking gfn %lx role %x\n", __func__,
		 gfn, role.word);
	index = kvm_page_table_hashfn(gfn);
	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry(sp, node, bucket, hash_link)
		if (sp->gfn == gfn && sp->role.word == role.word) {
			mmu_page_add_parent_pte(vcpu, sp, parent_pte);
			pgprintk("%s: found\n", __func__);
			return sp;
		}
	++vcpu->kvm->stat.mmu_cache_miss;
	sp = kvm_mmu_alloc_page(vcpu, parent_pte);
	if (!sp)
		return sp;
	pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
	sp->gfn = gfn;
	sp->role = role;
	hlist_add_head(&sp->hash_link, bucket);
	if (!metaphysical)
		rmap_write_protect(vcpu->kvm, gfn);
	if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
		vcpu->arch.mmu.prefetch_page(vcpu, sp);
	else
		nonpaging_prefetch_page(vcpu, sp);
	return sp;
}

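/*
 * Note (added comment): kvm_mmu_get_page() deduplicates shadow pages by
 * (gfn, role); callers shadowing the same guest page table with an
 * identical role share one shadow page via the hash lookup above, and
 * only a miss allocates and write-protects a fresh page.
 */
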
static void kvm_mmu_page_unlink_children(struct kvm *kvm,
					 struct kvm_mmu_page *sp)
{
	unsigned i;
	u64 *pt;
	u64 ent;

	pt = sp->spt;

	if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			if (is_shadow_present_pte(pt[i]))
				rmap_remove(kvm, &pt[i]);
			pt[i] = shadow_trap_nonpresent_pte;
		}
		kvm_flush_remote_tlbs(kvm);
		return;
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		ent = pt[i];

		if (is_shadow_present_pte(ent)) {
			if (!is_large_pte(ent)) {
				ent &= PT64_BASE_ADDR_MASK;
				mmu_page_remove_parent_pte(page_header(ent),
							   &pt[i]);
			} else {
				--kvm->stat.lpages;
				rmap_remove(kvm, &pt[i]);
			}
		}
		pt[i] = shadow_trap_nonpresent_pte;
	}
	kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
{
	mmu_page_remove_parent_pte(sp, parent_pte);
}

static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
{
	int i;

	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i])
			kvm->vcpus[i]->arch.last_pte_updated = NULL;
}

static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	u64 *parent_pte;

	++kvm->stat.mmu_shadow_zapped;
	while (sp->multimapped || sp->parent_pte) {
		if (!sp->multimapped)
			parent_pte = sp->parent_pte;
		else {
			struct kvm_pte_chain *chain;

			chain = container_of(sp->parent_ptes.first,
					     struct kvm_pte_chain, link);
			parent_pte = chain->parent_ptes[0];
		}
		BUG_ON(!parent_pte);
		kvm_mmu_put_page(sp, parent_pte);
		set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
	}
	kvm_mmu_page_unlink_children(kvm, sp);
	if (!sp->root_count) {
		if (!sp->role.metaphysical && !sp->role.invalid)
			unaccount_shadowed(kvm, sp->gfn);
		hlist_del(&sp->hash_link);
		kvm_mmu_free_page(kvm, sp);
	} else {
		int invalid = sp->role.invalid;
		list_move(&sp->link, &kvm->arch.active_mmu_pages);
		sp->role.invalid = 1;
		kvm_reload_remote_mmus(kvm);
		if (!sp->role.metaphysical && !invalid)
			unaccount_shadowed(kvm, sp->gfn);
	}
	kvm_mmu_reset_last_pte_updated(kvm);
}

/*
 * Changing the number of mmu pages allocated to the vm.
 * Note: if kvm_nr_mmu_pages is too small, you will get deadlock.
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
{
	/*
	 * If we set the number of mmu pages to be smaller than the
	 * number of active pages, we must free some mmu pages before we
	 * change the value.
	 */

	if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
	    kvm_nr_mmu_pages) {
		int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
				       - kvm->arch.n_free_mmu_pages;

		while (n_used_mmu_pages > kvm_nr_mmu_pages) {
			struct kvm_mmu_page *page;

			page = container_of(kvm->arch.active_mmu_pages.prev,
					    struct kvm_mmu_page, link);
			kvm_mmu_zap_page(kvm, page);
			n_used_mmu_pages--;
		}
		kvm->arch.n_free_mmu_pages = 0;
	}
	else
		kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
					 - kvm->arch.n_alloc_mmu_pages;

	kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
}

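/*
 * Usage sketch (added comment): this is expected to be reached from the
 * KVM_SET_NR_MMU_PAGES ioctl path. Lowering the limit zaps victims from
 * the tail of kvm->arch.active_mmu_pages, which approximates LRU order
 * since kvm_mmu_alloc_page() adds new pages at the head of the list.
 */
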
static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node, *n;
	int r;

	pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
	r = 0;
	index = kvm_page_table_hashfn(gfn);
	bucket = &kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
		if (sp->gfn == gfn && !sp->role.metaphysical) {
			pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
				 sp->role.word);
			kvm_mmu_zap_page(kvm, sp);
			r = 1;
		}
	return r;
}

static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_mmu_page *sp;

	while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
		pgprintk("%s: zap %lx %x\n", __func__, gfn, sp->role.word);
		kvm_mmu_zap_page(kvm, sp);
	}
}

static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
{
	int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
	struct kvm_mmu_page *sp = page_header(__pa(pte));

	__set_bit(slot, &sp->slot_bitmap);
}

struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct page *page;

	gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);

	if (gpa == UNMAPPED_GVA)
		return NULL;

	down_read(&current->mm->mmap_sem);
	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
	up_read(&current->mm->mmap_sem);

	return page;
}

static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
			 unsigned pt_access, unsigned pte_access,
			 int user_fault, int write_fault, int dirty,
			 int *ptwrite, int largepage, gfn_t gfn,
			 pfn_t pfn, bool speculative)
{
	u64 spte;
	int was_rmapped = 0;
	int was_writeble = is_writeble_pte(*shadow_pte);

	pgprintk("%s: spte %llx access %x write_fault %d"
		 " user_fault %d gfn %lx\n",
		 __func__, *shadow_pte, pt_access,
		 write_fault, user_fault, gfn);

	if (is_rmap_pte(*shadow_pte)) {
		/*
		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
		 * the parent of the now unreachable PTE.
		 */
		if (largepage && !is_large_pte(*shadow_pte)) {
			struct kvm_mmu_page *child;
			u64 pte = *shadow_pte;

			child = page_header(pte & PT64_BASE_ADDR_MASK);
			mmu_page_remove_parent_pte(child, shadow_pte);
		} else if (pfn != spte_to_pfn(*shadow_pte)) {
			pgprintk("hfn old %lx new %lx\n",
				 spte_to_pfn(*shadow_pte), pfn);
			rmap_remove(vcpu->kvm, shadow_pte);
		} else {
			if (largepage)
				was_rmapped = is_large_pte(*shadow_pte);
			else
				was_rmapped = 1;
		}
	}

	/*
	 * We don't set the accessed bit, since we sometimes want to see
	 * whether the guest actually used the pte (in order to detect
	 * demand paging).
	 */
	spte = shadow_base_present_pte | shadow_dirty_mask;
	if (!speculative)
		pte_access |= PT_ACCESSED_MASK;
	if (!dirty)
		pte_access &= ~ACC_WRITE_MASK;
	if (pte_access & ACC_EXEC_MASK)
		spte |= shadow_x_mask;
	else
		spte |= shadow_nx_mask;
	if (pte_access & ACC_USER_MASK)
		spte |= shadow_user_mask;
	if (largepage)
		spte |= PT_PAGE_SIZE_MASK;

	spte |= (u64)pfn << PAGE_SHIFT;

	if ((pte_access & ACC_WRITE_MASK)
	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
		struct kvm_mmu_page *shadow;

		spte |= PT_WRITABLE_MASK;

		shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
		if (shadow ||
		   (largepage && has_wrprotected_page(vcpu->kvm, gfn))) {
			pgprintk("%s: found shadow page for %lx, marking ro\n",
				 __func__, gfn);
			pte_access &= ~ACC_WRITE_MASK;
			if (is_writeble_pte(spte)) {
				spte &= ~PT_WRITABLE_MASK;
				kvm_x86_ops->tlb_flush(vcpu);
			}
			if (write_fault)
				*ptwrite = 1;
		}
	}

	if (pte_access & ACC_WRITE_MASK)
		mark_page_dirty(vcpu->kvm, gfn);

	pgprintk("%s: setting spte %llx\n", __func__, spte);
	pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
		 (spte&PT_PAGE_SIZE_MASK)? "2MB" : "4kB",
		 (spte&PT_WRITABLE_MASK)?"RW":"R", gfn, spte, shadow_pte);
	set_shadow_pte(shadow_pte, spte);
	if (!was_rmapped && (spte & PT_PAGE_SIZE_MASK)
	    && (spte & PT_PRESENT_MASK))
		++vcpu->kvm->stat.lpages;

	page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
	if (!was_rmapped) {
		rmap_add(vcpu, shadow_pte, gfn, largepage);
		if (!is_rmap_pte(*shadow_pte))
			kvm_release_pfn_clean(pfn);
	} else {
		if (was_writeble)
			kvm_release_pfn_dirty(pfn);
		else
			kvm_release_pfn_clean(pfn);
	}
	if (speculative) {
		vcpu->arch.last_pte_updated = shadow_pte;
		vcpu->arch.last_pte_gfn = gfn;
	}
}

static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}

static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
			int largepage, gfn_t gfn, pfn_t pfn,
			int level)
{
	hpa_t table_addr = vcpu->arch.mmu.root_hpa;
	int pt_write = 0;

	for (; ; level--) {
		u32 index = PT64_INDEX(v, level);
		u64 *table;

		ASSERT(VALID_PAGE(table_addr));
		table = __va(table_addr);

		if (level == 1) {
			mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
				     0, write, 1, &pt_write, 0, gfn, pfn, false);
			return pt_write;
		}

		if (largepage && level == 2) {
			mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
				     0, write, 1, &pt_write, 1, gfn, pfn, false);
			return pt_write;
		}

		if (table[index] == shadow_trap_nonpresent_pte) {
			struct kvm_mmu_page *new_table;
			gfn_t pseudo_gfn;

			pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
						     v, level - 1,
						     1, ACC_ALL, &table[index]);
			if (!new_table) {
				pgprintk("nonpaging_map: ENOMEM\n");
				kvm_release_pfn_clean(pfn);
				return -ENOMEM;
			}

			set_shadow_pte(&table[index],
				       __pa(new_table->spt)
				       | PT_PRESENT_MASK | PT_WRITABLE_MASK
				       | shadow_user_mask | shadow_x_mask);
		}
		table_addr = table[index] & PT64_BASE_ADDR_MASK;
	}
}

static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
{
	int r;
	int largepage = 0;
	pfn_t pfn;

	down_read(&current->mm->mmap_sem);
	if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
		largepage = 1;
	}

	pfn = gfn_to_pfn(vcpu->kvm, gfn);
	up_read(&current->mm->mmap_sem);

	/* mmio */
	if (is_error_pfn(pfn)) {
		kvm_release_pfn_clean(pfn);
		return 1;
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);
	r = __direct_map(vcpu, v, write, largepage, gfn, pfn,
			 PT32E_ROOT_LEVEL);
	spin_unlock(&vcpu->kvm->mmu_lock);

	return r;
}

static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;
	spin_lock(&vcpu->kvm->mmu_lock);
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		sp = page_header(root);
		--sp->root_count;
		if (!sp->root_count && sp->role.invalid)
			kvm_mmu_zap_page(vcpu->kvm, sp);
		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
		spin_unlock(&vcpu->kvm->mmu_lock);
		return;
	}
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			--sp->root_count;
			if (!sp->root_count && sp->role.invalid)
				kvm_mmu_zap_page(vcpu->kvm, sp);
		}
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
	}
	spin_unlock(&vcpu->kvm->mmu_lock);
	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
}

static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
	int i;
	gfn_t root_gfn;
	struct kvm_mmu_page *sp;
	int metaphysical = 0;

	root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;

	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		ASSERT(!VALID_PAGE(root));
		if (tdp_enabled)
			metaphysical = 1;
		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
				      PT64_ROOT_LEVEL, metaphysical,
				      ACC_ALL, NULL);
		root = __pa(sp->spt);
		++sp->root_count;
		vcpu->arch.mmu.root_hpa = root;
		return;
	}
	metaphysical = !is_paging(vcpu);
	if (tdp_enabled)
		metaphysical = 1;
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		ASSERT(!VALID_PAGE(root));
		if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
			if (!is_present_pte(vcpu->arch.pdptrs[i])) {
				vcpu->arch.mmu.pae_root[i] = 0;
				continue;
			}
			root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
		} else if (vcpu->arch.mmu.root_level == 0)
			root_gfn = 0;
		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
				      PT32_ROOT_LEVEL, metaphysical,
				      ACC_ALL, NULL);
		root = __pa(sp->spt);
		++sp->root_count;
		vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
	}
	vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
}

static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	return vaddr;
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
				u32 error_code)
{
	gfn_t gfn;
	int r;

	pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

	gfn = gva >> PAGE_SHIFT;

	return nonpaging_map(vcpu, gva & PAGE_MASK,
			     error_code & PFERR_WRITE_MASK, gfn);
}

static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
				u32 error_code)
{
	pfn_t pfn;
	int r;
	int largepage = 0;
	gfn_t gfn = gpa >> PAGE_SHIFT;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	down_read(&current->mm->mmap_sem);
	if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
		largepage = 1;
	}
	pfn = gfn_to_pfn(vcpu->kvm, gfn);
	up_read(&current->mm->mmap_sem);
	if (is_error_pfn(pfn)) {
		kvm_release_pfn_clean(pfn);
		return 1;
	}
	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);
	r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
			 largepage, gfn, pfn, kvm_x86_ops->get_tdp_level());
	spin_unlock(&vcpu->kvm->mmu_lock);

	return r;
}

static void nonpaging_free(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static int nonpaging_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->free = nonpaging_free;
	context->prefetch_page = nonpaging_prefetch_page;
	context->root_level = 0;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;
	kvm_x86_ops->tlb_flush(vcpu);
}

static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
	pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
	mmu_free_roots(vcpu);
}

static void inject_page_fault(struct kvm_vcpu *vcpu,
			      u64 addr,
			      u32 err_code)
{
	kvm_inject_page_fault(vcpu, addr, err_code);
}

static void paging_free(struct kvm_vcpu *vcpu)
{
	nonpaging_free(vcpu);
}

#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

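/*
 * Note (added comment): paging_tmpl.h is included twice as a
 * preprocessor-based template; PTTYPE selects the 64-bit or 32-bit guest
 * pte layout and stamps out the paging64_* and paging32_* walker and
 * fault-handling variants wired up by the init functions below.
 */
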
static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	ASSERT(is_pae(vcpu));
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
	context->prefetch_page = paging64_prefetch_page;
	context->free = paging_free;
	context->root_level = level;
	context->shadow_root_level = level;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging64_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
}

static int paging32_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
	context->free = paging_free;
	context->prefetch_page = paging32_prefetch_page;
	context->root_level = PT32_ROOT_LEVEL;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging32E_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
}

static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = tdp_page_fault;
	context->free = nonpaging_free;
	context->prefetch_page = nonpaging_prefetch_page;
	context->shadow_root_level = kvm_x86_ops->get_tdp_level();
	context->root_hpa = INVALID_PAGE;

	if (!is_paging(vcpu)) {
		context->gva_to_gpa = nonpaging_gva_to_gpa;
		context->root_level = 0;
	} else if (is_long_mode(vcpu)) {
		context->gva_to_gpa = paging64_gva_to_gpa;
		context->root_level = PT64_ROOT_LEVEL;
	} else if (is_pae(vcpu)) {
		context->gva_to_gpa = paging64_gva_to_gpa;
		context->root_level = PT32E_ROOT_LEVEL;
	} else {
		context->gva_to_gpa = paging32_gva_to_gpa;
		context->root_level = PT32_ROOT_LEVEL;
	}

	return 0;
}

static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	if (!is_paging(vcpu))
		return nonpaging_init_context(vcpu);
	else if (is_long_mode(vcpu))
		return paging64_init_context(vcpu);
	else if (is_pae(vcpu))
		return paging32E_init_context(vcpu);
	else
		return paging32_init_context(vcpu);
}

static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
	vcpu->arch.update_pte.pfn = bad_pfn;

	if (tdp_enabled)
		return init_kvm_tdp_mmu(vcpu);
	else
		return init_kvm_softmmu(vcpu);
}

static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
		vcpu->arch.mmu.free(vcpu);
		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
	}
}

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
	destroy_kvm_mmu(vcpu);
	return init_kvm_mmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);

int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;
	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);
	mmu_alloc_roots(vcpu);
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
	kvm_mmu_flush_tlb(vcpu);
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_load);

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp,
				  u64 *spte)
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
	if (is_shadow_present_pte(pte)) {
		if (sp->role.level == PT_PAGE_TABLE_LEVEL ||
		    is_large_pte(pte))
			rmap_remove(vcpu->kvm, spte);
		else {
			child = page_header(pte & PT64_BASE_ADDR_MASK);
			mmu_page_remove_parent_pte(child, spte);
		}
	}
	set_shadow_pte(spte, shadow_trap_nonpresent_pte);
	if (is_large_pte(pte))
		--vcpu->kvm->stat.lpages;
}

static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp,
				  u64 *spte,
				  const void *new)
{
	if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
		if (!vcpu->arch.update_pte.largepage ||
		    sp->role.glevels == PT32_ROOT_LEVEL) {
			++vcpu->kvm->stat.mmu_pde_zapped;
			return;
		}
	}

	++vcpu->kvm->stat.mmu_pte_updated;
	if (sp->role.glevels == PT32_ROOT_LEVEL)
		paging32_update_pte(vcpu, sp, spte, new);
	else
		paging64_update_pte(vcpu, sp, spte, new);
}

static bool need_remote_flush(u64 old, u64 new)
{
	if (!is_shadow_present_pte(old))
		return false;
	if (!is_shadow_present_pte(new))
		return true;
	if ((old ^ new) & PT64_BASE_ADDR_MASK)
		return true;
	old ^= PT64_NX_MASK;
	new ^= PT64_NX_MASK;
	return (old & ~new & PT64_PERM_MASK) != 0;
}

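/*
 * Note (added comment): xoring PT64_NX_MASK into both values inverts the
 * NX bit, turning it into a positive "may execute" permission; the final
 * test then requests a remote flush whenever any permission present in
 * the old spte has been revoked in the new one.
 */
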
static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
{
	if (need_remote_flush(old, new))
		kvm_flush_remote_tlbs(vcpu->kvm);
	else
		kvm_mmu_flush_tlb(vcpu);
}

static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
{
	u64 *spte = vcpu->arch.last_pte_updated;

	return !!(spte && (*spte & shadow_accessed_mask));
}

static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
					  const u8 *new, int bytes)
{
	gfn_t gfn;
	int r;
	u64 gpte = 0;
	pfn_t pfn;

	vcpu->arch.update_pte.largepage = 0;

	if (bytes != 4 && bytes != 8)
		return;

	/*
	 * Assume that the pte write is on a page table of the same type
	 * as the current vcpu paging mode.  This is nearly always true
	 * (might be false while changing modes).  Note it is verified later
	 * by update_pte().
	 */
	if (is_pae(vcpu)) {
		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
		if ((bytes == 4) && (gpa % 4 == 0)) {
			r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
			if (r)
				return;
			memcpy((void *)&gpte + (gpa % 8), new, 4);
		} else if ((bytes == 8) && (gpa % 8 == 0)) {
			memcpy((void *)&gpte, new, 8);
		}
	} else {
		if ((bytes == 4) && (gpa % 4 == 0))
			memcpy((void *)&gpte, new, 4);
	}
	if (!is_present_pte(gpte))
		return;
	gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;

	down_read(&current->mm->mmap_sem);
	if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
		vcpu->arch.update_pte.largepage = 1;
	}
	pfn = gfn_to_pfn(vcpu->kvm, gfn);
	up_read(&current->mm->mmap_sem);

	if (is_error_pfn(pfn)) {
		kvm_release_pfn_clean(pfn);
		return;
	}
	vcpu->arch.update_pte.gfn = gfn;
	vcpu->arch.update_pte.pfn = pfn;
}

static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u64 *spte = vcpu->arch.last_pte_updated;

	if (spte
	    && vcpu->arch.last_pte_gfn == gfn
	    && shadow_accessed_mask
	    && !(*spte & shadow_accessed_mask)
	    && is_shadow_present_pte(*spte))
		set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
}

void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_mmu_page *sp;
	struct hlist_node *node, *n;
	struct hlist_head *bucket;
	unsigned index;
	u64 entry, gentry;
	u64 *spte;
	unsigned offset = offset_in_page(gpa);
	unsigned pte_size;
	unsigned page_offset;
	unsigned misaligned;
	unsigned quadrant;
	int level;
	int flooded = 0;
	int npte;
	int r;

	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
	mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_access_page(vcpu, gfn);
	kvm_mmu_free_some_pages(vcpu);
	++vcpu->kvm->stat.mmu_pte_write;
	kvm_mmu_audit(vcpu, "pre pte write");
	if (gfn == vcpu->arch.last_pt_write_gfn
	    && !last_updated_pte_accessed(vcpu)) {
		++vcpu->arch.last_pt_write_count;
		if (vcpu->arch.last_pt_write_count >= 3)
			flooded = 1;
	} else {
		vcpu->arch.last_pt_write_gfn = gfn;
		vcpu->arch.last_pt_write_count = 1;
		vcpu->arch.last_pte_updated = NULL;
	}
	index = kvm_page_table_hashfn(gfn);
	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
		if (sp->gfn != gfn || sp->role.metaphysical)
			continue;
		pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
		misaligned |= bytes < 4;
		if (misaligned || flooded) {
			/*
			 * Misaligned accesses are too much trouble to fix
			 * up; also, they usually indicate a page is not used
			 * as a page table.
			 *
			 * If we're seeing too many writes to a page,
			 * it may no longer be a page table, or we may be
			 * forking, in which case it is better to unmap the
			 * page.
			 */
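			/*
			 * Worked example (added comment): with pte_size == 8,
			 * a 4-byte write at offset 6 gives
			 * (6 ^ 9) & ~7 == 8, i.e. non-zero, so the write
			 * straddles two sptes and the page is zapped.
			 */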
1752 pgprintk("misaligned: gpa %llx bytes %d role %x\n",
4db35314
AK
1753 gpa, bytes, sp->role.word);
1754 kvm_mmu_zap_page(vcpu->kvm, sp);
4cee5764 1755 ++vcpu->kvm->stat.mmu_flooded;
0e7bc4b9
AK
1756 continue;
1757 }
9b7a0325 1758 page_offset = offset;
4db35314 1759 level = sp->role.level;
ac1b714e 1760 npte = 1;
4db35314 1761 if (sp->role.glevels == PT32_ROOT_LEVEL) {
ac1b714e
AK
1762 page_offset <<= 1; /* 32->64 */
1763 /*
1764 * A 32-bit pde maps 4MB while the shadow pdes map
1765 * only 2MB. So we need to double the offset again
1766 * and zap two pdes instead of one.
1767 */
1768 if (level == PT32_ROOT_LEVEL) {
6b8d0f9b 1769 page_offset &= ~7; /* kill rounding error */
ac1b714e
AK
1770 page_offset <<= 1;
1771 npte = 2;
1772 }
fce0657f 1773 quadrant = page_offset >> PAGE_SHIFT;
9b7a0325 1774 page_offset &= ~PAGE_MASK;
4db35314 1775 if (quadrant != sp->role.quadrant)
fce0657f 1776 continue;
9b7a0325 1777 }
4db35314 1778 spte = &sp->spt[page_offset / sizeof(*spte)];
489f1d65
DE
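		/*
		 * If the write does not cover a whole, aligned pte, the
		 * bytes written here are only part of the new guest pte;
		 * re-read the full pte from guest memory before updating
		 * the shadow.  If the read fails, just zap the spte and
		 * let the next fault rebuild it.
		 */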
1779 if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
1780 gentry = 0;
1781 r = kvm_read_guest_atomic(vcpu->kvm,
1782 gpa & ~(u64)(pte_size - 1),
1783 &gentry, pte_size);
1784 new = (const void *)&gentry;
1785 if (r < 0)
1786 new = NULL;
1787 }
ac1b714e 1788 while (npte--) {
79539cec 1789 entry = *spte;
4db35314 1790 mmu_pte_write_zap_pte(vcpu, sp, spte);
489f1d65
DE
1791 if (new)
1792 mmu_pte_write_new_pte(vcpu, sp, spte, new);
79539cec 1793 mmu_pte_write_flush_tlb(vcpu, entry, *spte);
ac1b714e 1794 ++spte;
9b7a0325 1795 }
9b7a0325 1796 }
c7addb90 1797 kvm_mmu_audit(vcpu, "post pte write");
aaee2c94 1798 spin_unlock(&vcpu->kvm->mmu_lock);
35149e21
AL
1799 if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
1800 kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
1801 vcpu->arch.update_pte.pfn = bad_pfn;
d7824fff 1802 }
da4a00f0
AK
1803}
1804
a436036b
AK
1805int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
1806{
10589a46
MT
1807 gpa_t gpa;
1808 int r;
a436036b 1809
10589a46 1810 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
10589a46 1811
aaee2c94 1812 spin_lock(&vcpu->kvm->mmu_lock);
10589a46 1813 r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
aaee2c94 1814 spin_unlock(&vcpu->kvm->mmu_lock);
10589a46 1815 return r;
a436036b
AK
1816}
1817
22d95b12 1818void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
ebeace86 1819{
f05e70ac 1820 while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
4db35314 1821 struct kvm_mmu_page *sp;
ebeace86 1822
f05e70ac 1823 sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
4db35314
AK
1824 struct kvm_mmu_page, link);
1825 kvm_mmu_zap_page(vcpu->kvm, sp);
4cee5764 1826 ++vcpu->kvm->stat.mmu_recycled;
ebeace86
AK
1827 }
1828}
ebeace86 1829
3067714c
AK
1830int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
1831{
1832 int r;
1833 enum emulation_result er;
1834
ad312c7c 1835 r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
3067714c
AK
1836 if (r < 0)
1837 goto out;
1838
1839 if (!r) {
1840 r = 1;
1841 goto out;
1842 }
1843
b733bfb5
AK
1844 r = mmu_topup_memory_caches(vcpu);
1845 if (r)
1846 goto out;
1847
3067714c 1848 er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
3067714c
AK
1849
1850 switch (er) {
1851 case EMULATE_DONE:
1852 return 1;
1853 case EMULATE_DO_MMIO:
1854 ++vcpu->stat.mmio_exits;
1855 return 0;
1856 case EMULATE_FAIL:
1857 kvm_report_emulation_failure(vcpu, "pagetable");
1858 return 1;
1859 default:
1860 BUG();
1861 }
1862out:
3067714c
AK
1863 return r;
1864}
1865EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
1866
18552672
JR
1867void kvm_enable_tdp(void)
1868{
1869 tdp_enabled = true;
1870}
1871EXPORT_SYMBOL_GPL(kvm_enable_tdp);
1872
6aa8b732
AK
1873static void free_mmu_pages(struct kvm_vcpu *vcpu)
1874{
4db35314 1875 struct kvm_mmu_page *sp;
6aa8b732 1876
f05e70ac
ZX
1877 while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
1878 sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
4db35314
AK
1879 struct kvm_mmu_page, link);
1880 kvm_mmu_zap_page(vcpu->kvm, sp);
8d2d73b9 1881 cond_resched();
f51234c2 1882 }
ad312c7c 1883 free_page((unsigned long)vcpu->arch.mmu.pae_root);
6aa8b732
AK
1884}
1885
1886static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
1887{
17ac10ad 1888 struct page *page;
6aa8b732
AK
1889 int i;
1890
1891 ASSERT(vcpu);
1892
f05e70ac
ZX
1893 if (vcpu->kvm->arch.n_requested_mmu_pages)
1894 vcpu->kvm->arch.n_free_mmu_pages =
1895 vcpu->kvm->arch.n_requested_mmu_pages;
82ce2c96 1896 else
f05e70ac
ZX
1897 vcpu->kvm->arch.n_free_mmu_pages =
1898 vcpu->kvm->arch.n_alloc_mmu_pages;
17ac10ad
AK
1899 /*
1900 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
1901 * Therefore we need to allocate shadow page tables in the first
1902 * 4GB of memory, which happens to fit the DMA32 zone.
1903 */
1904 page = alloc_page(GFP_KERNEL | __GFP_DMA32);
1905 if (!page)
1906 goto error_1;
ad312c7c 1907 vcpu->arch.mmu.pae_root = page_address(page);
17ac10ad 1908 for (i = 0; i < 4; ++i)
ad312c7c 1909 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
17ac10ad 1910
6aa8b732
AK
1911 return 0;
1912
1913error_1:
1914 free_mmu_pages(vcpu);
1915 return -ENOMEM;
1916}
1917
8018c27b 1918int kvm_mmu_create(struct kvm_vcpu *vcpu)
6aa8b732 1919{
6aa8b732 1920 ASSERT(vcpu);
ad312c7c 1921 ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
6aa8b732 1922
8018c27b
IM
1923 return alloc_mmu_pages(vcpu);
1924}
6aa8b732 1925
8018c27b
IM
1926int kvm_mmu_setup(struct kvm_vcpu *vcpu)
1927{
1928 ASSERT(vcpu);
ad312c7c 1929 ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2c264957 1930
8018c27b 1931 return init_kvm_mmu(vcpu);
6aa8b732
AK
1932}
1933
1934void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
1935{
1936 ASSERT(vcpu);
1937
1938 destroy_kvm_mmu(vcpu);
1939 free_mmu_pages(vcpu);
714b93da 1940 mmu_free_memory_caches(vcpu);
6aa8b732
AK
1941}
1942
90cb0529 1943void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
6aa8b732 1944{
4db35314 1945 struct kvm_mmu_page *sp;
6aa8b732 1946
f05e70ac 1947 list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
6aa8b732
AK
1948 int i;
1949 u64 *pt;
1950
4db35314 1951 if (!test_bit(slot, &sp->slot_bitmap))
6aa8b732
AK
1952 continue;
1953
4db35314 1954 pt = sp->spt;
6aa8b732
AK
1955 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
1956 /* avoid RMW */
9647c14c 1957 if (pt[i] & PT_WRITABLE_MASK)
6aa8b732 1958 pt[i] &= ~PT_WRITABLE_MASK;
6aa8b732
AK
1959 }
1960}
37a7d8b0 1961
90cb0529 1962void kvm_mmu_zap_all(struct kvm *kvm)
e0fa826f 1963{
4db35314 1964 struct kvm_mmu_page *sp, *node;
e0fa826f 1965
aaee2c94 1966 spin_lock(&kvm->mmu_lock);
f05e70ac 1967 list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
4db35314 1968 kvm_mmu_zap_page(kvm, sp);
aaee2c94 1969 spin_unlock(&kvm->mmu_lock);
e0fa826f 1970
90cb0529 1971 kvm_flush_remote_tlbs(kvm);
e0fa826f
DL
1972}
1973
8b2cf73c 1974static void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
3ee16c81
IE
1975{
1976 struct kvm_mmu_page *page;
1977
1978 page = container_of(kvm->arch.active_mmu_pages.prev,
1979 struct kvm_mmu_page, link);
1980 kvm_mmu_zap_page(kvm, page);
1981}
 1982
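/*
 * Shrinker callback: under memory pressure, free at most one shadow
 * page from one VM per invocation, skipping VMs whose slots_lock is
 * contended, and rotate the VM we freed from to the tail of vm_list
 * so the cost is spread across guests.  Returns the total number of
 * shadow pages in use so the VM can size future scan requests.
 */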
1983static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
1984{
1985 struct kvm *kvm;
1986 struct kvm *kvm_freed = NULL;
1987 int cache_count = 0;
1988
1989 spin_lock(&kvm_lock);
1990
1991 list_for_each_entry(kvm, &vm_list, vm_list) {
1992 int npages;
1993
5a4c9288
MT
1994 if (!down_read_trylock(&kvm->slots_lock))
1995 continue;
3ee16c81
IE
1996 spin_lock(&kvm->mmu_lock);
1997 npages = kvm->arch.n_alloc_mmu_pages -
1998 kvm->arch.n_free_mmu_pages;
1999 cache_count += npages;
2000 if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
2001 kvm_mmu_remove_one_alloc_mmu_page(kvm);
2002 cache_count--;
2003 kvm_freed = kvm;
2004 }
2005 nr_to_scan--;
2006
2007 spin_unlock(&kvm->mmu_lock);
5a4c9288 2008 up_read(&kvm->slots_lock);
3ee16c81
IE
2009 }
2010 if (kvm_freed)
2011 list_move_tail(&kvm_freed->vm_list, &vm_list);
2012
2013 spin_unlock(&kvm_lock);
2014
2015 return cache_count;
2016}
2017
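/*
 * seeks is ten times the default: recreating a shadow page means
 * re-walking guest page tables and taking new faults, so the VM
 * should prefer reclaiming almost anything else first.
 */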
2018static struct shrinker mmu_shrinker = {
2019 .shrink = mmu_shrink,
2020 .seeks = DEFAULT_SEEKS * 10,
2021};
2022
2ddfd20e 2023static void mmu_destroy_caches(void)
b5a33a75
AK
2024{
2025 if (pte_chain_cache)
2026 kmem_cache_destroy(pte_chain_cache);
2027 if (rmap_desc_cache)
2028 kmem_cache_destroy(rmap_desc_cache);
d3d25b04
AK
2029 if (mmu_page_header_cache)
2030 kmem_cache_destroy(mmu_page_header_cache);
b5a33a75
AK
2031}
2032
3ee16c81
IE
2033void kvm_mmu_module_exit(void)
2034{
2035 mmu_destroy_caches();
2036 unregister_shrinker(&mmu_shrinker);
2037}
2038
b5a33a75
AK
2039int kvm_mmu_module_init(void)
2040{
2041 pte_chain_cache = kmem_cache_create("kvm_pte_chain",
2042 sizeof(struct kvm_pte_chain),
20c2df83 2043 0, 0, NULL);
b5a33a75
AK
2044 if (!pte_chain_cache)
2045 goto nomem;
2046 rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
2047 sizeof(struct kvm_rmap_desc),
20c2df83 2048 0, 0, NULL);
b5a33a75
AK
2049 if (!rmap_desc_cache)
2050 goto nomem;
2051
d3d25b04
AK
2052 mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
2053 sizeof(struct kvm_mmu_page),
20c2df83 2054 0, 0, NULL);
d3d25b04
AK
2055 if (!mmu_page_header_cache)
2056 goto nomem;
2057
3ee16c81
IE
2058 register_shrinker(&mmu_shrinker);
2059
b5a33a75
AK
2060 return 0;
2061
2062nomem:
3ee16c81 2063 mmu_destroy_caches();
b5a33a75
AK
2064 return -ENOMEM;
2065}
2066
3ad82a7e
ZX
2067/*
 2068 * Calculate the number of mmu pages needed for kvm.
2069 */
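/*
 * Example, assuming the usual values KVM_PERMILLE_MMU_PAGES == 20 and
 * KVM_MIN_ALLOC_MMU_PAGES == 64: a guest with 1GB of memory (262144
 * 4K pages) is given 262144 * 20 / 1000 = 5242 shadow pages, while an
 * 8MB guest (2048 pages) computes only 40 and is rounded up to the
 * 64-page minimum.
 */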
2070unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
2071{
2072 int i;
2073 unsigned int nr_mmu_pages;
2074 unsigned int nr_pages = 0;
2075
2076 for (i = 0; i < kvm->nmemslots; i++)
2077 nr_pages += kvm->memslots[i].npages;
2078
2079 nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
2080 nr_mmu_pages = max(nr_mmu_pages,
2081 (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
2082
2083 return nr_mmu_pages;
2084}
2085
2f333bcb
MT
2086static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
2087 unsigned len)
2088{
2089 if (len > buffer->len)
2090 return NULL;
2091 return buffer->ptr;
2092}
2093
2094static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
2095 unsigned len)
2096{
2097 void *ret;
2098
2099 ret = pv_mmu_peek_buffer(buffer, len);
2100 if (!ret)
2101 return ret;
2102 buffer->ptr += len;
2103 buffer->len -= len;
2104 buffer->processed += len;
2105 return ret;
2106}
2107
2108static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
2109 gpa_t addr, gpa_t value)
2110{
2111 int bytes = 8;
2112 int r;
2113
2114 if (!is_long_mode(vcpu) && !is_pae(vcpu))
2115 bytes = 4;
2116
2117 r = mmu_topup_memory_caches(vcpu);
2118 if (r)
2119 return r;
2120
3200f405 2121 if (!emulator_write_phys(vcpu, addr, &value, bytes))
2f333bcb
MT
2122 return -EFAULT;
2123
2124 return 1;
2125}
2126
2127static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
2128{
2129 kvm_x86_ops->tlb_flush(vcpu);
2130 return 1;
2131}
2132
2133static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
2134{
2135 spin_lock(&vcpu->kvm->mmu_lock);
2136 mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
2137 spin_unlock(&vcpu->kvm->mmu_lock);
2138 return 1;
2139}
2140
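/*
 * A pv mmu op buffer is a packed sequence of operations, each a
 * struct kvm_mmu_op_header (carrying the op code) immediately
 * followed by the op-specific payload.  Processing stops at the
 * first truncated or failing op; buffer->processed reports how
 * many bytes were consumed.
 */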
2141static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
2142 struct kvm_pv_mmu_op_buffer *buffer)
2143{
2144 struct kvm_mmu_op_header *header;
2145
2146 header = pv_mmu_peek_buffer(buffer, sizeof *header);
2147 if (!header)
2148 return 0;
2149 switch (header->op) {
2150 case KVM_MMU_OP_WRITE_PTE: {
2151 struct kvm_mmu_op_write_pte *wpte;
2152
2153 wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
2154 if (!wpte)
2155 return 0;
2156 return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
2157 wpte->pte_val);
2158 }
2159 case KVM_MMU_OP_FLUSH_TLB: {
2160 struct kvm_mmu_op_flush_tlb *ftlb;
2161
2162 ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
2163 if (!ftlb)
2164 return 0;
2165 return kvm_pv_mmu_flush_tlb(vcpu);
2166 }
2167 case KVM_MMU_OP_RELEASE_PT: {
2168 struct kvm_mmu_op_release_pt *rpt;
2169
2170 rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
2171 if (!rpt)
2172 return 0;
2173 return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
2174 }
2175 default: return 0;
2176 }
2177}
2178
2179int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
2180 gpa_t addr, unsigned long *ret)
2181{
2182 int r;
2183 struct kvm_pv_mmu_op_buffer buffer;
2184
2f333bcb
MT
2185 buffer.ptr = buffer.buf;
2186 buffer.len = min_t(unsigned long, bytes, sizeof buffer.buf);
2187 buffer.processed = 0;
2188
2189 r = kvm_read_guest(vcpu->kvm, addr, buffer.buf, buffer.len);
2190 if (r)
2191 goto out;
2192
2193 while (buffer.len) {
2194 r = kvm_pv_mmu_op_one(vcpu, &buffer);
2195 if (r < 0)
2196 goto out;
2197 if (r == 0)
2198 break;
2199 }
2200
2201 r = 1;
2202out:
2203 *ret = buffer.processed;
2f333bcb
MT
2204 return r;
2205}
2206
37a7d8b0
AK
2207#ifdef AUDIT
2208
2209static const char *audit_msg;
2210
2211static gva_t canonicalize(gva_t gva)
2212{
2213#ifdef CONFIG_X86_64
2214 gva = (long long)(gva << 16) >> 16;
2215#endif
2216 return gva;
2217}
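/*
 * Example: with 48-bit virtual addresses, shifting left and then
 * arithmetic-right by 16 copies bit 47 into bits 48-63, so
 * 0x0000800000000000 becomes 0xffff800000000000.
 */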
2218
2219static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
2220 gva_t va, int level)
2221{
2222 u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
2223 int i;
2224 gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
2225
2226 for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
2227 u64 ent = pt[i];
2228
c7addb90 2229 if (ent == shadow_trap_nonpresent_pte)
37a7d8b0
AK
2230 continue;
2231
2232 va = canonicalize(va);
c7addb90
AK
2233 if (level > 1) {
2234 if (ent == shadow_notrap_nonpresent_pte)
2235 printk(KERN_ERR "audit: (%s) nontrapping pte"
2236 " in nonleaf level: levels %d gva %lx"
2237 " level %d pte %llx\n", audit_msg,
ad312c7c 2238 vcpu->arch.mmu.root_level, va, level, ent);
c7addb90 2239
37a7d8b0 2240 audit_mappings_page(vcpu, ent, va, level - 1);
c7addb90 2241 } else {
ad312c7c 2242 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
35149e21 2243 hpa_t hpa = (hpa_t)gpa_to_pfn(vcpu, gpa) << PAGE_SHIFT;
37a7d8b0 2244
c7addb90 2245 if (is_shadow_present_pte(ent)
37a7d8b0 2246 && (ent & PT64_BASE_ADDR_MASK) != hpa)
c7addb90
AK
2247 printk(KERN_ERR "xx audit error: (%s) levels %d"
2248 " gva %lx gpa %llx hpa %llx ent %llx %d\n",
ad312c7c 2249 audit_msg, vcpu->arch.mmu.root_level,
d77c26fc
MD
2250 va, gpa, hpa, ent,
2251 is_shadow_present_pte(ent));
c7addb90
AK
2252 else if (ent == shadow_notrap_nonpresent_pte
2253 && !is_error_hpa(hpa))
2254 printk(KERN_ERR "audit: (%s) notrap shadow,"
2255 " valid guest gva %lx\n", audit_msg, va);
35149e21 2256 kvm_release_pfn_clean(hpa >> PAGE_SHIFT); /* drop the ref gpa_to_pfn() took */
c7addb90 2257
37a7d8b0
AK
2258 }
2259 }
2260}
2261
2262static void audit_mappings(struct kvm_vcpu *vcpu)
2263{
1ea252af 2264 unsigned i;
37a7d8b0 2265
ad312c7c
ZX
2266 if (vcpu->arch.mmu.root_level == 4)
2267 audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
37a7d8b0
AK
2268 else
2269 for (i = 0; i < 4; ++i)
ad312c7c 2270 if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
37a7d8b0 2271 audit_mappings_page(vcpu,
ad312c7c 2272 vcpu->arch.mmu.pae_root[i],
37a7d8b0
AK
2273 i << 30,
2274 2);
2275}
2276
2277static int count_rmaps(struct kvm_vcpu *vcpu)
2278{
2279 int nmaps = 0;
2280 int i, j, k;
2281
2282 for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
2283 struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
2284 struct kvm_rmap_desc *d;
2285
2286 for (j = 0; j < m->npages; ++j) {
290fc38d 2287 unsigned long *rmapp = &m->rmap[j];
37a7d8b0 2288
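			/*
			 * The low bit of *rmapp encodes its type: clear
			 * means it points at a single spte, set means it
			 * points (minus the tag bit) at a chain of
			 * kvm_rmap_desc blocks of RMAP_EXT sptes each.
			 */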
290fc38d 2289 if (!*rmapp)
37a7d8b0 2290 continue;
290fc38d 2291 if (!(*rmapp & 1)) {
37a7d8b0
AK
2292 ++nmaps;
2293 continue;
2294 }
290fc38d 2295 d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
37a7d8b0
AK
2296 while (d) {
2297 for (k = 0; k < RMAP_EXT; ++k)
2298 if (d->shadow_ptes[k])
2299 ++nmaps;
2300 else
2301 break;
2302 d = d->more;
2303 }
2304 }
2305 }
2306 return nmaps;
2307}
2308
2309static int count_writable_mappings(struct kvm_vcpu *vcpu)
2310{
2311 int nmaps = 0;
4db35314 2312 struct kvm_mmu_page *sp;
37a7d8b0
AK
2313 int i;
2314
f05e70ac 2315 list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
4db35314 2316 u64 *pt = sp->spt;
37a7d8b0 2317
4db35314 2318 if (sp->role.level != PT_PAGE_TABLE_LEVEL)
37a7d8b0
AK
2319 continue;
2320
2321 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
2322 u64 ent = pt[i];
2323
2324 if (!(ent & PT_PRESENT_MASK))
2325 continue;
2326 if (!(ent & PT_WRITABLE_MASK))
2327 continue;
2328 ++nmaps;
2329 }
2330 }
2331 return nmaps;
2332}
2333
2334static void audit_rmap(struct kvm_vcpu *vcpu)
2335{
2336 int n_rmap = count_rmaps(vcpu);
2337 int n_actual = count_writable_mappings(vcpu);
2338
2339 if (n_rmap != n_actual)
2340 printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
b8688d51 2341 __func__, audit_msg, n_rmap, n_actual);
37a7d8b0
AK
2342}
2343
2344static void audit_write_protection(struct kvm_vcpu *vcpu)
2345{
4db35314 2346 struct kvm_mmu_page *sp;
290fc38d
IE
2347 struct kvm_memory_slot *slot;
2348 unsigned long *rmapp;
2349 gfn_t gfn;
37a7d8b0 2350
f05e70ac 2351 list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
4db35314 2352 if (sp->role.metaphysical)
37a7d8b0
AK
2353 continue;
2354
4db35314
AK
2355 slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
2356 gfn = unalias_gfn(vcpu->kvm, sp->gfn);
290fc38d
IE
2357 rmapp = &slot->rmap[gfn - slot->base_gfn];
2358 if (*rmapp)
37a7d8b0
AK
2359 printk(KERN_ERR "%s: (%s) shadow page has writable"
2360 " mappings: gfn %lx role %x\n",
b8688d51 2361 __func__, audit_msg, sp->gfn,
4db35314 2362 sp->role.word);
37a7d8b0
AK
2363 }
2364}
2365
2366static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
2367{
2368 int olddbg = dbg;
2369
2370 dbg = 0;
2371 audit_msg = msg;
2372 audit_rmap(vcpu);
2373 audit_write_protection(vcpu);
2374 audit_mappings(vcpu);
2375 dbg = olddbg;
2376}
2377
2378#endif