KVM: SVM: add module parameter to disable Nested Paging
arch/x86/kvm/mmu.c
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "vmx.h"
#include "mmu.h"

#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h>

#include <asm/page.h>
#include <asm/cmpxchg.h>
#include <asm/io.h>

#undef MMU_DEBUG

#undef AUDIT

#ifdef AUDIT
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
#endif

#ifdef MMU_DEBUG

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)

#else

#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)

#endif

#if defined(MMU_DEBUG) || defined(AUDIT)
static int dbg = 1;
#endif

#ifndef MMU_DEBUG
#define ASSERT(x) do { } while (0)
#else
#define ASSERT(x)							\
	if (!(x)) {							\
		printk(KERN_WARNING "assertion failed %s:%d: %s\n",	\
		       __FILE__, __LINE__, #x);				\
	}
#endif

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_MASK (1ULL << 5)
#define PT_DIRTY_MASK (1ULL << 6)
#define PT_PAGE_SIZE_MASK (1ULL << 7)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)


#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_LEVEL_MASK(level) \
		(((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))


#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LEVEL_MASK(level) \
		(((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))


#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#define PT64_DIR_BASE_ADDR_MASK \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))

#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
			| PT64_NX_MASK)

#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)
#define PFERR_FETCH_MASK (1U << 4)

#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

#define RMAP_EXT 4

#define ACC_EXEC_MASK    1
#define ACC_WRITE_MASK   PT_WRITABLE_MASK
#define ACC_USER_MASK    PT_USER_MASK
#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

struct kvm_rmap_desc {
	u64 *shadow_ptes[RMAP_EXT];
	struct kvm_rmap_desc *more;
};

static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
static struct kmem_cache *mmu_page_header_cache;

static u64 __read_mostly shadow_trap_nonpresent_pte;
static u64 __read_mostly shadow_notrap_nonpresent_pte;

void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
{
	shadow_trap_nonpresent_pte = trap_pte;
	shadow_notrap_nonpresent_pte = notrap_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);

static int is_write_protection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.cr0 & X86_CR0_WP;
}

static int is_cpuid_PSE36(void)
{
	return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.shadow_efer & EFER_NX;
}

static int is_present_pte(unsigned long pte)
{
	return pte & PT_PRESENT_MASK;
}

static int is_shadow_present_pte(u64 pte)
{
	return pte != shadow_trap_nonpresent_pte
		&& pte != shadow_notrap_nonpresent_pte;
}

static int is_writeble_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static int is_dirty_pte(unsigned long pte)
{
	return pte & PT_DIRTY_MASK;
}

static int is_rmap_pte(u64 pte)
{
	return is_shadow_present_pte(pte);
}

static gfn_t pse36_gfn_delta(u32 gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}
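
/*
 * Added note (commentary, not from the original source): with PSE-36, a
 * 32-bit 4MB page directory entry carries high physical address bits in
 * its bits 13..16 (PT32_DIR_PSE36_SHIFT, PT32_DIR_PSE36_SIZE above).
 * Here shift = 32 - 13 - 12 = 7, so those bits land at gfn bits 20..23,
 * i.e. physical address bits 32..35 of the mapped frame.
 */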

static void set_shadow_pte(u64 *sptep, u64 spte)
{
#ifdef CONFIG_X86_64
	set_64bit((unsigned long *)sptep, spte);
#else
	set_64bit((unsigned long long *)sptep, spte);
#endif
}

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  struct kmem_cache *base_cache, int min)
{
	void *obj;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
		if (!obj)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = obj;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		kfree(mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
				       int min)
{
	struct page *page;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		page = alloc_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		set_page_private(page, 0);
		cache->objects[cache->nobjs++] = page_address(page);
	}
	return 0;
}

static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
				   pte_chain_cache, 4);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
				   rmap_desc_cache, 1);
	if (r)
		goto out;
	r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
				   mmu_page_header_cache, 4);
out:
	return r;
}
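
/*
 * Added commentary (an assumption drawn from how these caches are used
 * below, not text from the original source): the caches are filled with
 * GFP_KERNEL allocations up front, before mmu_lock is taken, so that
 * fault-path code running under the spinlock can take objects via
 * mmu_memory_cache_alloc() without sleeping.
 */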

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
	mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
				    size_t size)
{
	void *p;

	BUG_ON(!mc->nobjs);
	p = mc->objects[--mc->nobjs];
	memset(p, 0, size);
	return p;
}

static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
				      sizeof(struct kvm_pte_chain));
}

static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
{
	kfree(pc);
}

static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
				      sizeof(struct kvm_rmap_desc));
}

static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
{
	kfree(rd);
}

/*
 * Take gfn and return the reverse mapping to it.
 * Note: gfn must be unaliased before this function gets called.
 */

static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	slot = gfn_to_memslot(kvm, gfn);
	return &slot->rmap[gfn - slot->base_gfn];
}

/*
 * Reverse mapping data structures:
 *
 * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
 * that points to page_address(page).
 *
 * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
 * containing more mappings.
 */
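
/*
 * Illustrative sketch (added commentary, not from the original source):
 * the low bit of *rmapp is a tag; rmap_add() and rmap_remove() below
 * decode a slot roughly as:
 *
 *	if (!(*rmapp & 1))
 *		spte = (u64 *)*rmapp;            // exactly one mapping
 *	else
 *		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul); // a list
 *
 * with each desc holding up to RMAP_EXT sptes plus a 'more' link.
 */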
static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
	struct kvm_mmu_page *sp;
	struct kvm_rmap_desc *desc;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	gfn = unalias_gfn(vcpu->kvm, gfn);
	sp = page_header(__pa(spte));
	sp->gfns[spte - sp->spt] = gfn;
	rmapp = gfn_to_rmap(vcpu->kvm, gfn);
	if (!*rmapp) {
		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
		*rmapp = (unsigned long)spte;
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
		desc = mmu_alloc_rmap_desc(vcpu);
		desc->shadow_ptes[0] = (u64 *)*rmapp;
		desc->shadow_ptes[1] = spte;
		*rmapp = (unsigned long)desc | 1;
	} else {
		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
			desc = desc->more;
		if (desc->shadow_ptes[RMAP_EXT-1]) {
			desc->more = mmu_alloc_rmap_desc(vcpu);
			desc = desc->more;
		}
		for (i = 0; desc->shadow_ptes[i]; ++i)
			;
		desc->shadow_ptes[i] = spte;
	}
}

static void rmap_desc_remove_entry(unsigned long *rmapp,
				   struct kvm_rmap_desc *desc,
				   int i,
				   struct kvm_rmap_desc *prev_desc)
{
	int j;

	for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
		;
	desc->shadow_ptes[i] = desc->shadow_ptes[j];
	desc->shadow_ptes[j] = NULL;
	if (j != 0)
		return;
	if (!prev_desc && !desc->more)
		*rmapp = (unsigned long)desc->shadow_ptes[0];
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
			*rmapp = (unsigned long)desc->more | 1;
	mmu_free_rmap_desc(desc);
}

static void rmap_remove(struct kvm *kvm, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	struct kvm_mmu_page *sp;
	struct page *page;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	sp = page_header(__pa(spte));
	page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
	mark_page_accessed(page);
	if (is_writeble_pte(*spte))
		kvm_release_page_dirty(page);
	else
		kvm_release_page_clean(page);
	rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt]);
	if (!*rmapp) {
		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
		BUG();
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte);
		if ((u64 *)*rmapp != spte) {
			printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n",
			       spte, *spte);
			BUG();
		}
		*rmapp = 0;
	} else {
		rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		prev_desc = NULL;
		while (desc) {
			for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
				if (desc->shadow_ptes[i] == spte) {
					rmap_desc_remove_entry(rmapp,
							       desc, i,
							       prev_desc);
					return;
				}
			prev_desc = desc;
			desc = desc->more;
		}
		BUG();
	}
}

static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	u64 *prev_spte;
	int i;

	if (!*rmapp)
		return NULL;
	else if (!(*rmapp & 1)) {
		if (!spte)
			return (u64 *)*rmapp;
		return NULL;
	}
	desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
	prev_desc = NULL;
	prev_spte = NULL;
	while (desc) {
		for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
			if (prev_spte == spte)
				return desc->shadow_ptes[i];
			prev_spte = desc->shadow_ptes[i];
		}
		desc = desc->more;
	}
	return NULL;
}

static void rmap_write_protect(struct kvm *kvm, u64 gfn)
{
	unsigned long *rmapp;
	u64 *spte;
	int write_protected = 0;

	gfn = unalias_gfn(kvm, gfn);
	rmapp = gfn_to_rmap(kvm, gfn);

	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		BUG_ON(!spte);
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
		if (is_writeble_pte(*spte)) {
			set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
			write_protected = 1;
		}
		spte = rmap_next(kvm, rmapp, spte);
	}
	if (write_protected)
		kvm_flush_remote_tlbs(kvm);
}

#ifdef MMU_DEBUG
static int is_empty_shadow_page(u64 *spt)
{
	u64 *pos;
	u64 *end;

	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
		if (*pos != shadow_trap_nonpresent_pte) {
			printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
			       pos, *pos);
			return 0;
		}
	return 1;
}
#endif

static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	ASSERT(is_empty_shadow_page(sp->spt));
	list_del(&sp->link);
	__free_page(virt_to_page(sp->spt));
	__free_page(virt_to_page(sp->gfns));
	kfree(sp);
	++kvm->arch.n_free_mmu_pages;
}

static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
	return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
}
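
/*
 * Added note (an assumption, not original text): shadow pages are looked
 * up by guest frame number in a hash table of 1 << KVM_MMU_HASH_SHIFT
 * buckets, so this simply keeps the low bits of the gfn as the bucket
 * index.
 */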

static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
					       u64 *parent_pte)
{
	struct kvm_mmu_page *sp;

	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
	sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
	ASSERT(is_empty_shadow_page(sp->spt));
	sp->slot_bitmap = 0;
	sp->multimapped = 0;
	sp->parent_pte = parent_pte;
	--vcpu->kvm->arch.n_free_mmu_pages;
	return sp;
}

static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp, u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!parent_pte)
		return;
	if (!sp->multimapped) {
		u64 *old = sp->parent_pte;

		if (!old) {
			sp->parent_pte = parent_pte;
			return;
		}
		sp->multimapped = 1;
		pte_chain = mmu_alloc_pte_chain(vcpu);
		INIT_HLIST_HEAD(&sp->parent_ptes);
		hlist_add_head(&pte_chain->link, &sp->parent_ptes);
		pte_chain->parent_ptes[0] = old;
	}
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
		if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
			continue;
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
			if (!pte_chain->parent_ptes[i]) {
				pte_chain->parent_ptes[i] = parent_pte;
				return;
			}
	}
	pte_chain = mmu_alloc_pte_chain(vcpu);
	BUG_ON(!pte_chain);
	hlist_add_head(&pte_chain->link, &sp->parent_ptes);
	pte_chain->parent_ptes[0] = parent_pte;
}

static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
				       u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!sp->multimapped) {
		BUG_ON(sp->parent_pte != parent_pte);
		sp->parent_pte = NULL;
		return;
	}
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
			if (!pte_chain->parent_ptes[i])
				break;
			if (pte_chain->parent_ptes[i] != parent_pte)
				continue;
			while (i + 1 < NR_PTE_CHAIN_ENTRIES
				&& pte_chain->parent_ptes[i + 1]) {
				pte_chain->parent_ptes[i]
					= pte_chain->parent_ptes[i + 1];
				++i;
			}
			pte_chain->parent_ptes[i] = NULL;
			if (i == 0) {
				hlist_del(&pte_chain->link);
				mmu_free_pte_chain(pte_chain);
				if (hlist_empty(&sp->parent_ptes)) {
					sp->multimapped = 0;
					sp->parent_pte = NULL;
				}
			}
			return;
		}
	BUG();
}

static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node;

	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
	index = kvm_page_table_hashfn(gfn);
	bucket = &kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry(sp, node, bucket, hash_link)
		if (sp->gfn == gfn && !sp->role.metaphysical) {
			pgprintk("%s: found role %x\n",
				 __FUNCTION__, sp->role.word);
			return sp;
		}
	return NULL;
}

static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
					     int metaphysical,
					     unsigned access,
					     u64 *parent_pte)
{
	union kvm_mmu_page_role role;
	unsigned index;
	unsigned quadrant;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node;

	role.word = 0;
	role.glevels = vcpu->arch.mmu.root_level;
	role.level = level;
	role.metaphysical = metaphysical;
	role.access = access;
	if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}
	pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
		 gfn, role.word);
	index = kvm_page_table_hashfn(gfn);
	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry(sp, node, bucket, hash_link)
		if (sp->gfn == gfn && sp->role.word == role.word) {
			mmu_page_add_parent_pte(vcpu, sp, parent_pte);
			pgprintk("%s: found\n", __FUNCTION__);
			return sp;
		}
	++vcpu->kvm->stat.mmu_cache_miss;
	sp = kvm_mmu_alloc_page(vcpu, parent_pte);
	if (!sp)
		return sp;
	pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
	sp->gfn = gfn;
	sp->role = role;
	hlist_add_head(&sp->hash_link, bucket);
	vcpu->arch.mmu.prefetch_page(vcpu, sp);
	if (!metaphysical)
		rmap_write_protect(vcpu->kvm, gfn);
	return sp;
}
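
/*
 * Added commentary (an assumption drawn from the quadrant math above, not
 * original text): when a 32-bit guest table (1024 entries spanning 4MB) is
 * shadowed by 64-bit tables (512 entries spanning 2MB), one guest page
 * table maps onto more than one shadow page; role.quadrant records which
 * portion of the guest table a given shadow page covers, keeping lookups
 * unique.
 */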

static void kvm_mmu_page_unlink_children(struct kvm *kvm,
					 struct kvm_mmu_page *sp)
{
	unsigned i;
	u64 *pt;
	u64 ent;

	pt = sp->spt;

	if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			if (is_shadow_present_pte(pt[i]))
				rmap_remove(kvm, &pt[i]);
			pt[i] = shadow_trap_nonpresent_pte;
		}
		kvm_flush_remote_tlbs(kvm);
		return;
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		ent = pt[i];

		pt[i] = shadow_trap_nonpresent_pte;
		if (!is_shadow_present_pte(ent))
			continue;
		ent &= PT64_BASE_ADDR_MASK;
		mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
	}
	kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
{
	mmu_page_remove_parent_pte(sp, parent_pte);
}

static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
{
	int i;

	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i])
			kvm->vcpus[i]->arch.last_pte_updated = NULL;
}

static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	u64 *parent_pte;

	++kvm->stat.mmu_shadow_zapped;
	while (sp->multimapped || sp->parent_pte) {
		if (!sp->multimapped)
			parent_pte = sp->parent_pte;
		else {
			struct kvm_pte_chain *chain;

			chain = container_of(sp->parent_ptes.first,
					     struct kvm_pte_chain, link);
			parent_pte = chain->parent_ptes[0];
		}
		BUG_ON(!parent_pte);
		kvm_mmu_put_page(sp, parent_pte);
		set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
	}
	kvm_mmu_page_unlink_children(kvm, sp);
	if (!sp->root_count) {
		hlist_del(&sp->hash_link);
		kvm_mmu_free_page(kvm, sp);
	} else
		list_move(&sp->link, &kvm->arch.active_mmu_pages);
	kvm_mmu_reset_last_pte_updated(kvm);
}

/*
 * Changing the number of mmu pages allocated to the vm.
 * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock.
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
{
	/*
	 * If we set the number of mmu pages to be smaller than the number
	 * of active pages, we must free some mmu pages before we change
	 * the value.
	 */

	if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
	    kvm_nr_mmu_pages) {
		int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
				       - kvm->arch.n_free_mmu_pages;

		while (n_used_mmu_pages > kvm_nr_mmu_pages) {
			struct kvm_mmu_page *page;

			page = container_of(kvm->arch.active_mmu_pages.prev,
					    struct kvm_mmu_page, link);
			kvm_mmu_zap_page(kvm, page);
			n_used_mmu_pages--;
		}
		kvm->arch.n_free_mmu_pages = 0;
	} else
		kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
					      - kvm->arch.n_alloc_mmu_pages;

	kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
}
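
/*
 * Added note (an assumption about intent, not original text): pages are
 * reclaimed from the tail of active_mmu_pages; new pages go to the head
 * and kvm_mmu_zap_page() moves still-rooted pages back to the head, so
 * shrinking the limit tends to evict the least recently allocated shadow
 * pages first.
 */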

static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node, *n;
	int r;

	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
	r = 0;
	index = kvm_page_table_hashfn(gfn);
	bucket = &kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
		if (sp->gfn == gfn && !sp->role.metaphysical) {
			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
				 sp->role.word);
			kvm_mmu_zap_page(kvm, sp);
			r = 1;
		}
	return r;
}

static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_mmu_page *sp;

	while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
		pgprintk("%s: zap %lx %x\n", __FUNCTION__, gfn, sp->role.word);
		kvm_mmu_zap_page(kvm, sp);
	}
}

static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
{
	int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
	struct kvm_mmu_page *sp = page_header(__pa(pte));

	__set_bit(slot, &sp->slot_bitmap);
}

struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct page *page;

	gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);

	if (gpa == UNMAPPED_GVA)
		return NULL;

	down_read(&current->mm->mmap_sem);
	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
	up_read(&current->mm->mmap_sem);

	return page;
}

static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
			 unsigned pt_access, unsigned pte_access,
			 int user_fault, int write_fault, int dirty,
			 int *ptwrite, gfn_t gfn, struct page *page)
{
	u64 spte;
	int was_rmapped = 0;
	int was_writeble = is_writeble_pte(*shadow_pte);
	hfn_t host_pfn = (*shadow_pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;

	pgprintk("%s: spte %llx access %x write_fault %d"
		 " user_fault %d gfn %lx\n",
		 __FUNCTION__, *shadow_pte, pt_access,
		 write_fault, user_fault, gfn);

	if (is_rmap_pte(*shadow_pte)) {
		if (host_pfn != page_to_pfn(page)) {
			pgprintk("hfn old %lx new %lx\n",
				 host_pfn, page_to_pfn(page));
			rmap_remove(vcpu->kvm, shadow_pte);
		} else
			was_rmapped = 1;
	}

	/*
	 * We don't set the accessed bit, since we sometimes want to see
	 * whether the guest actually used the pte (in order to detect
	 * demand paging).
	 */
	spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
	if (!dirty)
		pte_access &= ~ACC_WRITE_MASK;
	if (!(pte_access & ACC_EXEC_MASK))
		spte |= PT64_NX_MASK;

	spte |= PT_PRESENT_MASK;
	if (pte_access & ACC_USER_MASK)
		spte |= PT_USER_MASK;

	spte |= page_to_phys(page);

	if ((pte_access & ACC_WRITE_MASK)
	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
		struct kvm_mmu_page *shadow;

		spte |= PT_WRITABLE_MASK;
		if (user_fault) {
			mmu_unshadow(vcpu->kvm, gfn);
			goto unshadowed;
		}

		shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
		if (shadow) {
			pgprintk("%s: found shadow page for %lx, marking ro\n",
				 __FUNCTION__, gfn);
			pte_access &= ~ACC_WRITE_MASK;
			if (is_writeble_pte(spte)) {
				spte &= ~PT_WRITABLE_MASK;
				kvm_x86_ops->tlb_flush(vcpu);
			}
			if (write_fault)
				*ptwrite = 1;
		}
	}

unshadowed:

	if (pte_access & ACC_WRITE_MASK)
		mark_page_dirty(vcpu->kvm, gfn);

	pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte);
	set_shadow_pte(shadow_pte, spte);
	page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
	if (!was_rmapped) {
		rmap_add(vcpu, shadow_pte, gfn);
		if (!is_rmap_pte(*shadow_pte))
			kvm_release_page_clean(page);
	} else {
		if (was_writeble)
			kvm_release_page_dirty(page);
		else
			kvm_release_page_clean(page);
	}
	if (!ptwrite || !*ptwrite)
		vcpu->arch.last_pte_updated = shadow_pte;
}

static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}

static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write,
			   gfn_t gfn, struct page *page)
{
	int level = PT32E_ROOT_LEVEL;
	hpa_t table_addr = vcpu->arch.mmu.root_hpa;
	int pt_write = 0;

	for (; ; level--) {
		u32 index = PT64_INDEX(v, level);
		u64 *table;

		ASSERT(VALID_PAGE(table_addr));
		table = __va(table_addr);

		if (level == 1) {
			mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
				     0, write, 1, &pt_write, gfn, page);
			return pt_write;
		}

		if (table[index] == shadow_trap_nonpresent_pte) {
			struct kvm_mmu_page *new_table;
			gfn_t pseudo_gfn;

			pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
						     v, level - 1,
						     1, ACC_ALL, &table[index]);
			if (!new_table) {
				pgprintk("nonpaging_map: ENOMEM\n");
				kvm_release_page_clean(page);
				return -ENOMEM;
			}

			table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
				| PT_WRITABLE_MASK | PT_USER_MASK;
		}
		table_addr = table[index] & PT64_BASE_ADDR_MASK;
	}
}
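
/*
 * Added walkthrough (commentary, not original text): __nonpaging_map()
 * descends from the root one level per iteration, allocating missing
 * intermediate shadow pages as metaphysical pages keyed by a pseudo-gfn
 * derived from the faulting address, until it installs the leaf spte at
 * level 1 via mmu_set_spte().
 */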

static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
{
	int r;

	struct page *page;

	down_read(&vcpu->kvm->slots_lock);

	down_read(&current->mm->mmap_sem);
	page = gfn_to_page(vcpu->kvm, gfn);
	up_read(&current->mm->mmap_sem);

	/* mmio */
	if (is_error_page(page)) {
		kvm_release_page_clean(page);
		up_read(&vcpu->kvm->slots_lock);
		return 1;
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);
	r = __nonpaging_map(vcpu, v, write, gfn, page);
	spin_unlock(&vcpu->kvm->mmu_lock);

	up_read(&vcpu->kvm->slots_lock);

	return r;
}


static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp)
{
	int i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		sp->spt[i] = shadow_trap_nonpresent_pte;
}

static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;
	spin_lock(&vcpu->kvm->mmu_lock);
#ifdef CONFIG_X86_64
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		sp = page_header(root);
		--sp->root_count;
		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
		spin_unlock(&vcpu->kvm->mmu_lock);
		return;
	}
#endif
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			--sp->root_count;
		}
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
	}
	spin_unlock(&vcpu->kvm->mmu_lock);
	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
}

static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
	int i;
	gfn_t root_gfn;
	struct kvm_mmu_page *sp;

	root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;

#ifdef CONFIG_X86_64
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		ASSERT(!VALID_PAGE(root));
		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
				      PT64_ROOT_LEVEL, 0, ACC_ALL, NULL);
		root = __pa(sp->spt);
		++sp->root_count;
		vcpu->arch.mmu.root_hpa = root;
		return;
	}
#endif
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		ASSERT(!VALID_PAGE(root));
		if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
			if (!is_present_pte(vcpu->arch.pdptrs[i])) {
				vcpu->arch.mmu.pae_root[i] = 0;
				continue;
			}
			root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
		} else if (vcpu->arch.mmu.root_level == 0)
			root_gfn = 0;
		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
				      PT32_ROOT_LEVEL, !is_paging(vcpu),
				      ACC_ALL, NULL);
		root = __pa(sp->spt);
		++sp->root_count;
		vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
	}
	vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
}
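
/*
 * Added note (an assumption about the design, not original text): for PAE
 * and 32-bit guests there is no single hardware root shadow page; pae_root
 * is a kernel-allocated array of four PDPTE-style entries, and root_hpa
 * points at it so the rest of the code can treat all modes uniformly.
 */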

static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	return vaddr;
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
				u32 error_code)
{
	gfn_t gfn;
	int r;

	pgprintk("%s: gva %lx error %x\n", __FUNCTION__, gva, error_code);
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

	gfn = gva >> PAGE_SHIFT;

	return nonpaging_map(vcpu, gva & PAGE_MASK,
			     error_code & PFERR_WRITE_MASK, gfn);
}

static void nonpaging_free(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static int nonpaging_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->free = nonpaging_free;
	context->prefetch_page = nonpaging_prefetch_page;
	context->root_level = 0;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;
	kvm_x86_ops->tlb_flush(vcpu);
}

static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
	pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->arch.cr3);
	mmu_free_roots(vcpu);
}

static void inject_page_fault(struct kvm_vcpu *vcpu,
			      u64 addr,
			      u32 err_code)
{
	kvm_inject_page_fault(vcpu, addr, err_code);
}

static void paging_free(struct kvm_vcpu *vcpu)
{
	nonpaging_free(vcpu);
}

#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	ASSERT(is_pae(vcpu));
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
	context->prefetch_page = paging64_prefetch_page;
	context->free = paging_free;
	context->root_level = level;
	context->shadow_root_level = level;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging64_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
}

static int paging32_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
	context->free = paging_free;
	context->prefetch_page = paging32_prefetch_page;
	context->root_level = PT32_ROOT_LEVEL;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging32E_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
}

static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	if (!is_paging(vcpu))
		return nonpaging_init_context(vcpu);
	else if (is_long_mode(vcpu))
		return paging64_init_context(vcpu);
	else if (is_pae(vcpu))
		return paging32E_init_context(vcpu);
	else
		return paging32_init_context(vcpu);
}
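
/*
 * Added summary table (commentary, not original text) of the mode
 * selection above:
 *
 *	guest state		guest levels	shadow levels
 *	paging disabled		-		3 (PT32E)
 *	long mode		4		4
 *	PAE			3		3
 *	legacy 32-bit		2		3 (PT32E)
 */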

static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
		vcpu->arch.mmu.free(vcpu);
		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
	}
}

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
	destroy_kvm_mmu(vcpu);
	return init_kvm_mmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);

int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;
	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);
	mmu_alloc_roots(vcpu);
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
	kvm_mmu_flush_tlb(vcpu);
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_load);

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp,
				  u64 *spte)
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
	if (is_shadow_present_pte(pte)) {
		if (sp->role.level == PT_PAGE_TABLE_LEVEL)
			rmap_remove(vcpu->kvm, spte);
		else {
			child = page_header(pte & PT64_BASE_ADDR_MASK);
			mmu_page_remove_parent_pte(child, spte);
		}
	}
	set_shadow_pte(spte, shadow_trap_nonpresent_pte);
}

static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp,
				  u64 *spte,
				  const void *new)
{
	if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
		++vcpu->kvm->stat.mmu_pde_zapped;
		return;
	}

	++vcpu->kvm->stat.mmu_pte_updated;
	if (sp->role.glevels == PT32_ROOT_LEVEL)
		paging32_update_pte(vcpu, sp, spte, new);
	else
		paging64_update_pte(vcpu, sp, spte, new);
}

static bool need_remote_flush(u64 old, u64 new)
{
	if (!is_shadow_present_pte(old))
		return false;
	if (!is_shadow_present_pte(new))
		return true;
	if ((old ^ new) & PT64_BASE_ADDR_MASK)
		return true;
	old ^= PT64_NX_MASK;
	new ^= PT64_NX_MASK;
	return (old & ~new & PT64_PERM_MASK) != 0;
}
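
/*
 * Added explanation (commentary, not original text): a remote flush is
 * needed only when a permission another vcpu may have cached is revoked.
 * NX is inverted before the final test because setting NX removes a
 * right rather than granting one; after the XOR, every bit set in
 * old & ~new means "old allowed something new does not".
 */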

static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
{
	if (need_remote_flush(old, new))
		kvm_flush_remote_tlbs(vcpu->kvm);
	else
		kvm_mmu_flush_tlb(vcpu);
}

static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
{
	u64 *spte = vcpu->arch.last_pte_updated;

	return !!(spte && (*spte & PT_ACCESSED_MASK));
}

static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
					  const u8 *new, int bytes)
{
	gfn_t gfn;
	int r;
	u64 gpte = 0;
	struct page *page;

	if (bytes != 4 && bytes != 8)
		return;

	/*
	 * Assume that the pte write on a page table is of the same type
	 * as the current vcpu paging mode.  This is nearly always true
	 * (might be false while changing modes).  Note it is verified later
	 * by update_pte().
	 */
	if (is_pae(vcpu)) {
		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
		if ((bytes == 4) && (gpa % 4 == 0)) {
			r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
			if (r)
				return;
			memcpy((void *)&gpte + (gpa % 8), new, 4);
		} else if ((bytes == 8) && (gpa % 8 == 0)) {
			memcpy((void *)&gpte, new, 8);
		}
	} else {
		if ((bytes == 4) && (gpa % 4 == 0))
			memcpy((void *)&gpte, new, 4);
	}
	if (!is_present_pte(gpte))
		return;
	gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;

	down_read(&vcpu->kvm->slots_lock);
	page = gfn_to_page(vcpu->kvm, gfn);
	up_read(&vcpu->kvm->slots_lock);

	if (is_error_page(page)) {
		kvm_release_page_clean(page);
		return;
	}
	vcpu->arch.update_pte.gfn = gfn;
	vcpu->arch.update_pte.page = page;
}

void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_mmu_page *sp;
	struct hlist_node *node, *n;
	struct hlist_head *bucket;
	unsigned index;
	u64 entry, gentry;
	u64 *spte;
	unsigned offset = offset_in_page(gpa);
	unsigned pte_size;
	unsigned page_offset;
	unsigned misaligned;
	unsigned quadrant;
	int level;
	int flooded = 0;
	int npte;
	int r;

	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
	mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);
	++vcpu->kvm->stat.mmu_pte_write;
	kvm_mmu_audit(vcpu, "pre pte write");
	if (gfn == vcpu->arch.last_pt_write_gfn
	    && !last_updated_pte_accessed(vcpu)) {
		++vcpu->arch.last_pt_write_count;
		if (vcpu->arch.last_pt_write_count >= 3)
			flooded = 1;
	} else {
		vcpu->arch.last_pt_write_gfn = gfn;
		vcpu->arch.last_pt_write_count = 1;
		vcpu->arch.last_pte_updated = NULL;
	}
	index = kvm_page_table_hashfn(gfn);
	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
		if (sp->gfn != gfn || sp->role.metaphysical)
			continue;
		pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
		misaligned |= bytes < 4;
		if (misaligned || flooded) {
			/*
			 * Misaligned accesses are too much trouble to fix
			 * up; also, they usually indicate a page is not used
			 * as a page table.
			 *
			 * If we're seeing too many writes to a page,
			 * it may no longer be a page table, or we may be
			 * forking, in which case it is better to unmap the
			 * page.
			 */
			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
				 gpa, bytes, sp->role.word);
			kvm_mmu_zap_page(vcpu->kvm, sp);
			++vcpu->kvm->stat.mmu_flooded;
			continue;
		}
		page_offset = offset;
		level = sp->role.level;
		npte = 1;
		if (sp->role.glevels == PT32_ROOT_LEVEL) {
			page_offset <<= 1;	/* 32->64 */
			/*
			 * A 32-bit pde maps 4MB while the shadow pdes map
			 * only 2MB.  So we need to double the offset again
			 * and zap two pdes instead of one.
			 */
			if (level == PT32_ROOT_LEVEL) {
				page_offset &= ~7; /* kill rounding error */
				page_offset <<= 1;
				npte = 2;
			}
			quadrant = page_offset >> PAGE_SHIFT;
			page_offset &= ~PAGE_MASK;
			if (quadrant != sp->role.quadrant)
				continue;
		}
		spte = &sp->spt[page_offset / sizeof(*spte)];
		if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
			gentry = 0;
			r = kvm_read_guest_atomic(vcpu->kvm,
						  gpa & ~(u64)(pte_size - 1),
						  &gentry, pte_size);
			new = (const void *)&gentry;
			if (r < 0)
				new = NULL;
		}
		while (npte--) {
			entry = *spte;
			mmu_pte_write_zap_pte(vcpu, sp, spte);
			if (new)
				mmu_pte_write_new_pte(vcpu, sp, spte, new);
			mmu_pte_write_flush_tlb(vcpu, entry, *spte);
			++spte;
		}
	}
	kvm_mmu_audit(vcpu, "post pte write");
	spin_unlock(&vcpu->kvm->mmu_lock);
	if (vcpu->arch.update_pte.page) {
		kvm_release_page_clean(vcpu->arch.update_pte.page);
		vcpu->arch.update_pte.page = NULL;
	}
}

int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa;
	int r;

	down_read(&vcpu->kvm->slots_lock);
	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
	up_read(&vcpu->kvm->slots_lock);

	spin_lock(&vcpu->kvm->mmu_lock);
	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
	spin_unlock(&vcpu->kvm->mmu_lock);
	return r;
}

void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
	while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
		struct kvm_mmu_page *sp;

		sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
				  struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu->kvm, sp);
		++vcpu->kvm->stat.mmu_recycled;
	}
}

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
{
	int r;
	enum emulation_result er;

	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
	if (r < 0)
		goto out;

	if (!r) {
		r = 1;
		goto out;
	}

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;

	er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);

	switch (er) {
	case EMULATE_DONE:
		return 1;
	case EMULATE_DO_MMIO:
		++vcpu->stat.mmio_exits;
		return 0;
	case EMULATE_FAIL:
		kvm_report_emulation_failure(vcpu, "pagetable");
		return 1;
	default:
		BUG();
	}
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);

static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;

	while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
		sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
				  struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu->kvm, sp);
	}
	free_page((unsigned long)vcpu->arch.mmu.pae_root);
}

static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct page *page;
	int i;

	ASSERT(vcpu);

	if (vcpu->kvm->arch.n_requested_mmu_pages)
		vcpu->kvm->arch.n_free_mmu_pages =
					vcpu->kvm->arch.n_requested_mmu_pages;
	else
		vcpu->kvm->arch.n_free_mmu_pages =
					vcpu->kvm->arch.n_alloc_mmu_pages;
	/*
	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
	 * Therefore we need to allocate shadow page tables in the first
	 * 4GB of memory, which happens to fit the DMA32 zone.
	 */
	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!page)
		goto error_1;
	vcpu->arch.mmu.pae_root = page_address(page);
	for (i = 0; i < 4; ++i)
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;

	return 0;

error_1:
	free_mmu_pages(vcpu);
	return -ENOMEM;
}

int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	return alloc_mmu_pages(vcpu);
}

int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	return init_kvm_mmu(vcpu);
}

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);

	destroy_kvm_mmu(vcpu);
	free_mmu_pages(vcpu);
	mmu_free_memory_caches(vcpu);
}

void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
	struct kvm_mmu_page *sp;

	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
		int i;
		u64 *pt;

		if (!test_bit(slot, &sp->slot_bitmap))
			continue;

		pt = sp->spt;
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
			/* avoid RMW */
			if (pt[i] & PT_WRITABLE_MASK)
				pt[i] &= ~PT_WRITABLE_MASK;
	}
}

void kvm_mmu_zap_all(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;

	spin_lock(&kvm->mmu_lock);
	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
		kvm_mmu_zap_page(kvm, sp);
	spin_unlock(&kvm->mmu_lock);

	kvm_flush_remote_tlbs(kvm);
}

void kvm_mmu_module_exit(void)
{
	if (pte_chain_cache)
		kmem_cache_destroy(pte_chain_cache);
	if (rmap_desc_cache)
		kmem_cache_destroy(rmap_desc_cache);
	if (mmu_page_header_cache)
		kmem_cache_destroy(mmu_page_header_cache);
}

int kvm_mmu_module_init(void)
{
	pte_chain_cache = kmem_cache_create("kvm_pte_chain",
					    sizeof(struct kvm_pte_chain),
					    0, 0, NULL);
	if (!pte_chain_cache)
		goto nomem;
	rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
					    sizeof(struct kvm_rmap_desc),
					    0, 0, NULL);
	if (!rmap_desc_cache)
		goto nomem;

	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
						  sizeof(struct kvm_mmu_page),
						  0, 0, NULL);
	if (!mmu_page_header_cache)
		goto nomem;

	return 0;

nomem:
	kvm_mmu_module_exit();
	return -ENOMEM;
}

/*
 * Calculate the number of mmu pages needed for kvm.
 */
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
{
	int i;
	unsigned int nr_mmu_pages;
	unsigned int nr_pages = 0;

	for (i = 0; i < kvm->nmemslots; i++)
		nr_pages += kvm->memslots[i].npages;

	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
	nr_mmu_pages = max(nr_mmu_pages,
			(unsigned int) KVM_MIN_ALLOC_MMU_PAGES);

	return nr_mmu_pages;
}
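
/*
 * Added worked example (assumed constant value, not original text): if
 * KVM_PERMILLE_MMU_PAGES were 20, a guest with 1,048,576 memslot pages
 * (4GB) would get 1048576 * 20 / 1000 = 20971 shadow pages, clamped from
 * below by KVM_MIN_ALLOC_MMU_PAGES for small guests.
 */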

#ifdef AUDIT

static const char *audit_msg;

static gva_t canonicalize(gva_t gva)
{
#ifdef CONFIG_X86_64
	gva = (long long)(gva << 16) >> 16;
#endif
	return gva;
}

static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
				gva_t va, int level)
{
	u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
	int i;
	gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
		u64 ent = pt[i];

		if (ent == shadow_trap_nonpresent_pte)
			continue;

		va = canonicalize(va);
		if (level > 1) {
			if (ent == shadow_notrap_nonpresent_pte)
				printk(KERN_ERR "audit: (%s) nontrapping pte"
				       " in nonleaf level: levels %d gva %lx"
				       " level %d pte %llx\n", audit_msg,
				       vcpu->arch.mmu.root_level, va, level, ent);

			audit_mappings_page(vcpu, ent, va, level - 1);
		} else {
			gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
			struct page *page = gpa_to_page(vcpu, gpa);
			hpa_t hpa = page_to_phys(page);

			if (is_shadow_present_pte(ent)
			    && (ent & PT64_BASE_ADDR_MASK) != hpa)
				printk(KERN_ERR "xx audit error: (%s) levels %d"
				       " gva %lx gpa %llx hpa %llx ent %llx %d\n",
				       audit_msg, vcpu->arch.mmu.root_level,
				       va, gpa, hpa, ent,
				       is_shadow_present_pte(ent));
			else if (ent == shadow_notrap_nonpresent_pte
				 && !is_error_hpa(hpa))
				printk(KERN_ERR "audit: (%s) notrap shadow,"
				       " valid guest gva %lx\n", audit_msg, va);
			kvm_release_page_clean(page);

		}
	}
}

static void audit_mappings(struct kvm_vcpu *vcpu)
{
	unsigned i;

	if (vcpu->arch.mmu.root_level == 4)
		audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
	else
		for (i = 0; i < 4; ++i)
			if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
				audit_mappings_page(vcpu,
						    vcpu->arch.mmu.pae_root[i],
						    i << 30,
						    2);
}

static int count_rmaps(struct kvm_vcpu *vcpu)
{
	int nmaps = 0;
	int i, j, k;

	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
		struct kvm_rmap_desc *d;

		for (j = 0; j < m->npages; ++j) {
			unsigned long *rmapp = &m->rmap[j];

			if (!*rmapp)
				continue;
			if (!(*rmapp & 1)) {
				++nmaps;
				continue;
			}
			d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
			while (d) {
				for (k = 0; k < RMAP_EXT; ++k)
					if (d->shadow_ptes[k])
						++nmaps;
					else
						break;
				d = d->more;
			}
		}
	}
	return nmaps;
}

static int count_writable_mappings(struct kvm_vcpu *vcpu)
{
	int nmaps = 0;
	struct kvm_mmu_page *sp;
	int i;

	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
		u64 *pt = sp->spt;

		if (sp->role.level != PT_PAGE_TABLE_LEVEL)
			continue;

		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			u64 ent = pt[i];

			if (!(ent & PT_PRESENT_MASK))
				continue;
			if (!(ent & PT_WRITABLE_MASK))
				continue;
			++nmaps;
		}
	}
	return nmaps;
}

static void audit_rmap(struct kvm_vcpu *vcpu)
{
	int n_rmap = count_rmaps(vcpu);
	int n_actual = count_writable_mappings(vcpu);

	if (n_rmap != n_actual)
		printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
		       __FUNCTION__, audit_msg, n_rmap, n_actual);
}

static void audit_write_protection(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;
	struct kvm_memory_slot *slot;
	unsigned long *rmapp;
	gfn_t gfn;

	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
		if (sp->role.metaphysical)
			continue;

		slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
		gfn = unalias_gfn(vcpu->kvm, sp->gfn);
		rmapp = &slot->rmap[gfn - slot->base_gfn];
		if (*rmapp)
			printk(KERN_ERR "%s: (%s) shadow page has writable"
			       " mappings: gfn %lx role %x\n",
			       __FUNCTION__, audit_msg, sp->gfn,
			       sp->role.word);
	}
}

static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
{
	int olddbg = dbg;

	dbg = 0;
	audit_msg = msg;
	audit_rmap(vcpu);
	audit_write_protection(vcpu);
	audit_mappings(vcpu);
	dbg = olddbg;
}

#endif