/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *	Yaniv Kamay  <yaniv@qumranet.com>
 *	Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "vmx.h"
#include "mmu.h"

#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h>

#include <asm/page.h>
#include <asm/cmpxchg.h>
#include <asm/io.h>

#undef MMU_DEBUG

#undef AUDIT

#ifdef AUDIT
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
#endif

#ifdef MMU_DEBUG

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)

#else

#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)

#endif

#if defined(MMU_DEBUG) || defined(AUDIT)
static int dbg = 1;
#endif

#ifndef MMU_DEBUG
#define ASSERT(x) do { } while (0)
#else
#define ASSERT(x)							\
	if (!(x)) {							\
		printk(KERN_WARNING "assertion failed %s:%d: %s\n",	\
		       __FILE__, __LINE__, #x);				\
	}
#endif

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_MASK (1ULL << 5)
#define PT_DIRTY_MASK (1ULL << 6)
#define PT_PAGE_SIZE_MASK (1ULL << 7)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)

#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
	(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_LEVEL_MASK(level) \
	(((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))

#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
	(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LEVEL_MASK(level) \
	(((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))

#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#define PT64_DIR_BASE_ADDR_MASK \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))

#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
			| PT64_NX_MASK)

#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)
#define PFERR_FETCH_MASK (1U << 4)

#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

#define RMAP_EXT 4

#define ACC_EXEC_MASK    1
#define ACC_WRITE_MASK   PT_WRITABLE_MASK
#define ACC_USER_MASK    PT_USER_MASK
#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

struct kvm_rmap_desc {
	u64 *shadow_ptes[RMAP_EXT];
	struct kvm_rmap_desc *more;
};

static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
static struct kmem_cache *mmu_page_header_cache;

static u64 __read_mostly shadow_trap_nonpresent_pte;
static u64 __read_mostly shadow_notrap_nonpresent_pte;

void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
{
	shadow_trap_nonpresent_pte = trap_pte;
	shadow_notrap_nonpresent_pte = notrap_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);

static int is_write_protection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.cr0 & X86_CR0_WP;
}

static int is_cpuid_PSE36(void)
{
	return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.shadow_efer & EFER_NX;
}

static int is_present_pte(unsigned long pte)
{
	return pte & PT_PRESENT_MASK;
}

static int is_shadow_present_pte(u64 pte)
{
	pte &= ~PT_SHADOW_IO_MARK;
	return pte != shadow_trap_nonpresent_pte
		&& pte != shadow_notrap_nonpresent_pte;
}

static int is_writeble_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static int is_dirty_pte(unsigned long pte)
{
	return pte & PT_DIRTY_MASK;
}

static int is_io_pte(unsigned long pte)
{
	return pte & PT_SHADOW_IO_MARK;
}

static int is_rmap_pte(u64 pte)
{
	return pte != shadow_trap_nonpresent_pte
		&& pte != shadow_notrap_nonpresent_pte;
}

static gfn_t pse36_gfn_delta(u32 gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}

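/*
 * Write a shadow pte with a single atomic 64-bit store.  On i386 a pte
 * is wider than a native word, so a plain assignment could be observed
 * half-written by the hardware page walker; set_64bit() avoids that.
 */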
static void set_shadow_pte(u64 *sptep, u64 spte)
{
#ifdef CONFIG_X86_64
	set_64bit((unsigned long *)sptep, spte);
#else
	set_64bit((unsigned long long *)sptep, spte);
#endif
}

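/*
 * Pre-filled object caches.  The shadow fault paths run under mmu_lock
 * (a spinlock), where allocation must not sleep, so each vcpu tops up
 * these caches with GFP_KERNEL allocations beforehand and the mmu code
 * draws objects from them while locked.
 */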
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  struct kmem_cache *base_cache, int min)
{
	void *obj;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
		if (!obj)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = obj;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		kfree(mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
				       int min)
{
	struct page *page;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		page = alloc_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		set_page_private(page, 0);
		cache->objects[cache->nobjs++] = page_address(page);
	}
	return 0;
}

static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
				   pte_chain_cache, 4);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
				   rmap_desc_cache, 1);
	if (r)
		goto out;
	r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
				   mmu_page_header_cache, 4);
out:
	return r;
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
	mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
				    size_t size)
{
	void *p;

	BUG_ON(!mc->nobjs);
	p = mc->objects[--mc->nobjs];
	memset(p, 0, size);
	return p;
}

static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
				      sizeof(struct kvm_pte_chain));
}

static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
{
	kfree(pc);
}

static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
				      sizeof(struct kvm_rmap_desc));
}

static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
{
	kfree(rd);
}

/*
 * Take gfn and return the reverse mapping to it.
 * Note: gfn must be unaliased before this function gets called.
 */
static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	slot = gfn_to_memslot(kvm, gfn);
	return &slot->rmap[gfn - slot->base_gfn];
}

/*
 * Reverse mapping data structures:
 *
 * If rmapp bit zero is zero, then rmapp points to the shadow page table
 * entry that points to page_address(page).
 *
 * If rmapp bit zero is one, then (rmapp & ~1) points to a struct
 * kvm_rmap_desc containing more mappings.
 */
static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
	struct kvm_mmu_page *sp;
	struct kvm_rmap_desc *desc;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	gfn = unalias_gfn(vcpu->kvm, gfn);
	sp = page_header(__pa(spte));
	sp->gfns[spte - sp->spt] = gfn;
	rmapp = gfn_to_rmap(vcpu->kvm, gfn);
	if (!*rmapp) {
		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
		*rmapp = (unsigned long)spte;
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
		desc = mmu_alloc_rmap_desc(vcpu);
		desc->shadow_ptes[0] = (u64 *)*rmapp;
		desc->shadow_ptes[1] = spte;
		*rmapp = (unsigned long)desc | 1;
	} else {
		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
			desc = desc->more;
		if (desc->shadow_ptes[RMAP_EXT-1]) {
			desc->more = mmu_alloc_rmap_desc(vcpu);
			desc = desc->more;
		}
		for (i = 0; desc->shadow_ptes[i]; ++i)
			;
		desc->shadow_ptes[i] = spte;
	}
}

static void rmap_desc_remove_entry(unsigned long *rmapp,
				   struct kvm_rmap_desc *desc,
				   int i,
				   struct kvm_rmap_desc *prev_desc)
{
	int j;

	for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
		;
	desc->shadow_ptes[i] = desc->shadow_ptes[j];
	desc->shadow_ptes[j] = NULL;
	if (j != 0)
		return;
	if (!prev_desc && !desc->more)
		*rmapp = (unsigned long)desc->shadow_ptes[0];
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
			*rmapp = (unsigned long)desc->more | 1;
	mmu_free_rmap_desc(desc);
}

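/*
 * Undo rmap_add(): drop the spte's reference on the physical page
 * (marking it dirty if the spte allowed writes) and unlink the spte
 * from the gfn's rmap chain.
 */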
static void rmap_remove(struct kvm *kvm, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	struct kvm_mmu_page *sp;
	struct page *page;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	sp = page_header(__pa(spte));
	page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
	mark_page_accessed(page);
	if (is_writeble_pte(*spte))
		kvm_release_page_dirty(page);
	else
		kvm_release_page_clean(page);
	rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt]);
	if (!*rmapp) {
		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
		BUG();
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte);
		if ((u64 *)*rmapp != spte) {
			printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n",
			       spte, *spte);
			BUG();
		}
		*rmapp = 0;
	} else {
		rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		prev_desc = NULL;
		while (desc) {
			for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
				if (desc->shadow_ptes[i] == spte) {
					rmap_desc_remove_entry(rmapp,
							       desc, i,
							       prev_desc);
					return;
				}
			prev_desc = desc;
			desc = desc->more;
		}
		BUG();
	}
}

static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	u64 *prev_spte;
	int i;

	if (!*rmapp)
		return NULL;
	else if (!(*rmapp & 1)) {
		if (!spte)
			return (u64 *)*rmapp;
		return NULL;
	}
	desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
	prev_desc = NULL;
	prev_spte = NULL;
	while (desc) {
		for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
			if (prev_spte == spte)
				return desc->shadow_ptes[i];
			prev_spte = desc->shadow_ptes[i];
		}
		desc = desc->more;
	}
	return NULL;
}

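/*
 * Remove write access from every spte that maps gfn, so that guest
 * writes to the page fault into the mmu.  Remote TLBs are flushed
 * only if at least one spte was actually downgraded.
 */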
static void rmap_write_protect(struct kvm *kvm, u64 gfn)
{
	unsigned long *rmapp;
	u64 *spte;
	int write_protected = 0;

	gfn = unalias_gfn(kvm, gfn);
	rmapp = gfn_to_rmap(kvm, gfn);

	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		BUG_ON(!spte);
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
		if (is_writeble_pte(*spte)) {
			set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
			write_protected = 1;
		}
		spte = rmap_next(kvm, rmapp, spte);
	}
	if (write_protected)
		kvm_flush_remote_tlbs(kvm);
}

#ifdef MMU_DEBUG
static int is_empty_shadow_page(u64 *spt)
{
	u64 *pos;
	u64 *end;

	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
		if ((*pos & ~PT_SHADOW_IO_MARK) != shadow_trap_nonpresent_pte) {
			printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
			       pos, *pos);
			return 0;
		}
	return 1;
}
#endif

static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	ASSERT(is_empty_shadow_page(sp->spt));
	list_del(&sp->link);
	__free_page(virt_to_page(sp->spt));
	__free_page(virt_to_page(sp->gfns));
	kfree(sp);
	++kvm->arch.n_free_mmu_pages;
}

static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
	return gfn;
}

static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
					       u64 *parent_pte)
{
	struct kvm_mmu_page *sp;

	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
	sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
	ASSERT(is_empty_shadow_page(sp->spt));
	sp->slot_bitmap = 0;
	sp->multimapped = 0;
	sp->parent_pte = parent_pte;
	--vcpu->kvm->arch.n_free_mmu_pages;
	return sp;
}

static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp, u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!parent_pte)
		return;
	if (!sp->multimapped) {
		u64 *old = sp->parent_pte;

		if (!old) {
			sp->parent_pte = parent_pte;
			return;
		}
		sp->multimapped = 1;
		pte_chain = mmu_alloc_pte_chain(vcpu);
		INIT_HLIST_HEAD(&sp->parent_ptes);
		hlist_add_head(&pte_chain->link, &sp->parent_ptes);
		pte_chain->parent_ptes[0] = old;
	}
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
		if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
			continue;
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
			if (!pte_chain->parent_ptes[i]) {
				pte_chain->parent_ptes[i] = parent_pte;
				return;
			}
	}
	pte_chain = mmu_alloc_pte_chain(vcpu);
	BUG_ON(!pte_chain);
	hlist_add_head(&pte_chain->link, &sp->parent_ptes);
	pte_chain->parent_ptes[0] = parent_pte;
}

static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
				       u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!sp->multimapped) {
		BUG_ON(sp->parent_pte != parent_pte);
		sp->parent_pte = NULL;
		return;
	}
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
			if (!pte_chain->parent_ptes[i])
				break;
			if (pte_chain->parent_ptes[i] != parent_pte)
				continue;
			while (i + 1 < NR_PTE_CHAIN_ENTRIES
			       && pte_chain->parent_ptes[i + 1]) {
				pte_chain->parent_ptes[i]
					= pte_chain->parent_ptes[i + 1];
				++i;
			}
			pte_chain->parent_ptes[i] = NULL;
			if (i == 0) {
				hlist_del(&pte_chain->link);
				mmu_free_pte_chain(pte_chain);
				if (hlist_empty(&sp->parent_ptes)) {
					sp->multimapped = 0;
					sp->parent_pte = NULL;
				}
			}
			return;
		}
	BUG();
}

static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node;

	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry(sp, node, bucket, hash_link)
		if (sp->gfn == gfn && !sp->role.metaphysical) {
			pgprintk("%s: found role %x\n",
				 __FUNCTION__, sp->role.word);
			return sp;
		}
	return NULL;
}

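/*
 * Look up the shadow page for (gfn, role) in the hash table, creating
 * it on a miss.  The role (level, access, quadrant, guest paging mode)
 * distinguishes the multiple shadow pages a single guest page may need.
 * A newly created non-metaphysical shadow page write-protects the guest
 * page table it shadows, so later guest updates trap into
 * kvm_mmu_pte_write().
 */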
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
					     int metaphysical,
					     unsigned access,
					     u64 *parent_pte)
{
	union kvm_mmu_page_role role;
	unsigned index;
	unsigned quadrant;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node;

	role.word = 0;
	role.glevels = vcpu->arch.mmu.root_level;
	role.level = level;
	role.metaphysical = metaphysical;
	role.access = access;
	if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}
	pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
		 gfn, role.word);
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry(sp, node, bucket, hash_link)
		if (sp->gfn == gfn && sp->role.word == role.word) {
			mmu_page_add_parent_pte(vcpu, sp, parent_pte);
			pgprintk("%s: found\n", __FUNCTION__);
			return sp;
		}
	++vcpu->kvm->stat.mmu_cache_miss;
	sp = kvm_mmu_alloc_page(vcpu, parent_pte);
	if (!sp)
		return sp;
	pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
	sp->gfn = gfn;
	sp->role = role;
	hlist_add_head(&sp->hash_link, bucket);
	vcpu->arch.mmu.prefetch_page(vcpu, sp);
	if (!metaphysical)
		rmap_write_protect(vcpu->kvm, gfn);
	return sp;
}

static void kvm_mmu_page_unlink_children(struct kvm *kvm,
					 struct kvm_mmu_page *sp)
{
	unsigned i;
	u64 *pt;
	u64 ent;

	pt = sp->spt;

	if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			if (is_shadow_present_pte(pt[i]))
				rmap_remove(kvm, &pt[i]);
			pt[i] = shadow_trap_nonpresent_pte;
		}
		kvm_flush_remote_tlbs(kvm);
		return;
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		ent = pt[i];

		pt[i] = shadow_trap_nonpresent_pte;
		if (!is_shadow_present_pte(ent))
			continue;
		ent &= PT64_BASE_ADDR_MASK;
		mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
	}
	kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
{
	mmu_page_remove_parent_pte(sp, parent_pte);
}

static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
{
	int i;

	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i])
			kvm->vcpus[i]->arch.last_pte_updated = NULL;
}

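/*
 * Tear down a shadow page: clear every parent pte pointing at it,
 * unlink its children, and either free it or, if it is still in use
 * as a root, keep it on the active list until the root is dropped.
 */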
static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	u64 *parent_pte;

	++kvm->stat.mmu_shadow_zapped;
	while (sp->multimapped || sp->parent_pte) {
		if (!sp->multimapped)
			parent_pte = sp->parent_pte;
		else {
			struct kvm_pte_chain *chain;

			chain = container_of(sp->parent_ptes.first,
					     struct kvm_pte_chain, link);
			parent_pte = chain->parent_ptes[0];
		}
		BUG_ON(!parent_pte);
		kvm_mmu_put_page(sp, parent_pte);
		set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
	}
	kvm_mmu_page_unlink_children(kvm, sp);
	if (!sp->root_count) {
		hlist_del(&sp->hash_link);
		kvm_mmu_free_page(kvm, sp);
	} else
		list_move(&sp->link, &kvm->arch.active_mmu_pages);
	kvm_mmu_reset_last_pte_updated(kvm);
}

/*
 * Change the number of mmu pages allocated to the vm.
 * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock.
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
{
	/*
	 * If we set the number of mmu pages to be smaller than the
	 * number of active pages, we must free some mmu pages before
	 * we can change the value.
	 */

	if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
	    kvm_nr_mmu_pages) {
		int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
				       - kvm->arch.n_free_mmu_pages;

		while (n_used_mmu_pages > kvm_nr_mmu_pages) {
			struct kvm_mmu_page *page;

			page = container_of(kvm->arch.active_mmu_pages.prev,
					    struct kvm_mmu_page, link);
			kvm_mmu_zap_page(kvm, page);
			n_used_mmu_pages--;
		}
		kvm->arch.n_free_mmu_pages = 0;
	} else
		kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
					      - kvm->arch.n_alloc_mmu_pages;

	kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
}

static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node, *n;
	int r;

	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
	r = 0;
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
		if (sp->gfn == gfn && !sp->role.metaphysical) {
			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
				 sp->role.word);
			kvm_mmu_zap_page(kvm, sp);
			r = 1;
		}
	return r;
}

static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_mmu_page *sp;

	while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
		pgprintk("%s: zap %lx %x\n", __FUNCTION__, gfn, sp->role.word);
		kvm_mmu_zap_page(kvm, sp);
	}
}

static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
{
	int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
	struct kvm_mmu_page *sp = page_header(__pa(pte));

	__set_bit(slot, &sp->slot_bitmap);
}

struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct page *page;

	gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);

	if (gpa == UNMAPPED_GVA)
		return NULL;

	down_read(&current->mm->mmap_sem);
	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
	up_read(&current->mm->mmap_sem);

	return page;
}

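/*
 * Compute and install a shadow pte for gfn.  Write permission is
 * withheld when the gfn itself is shadowed as a page table, so guest
 * page table writes keep trapping; *ptwrite reports that case to the
 * caller.  The caller's page reference is either transferred to the
 * new spte's rmap entry or dropped.
 */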
static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
			 unsigned pt_access, unsigned pte_access,
			 int user_fault, int write_fault, int dirty,
			 int *ptwrite, gfn_t gfn, struct page *page)
{
	u64 spte;
	int was_rmapped = is_rmap_pte(*shadow_pte);
	int was_writeble = is_writeble_pte(*shadow_pte);

	pgprintk("%s: spte %llx access %x write_fault %d"
		 " user_fault %d gfn %lx\n",
		 __FUNCTION__, *shadow_pte, pt_access,
		 write_fault, user_fault, gfn);

	/*
	 * We don't set the accessed bit, since we sometimes want to see
	 * whether the guest actually used the pte (in order to detect
	 * demand paging).
	 */
	spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
	if (!dirty)
		pte_access &= ~ACC_WRITE_MASK;
	if (!(pte_access & ACC_EXEC_MASK))
		spte |= PT64_NX_MASK;

	spte |= PT_PRESENT_MASK;
	if (pte_access & ACC_USER_MASK)
		spte |= PT_USER_MASK;

	if (is_error_page(page)) {
		set_shadow_pte(shadow_pte,
			       shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
		kvm_release_page_clean(page);
		return;
	}

	spte |= page_to_phys(page);

	if ((pte_access & ACC_WRITE_MASK)
	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
		struct kvm_mmu_page *shadow;

		spte |= PT_WRITABLE_MASK;
		if (user_fault) {
			mmu_unshadow(vcpu->kvm, gfn);
			goto unshadowed;
		}

		shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
		if (shadow) {
			pgprintk("%s: found shadow page for %lx, marking ro\n",
				 __FUNCTION__, gfn);
			pte_access &= ~ACC_WRITE_MASK;
			if (is_writeble_pte(spte)) {
				spte &= ~PT_WRITABLE_MASK;
				kvm_x86_ops->tlb_flush(vcpu);
			}
			if (write_fault)
				*ptwrite = 1;
		}
	}

unshadowed:

	if (pte_access & ACC_WRITE_MASK)
		mark_page_dirty(vcpu->kvm, gfn);

	pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte);
	set_shadow_pte(shadow_pte, spte);
	page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
	if (!was_rmapped) {
		rmap_add(vcpu, shadow_pte, gfn);
		if (!is_rmap_pte(*shadow_pte))
			kvm_release_page_clean(page);
	} else {
		if (was_writeble)
			kvm_release_page_dirty(page);
		else
			kvm_release_page_clean(page);
	}
	if (!ptwrite || !*ptwrite)
		vcpu->arch.last_pte_updated = shadow_pte;
}

static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}

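/*
 * Walk the shadow page table for the faulting address, allocating
 * intermediate shadow pages as needed, and install a pte for gfn at
 * the last level.  Returns nonzero when the access has to be emulated
 * (an mmio pte, or a write that touched a shadowed page table).
 */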
static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write,
			   gfn_t gfn, struct page *page)
{
	int level = PT32E_ROOT_LEVEL;
	hpa_t table_addr = vcpu->arch.mmu.root_hpa;
	int pt_write = 0;

	for (; ; level--) {
		u32 index = PT64_INDEX(v, level);
		u64 *table;

		ASSERT(VALID_PAGE(table_addr));
		table = __va(table_addr);

		if (level == 1) {
			mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
				     0, write, 1, &pt_write, gfn, page);
			return pt_write || is_io_pte(table[index]);
		}

		if (table[index] == shadow_trap_nonpresent_pte) {
			struct kvm_mmu_page *new_table;
			gfn_t pseudo_gfn;

			pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
						     v, level - 1,
						     1, ACC_ALL, &table[index]);
			if (!new_table) {
				pgprintk("nonpaging_map: ENOMEM\n");
				kvm_release_page_clean(page);
				return -ENOMEM;
			}

			table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
				| PT_WRITABLE_MASK | PT_USER_MASK;
		}
		table_addr = table[index] & PT64_BASE_ADDR_MASK;
	}
}

static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
{
	int r;
	struct page *page;

	down_read(&vcpu->kvm->slots_lock);

	down_read(&current->mm->mmap_sem);
	page = gfn_to_page(vcpu->kvm, gfn);
	up_read(&current->mm->mmap_sem);

	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);
	r = __nonpaging_map(vcpu, v, write, gfn, page);
	spin_unlock(&vcpu->kvm->mmu_lock);

	up_read(&vcpu->kvm->slots_lock);

	return r;
}

static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp)
{
	int i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		sp->spt[i] = shadow_trap_nonpresent_pte;
}

static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;
	spin_lock(&vcpu->kvm->mmu_lock);
#ifdef CONFIG_X86_64
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		sp = page_header(root);
		--sp->root_count;
		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
		spin_unlock(&vcpu->kvm->mmu_lock);
		return;
	}
#endif
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			--sp->root_count;
		}
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
	}
	spin_unlock(&vcpu->kvm->mmu_lock);
	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
}

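/*
 * Allocate shadow root(s) for the current mode: one 4-level root for
 * long mode, otherwise four pae_root entries shadowing the guest's
 * pdptrs (or metaphysical roots when the guest has paging disabled).
 */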
static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
	int i;
	gfn_t root_gfn;
	struct kvm_mmu_page *sp;

	root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;

#ifdef CONFIG_X86_64
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		ASSERT(!VALID_PAGE(root));
		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
				      PT64_ROOT_LEVEL, 0, ACC_ALL, NULL);
		root = __pa(sp->spt);
		++sp->root_count;
		vcpu->arch.mmu.root_hpa = root;
		return;
	}
#endif
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		ASSERT(!VALID_PAGE(root));
		if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
			if (!is_present_pte(vcpu->arch.pdptrs[i])) {
				vcpu->arch.mmu.pae_root[i] = 0;
				continue;
			}
			root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
		} else if (vcpu->arch.mmu.root_level == 0)
			root_gfn = 0;
		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
				      PT32_ROOT_LEVEL, !is_paging(vcpu),
				      ACC_ALL, NULL);
		root = __pa(sp->spt);
		++sp->root_count;
		vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
	}
	vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
}

static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	return vaddr;
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
				u32 error_code)
{
	gfn_t gfn;
	int r;

	pgprintk("%s: gva %lx error %x\n", __FUNCTION__, gva, error_code);
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

	gfn = gva >> PAGE_SHIFT;

	return nonpaging_map(vcpu, gva & PAGE_MASK,
			     error_code & PFERR_WRITE_MASK, gfn);
}

static void nonpaging_free(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

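/*
 * "Nonpaging" context: used while the guest runs with paging disabled,
 * so guest virtual addresses equal guest physical ones and gva_to_gpa
 * is the identity.  The shadow pages themselves still use PAE format
 * (PT32E_ROOT_LEVEL).
 */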
static int nonpaging_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->free = nonpaging_free;
	context->prefetch_page = nonpaging_prefetch_page;
	context->root_level = 0;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;
	kvm_x86_ops->tlb_flush(vcpu);
}

static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
	pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->arch.cr3);
	mmu_free_roots(vcpu);
}

static void inject_page_fault(struct kvm_vcpu *vcpu,
			      u64 addr,
			      u32 err_code)
{
	kvm_inject_page_fault(vcpu, addr, err_code);
}

static void paging_free(struct kvm_vcpu *vcpu)
{
	nonpaging_free(vcpu);
}

#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	ASSERT(is_pae(vcpu));
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
	context->prefetch_page = paging64_prefetch_page;
	context->free = paging_free;
	context->root_level = level;
	context->shadow_root_level = level;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging64_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
}

static int paging32_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
	context->free = paging_free;
	context->prefetch_page = paging32_prefetch_page;
	context->root_level = PT32_ROOT_LEVEL;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging32E_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
}

static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	if (!is_paging(vcpu))
		return nonpaging_init_context(vcpu);
	else if (is_long_mode(vcpu))
		return paging64_init_context(vcpu);
	else if (is_pae(vcpu))
		return paging32E_init_context(vcpu);
	else
		return paging32_init_context(vcpu);
}

static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
		vcpu->arch.mmu.free(vcpu);
		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
	}
}

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
	destroy_kvm_mmu(vcpu);
	return init_kvm_mmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
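/*
 * Make the mmu context usable for guest entry: top up the caches,
 * allocate the shadow roots under mmu_lock, and point the hardware at
 * the new root via kvm_x86_ops->set_cr3().
 */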
int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;
	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);
	mmu_alloc_roots(vcpu);
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
	kvm_mmu_flush_tlb(vcpu);
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_load);

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp,
				  u64 *spte)
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
	if (is_shadow_present_pte(pte)) {
		if (sp->role.level == PT_PAGE_TABLE_LEVEL)
			rmap_remove(vcpu->kvm, spte);
		else {
			child = page_header(pte & PT64_BASE_ADDR_MASK);
			mmu_page_remove_parent_pte(child, spte);
		}
	}
	set_shadow_pte(spte, shadow_trap_nonpresent_pte);
}

static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp,
				  u64 *spte,
				  const void *new, int bytes,
				  int offset_in_pte)
{
	if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
		++vcpu->kvm->stat.mmu_pde_zapped;
		return;
	}

	++vcpu->kvm->stat.mmu_pte_updated;
	if (sp->role.glevels == PT32_ROOT_LEVEL)
		paging32_update_pte(vcpu, sp, spte, new, bytes, offset_in_pte);
	else
		paging64_update_pte(vcpu, sp, spte, new, bytes, offset_in_pte);
}

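/*
 * A remote TLB flush is needed only when a previously present spte
 * changes its frame or loses some permission; newly installed or
 * permission-widening updates cannot be held stale in another cpu's
 * TLB.
 */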
static bool need_remote_flush(u64 old, u64 new)
{
	if (!is_shadow_present_pte(old))
		return false;
	if (!is_shadow_present_pte(new))
		return true;
	if ((old ^ new) & PT64_BASE_ADDR_MASK)
		return true;
	old ^= PT64_NX_MASK;
	new ^= PT64_NX_MASK;
	return (old & ~new & PT64_PERM_MASK) != 0;
}

static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
{
	if (need_remote_flush(old, new))
		kvm_flush_remote_tlbs(vcpu->kvm);
	else
		kvm_mmu_flush_tlb(vcpu);
}

static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
{
	u64 *spte = vcpu->arch.last_pte_updated;

	return !!(spte && (*spte & PT_ACCESSED_MASK));
}

static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
					  const u8 *new, int bytes)
{
	gfn_t gfn;
	int r;
	u64 gpte = 0;
	struct page *page;

	if (bytes != 4 && bytes != 8)
		return;

	/*
	 * Assume that the pte write is on a page table of the same type
	 * as the current vcpu paging mode.  This is nearly always true
	 * (might be false while changing modes).  Note it is verified later
	 * by update_pte().
	 */
	if (is_pae(vcpu)) {
		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
		if ((bytes == 4) && (gpa % 4 == 0)) {
			r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
			if (r)
				return;
			memcpy((void *)&gpte + (gpa % 8), new, 4);
		} else if ((bytes == 8) && (gpa % 8 == 0)) {
			memcpy((void *)&gpte, new, 8);
		}
	} else {
		if ((bytes == 4) && (gpa % 4 == 0))
			memcpy((void *)&gpte, new, 4);
	}
	if (!is_present_pte(gpte))
		return;
	gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;

	down_read(&current->mm->mmap_sem);
	page = gfn_to_page(vcpu->kvm, gfn);
	up_read(&current->mm->mmap_sem);

	vcpu->arch.update_pte.gfn = gfn;
	vcpu->arch.update_pte.page = page;
}

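/*
 * Called from the emulator whenever the guest writes to a gpa that is
 * shadowed as a page table: update the affected shadow ptes in place.
 * A per-vcpu flood counter detects pages that take repeated writes
 * without ever being used for translation; those are most likely no
 * longer page tables and are zapped instead of being tracked.
 */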
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_mmu_page *sp;
	struct hlist_node *node, *n;
	struct hlist_head *bucket;
	unsigned index;
	u64 entry;
	u64 *spte;
	unsigned offset = offset_in_page(gpa);
	unsigned pte_size;
	unsigned page_offset;
	unsigned misaligned;
	unsigned quadrant;
	int level;
	int flooded = 0;
	int npte;

	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
	mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);
	++vcpu->kvm->stat.mmu_pte_write;
	kvm_mmu_audit(vcpu, "pre pte write");
	if (gfn == vcpu->arch.last_pt_write_gfn
	    && !last_updated_pte_accessed(vcpu)) {
		++vcpu->arch.last_pt_write_count;
		if (vcpu->arch.last_pt_write_count >= 3)
			flooded = 1;
	} else {
		vcpu->arch.last_pt_write_gfn = gfn;
		vcpu->arch.last_pt_write_count = 1;
		vcpu->arch.last_pte_updated = NULL;
	}
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
		if (sp->gfn != gfn || sp->role.metaphysical)
			continue;
		pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
		misaligned |= bytes < 4;
		if (misaligned || flooded) {
			/*
			 * Misaligned accesses are too much trouble to fix
			 * up; also, they usually indicate a page is not used
			 * as a page table.
			 *
			 * If we're seeing too many writes to a page,
			 * it may no longer be a page table, or we may be
			 * forking, in which case it is better to unmap the
			 * page.
			 */
			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
				 gpa, bytes, sp->role.word);
			kvm_mmu_zap_page(vcpu->kvm, sp);
			++vcpu->kvm->stat.mmu_flooded;
			continue;
		}
		page_offset = offset;
		level = sp->role.level;
		npte = 1;
		if (sp->role.glevels == PT32_ROOT_LEVEL) {
			page_offset <<= 1;	/* 32->64 */
			/*
			 * A 32-bit pde maps 4MB while the shadow pdes map
			 * only 2MB.  So we need to double the offset again
			 * and zap two pdes instead of one.
			 */
			if (level == PT32_ROOT_LEVEL) {
				page_offset &= ~7; /* kill rounding error */
				page_offset <<= 1;
				npte = 2;
			}
			quadrant = page_offset >> PAGE_SHIFT;
			page_offset &= ~PAGE_MASK;
			if (quadrant != sp->role.quadrant)
				continue;
		}
		spte = &sp->spt[page_offset / sizeof(*spte)];
		while (npte--) {
			entry = *spte;
			mmu_pte_write_zap_pte(vcpu, sp, spte);
			mmu_pte_write_new_pte(vcpu, sp, spte, new, bytes,
					      page_offset & (pte_size - 1));
			mmu_pte_write_flush_tlb(vcpu, entry, *spte);
			++spte;
		}
	}
	kvm_mmu_audit(vcpu, "post pte write");
	spin_unlock(&vcpu->kvm->mmu_lock);
	if (vcpu->arch.update_pte.page) {
		kvm_release_page_clean(vcpu->arch.update_pte.page);
		vcpu->arch.update_pte.page = NULL;
	}
}

int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa;
	int r;

	down_read(&vcpu->kvm->slots_lock);
	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
	up_read(&vcpu->kvm->slots_lock);

	spin_lock(&vcpu->kvm->mmu_lock);
	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
	spin_unlock(&vcpu->kvm->mmu_lock);
	return r;
}

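/*
 * Recycle shadow pages from the tail of the active list (the least
 * recently allocated ones) until the free pool is back above
 * KVM_REFILL_PAGES.
 */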
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
	while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
		struct kvm_mmu_page *sp;

		sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
				  struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu->kvm, sp);
		++vcpu->kvm->stat.mmu_recycled;
	}
}

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
{
	int r;
	enum emulation_result er;

	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
	if (r < 0)
		goto out;

	if (!r) {
		r = 1;
		goto out;
	}

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;

	er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);

	switch (er) {
	case EMULATE_DONE:
		return 1;
	case EMULATE_DO_MMIO:
		++vcpu->stat.mmio_exits;
		return 0;
	case EMULATE_FAIL:
		kvm_report_emulation_failure(vcpu, "pagetable");
		return 1;
	default:
		BUG();
	}
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);

static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;

	while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
		sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
				  struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu->kvm, sp);
	}
	free_page((unsigned long)vcpu->arch.mmu.pae_root);
}

static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct page *page;
	int i;

	ASSERT(vcpu);

	if (vcpu->kvm->arch.n_requested_mmu_pages)
		vcpu->kvm->arch.n_free_mmu_pages =
					vcpu->kvm->arch.n_requested_mmu_pages;
	else
		vcpu->kvm->arch.n_free_mmu_pages =
					vcpu->kvm->arch.n_alloc_mmu_pages;
	/*
	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
	 * Therefore we need to allocate shadow page tables in the first
	 * 4GB of memory, which happens to fit the DMA32 zone.
	 */
	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!page)
		goto error_1;
	vcpu->arch.mmu.pae_root = page_address(page);
	for (i = 0; i < 4; ++i)
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;

	return 0;

error_1:
	free_mmu_pages(vcpu);
	return -ENOMEM;
}

int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	return alloc_mmu_pages(vcpu);
}

int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	return init_kvm_mmu(vcpu);
}

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);

	destroy_kvm_mmu(vcpu);
	free_mmu_pages(vcpu);
	mmu_free_memory_caches(vcpu);
}

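/*
 * Strip the writable bit from every spte belonging to a memory slot,
 * e.g. when dirty-page logging is enabled for it, so that the first
 * write to each page becomes visible as a fault.
 */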
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
	struct kvm_mmu_page *sp;

	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
		int i;
		u64 *pt;

		if (!test_bit(slot, &sp->slot_bitmap))
			continue;

		pt = sp->spt;
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
			/* avoid RMW */
			if (pt[i] & PT_WRITABLE_MASK)
				pt[i] &= ~PT_WRITABLE_MASK;
	}
}

void kvm_mmu_zap_all(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;

	spin_lock(&kvm->mmu_lock);
	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
		kvm_mmu_zap_page(kvm, sp);
	spin_unlock(&kvm->mmu_lock);

	kvm_flush_remote_tlbs(kvm);
}

void kvm_mmu_module_exit(void)
{
	if (pte_chain_cache)
		kmem_cache_destroy(pte_chain_cache);
	if (rmap_desc_cache)
		kmem_cache_destroy(rmap_desc_cache);
	if (mmu_page_header_cache)
		kmem_cache_destroy(mmu_page_header_cache);
}

int kvm_mmu_module_init(void)
{
	pte_chain_cache = kmem_cache_create("kvm_pte_chain",
					    sizeof(struct kvm_pte_chain),
					    0, 0, NULL);
	if (!pte_chain_cache)
		goto nomem;
	rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
					    sizeof(struct kvm_rmap_desc),
					    0, 0, NULL);
	if (!rmap_desc_cache)
		goto nomem;

	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
						  sizeof(struct kvm_mmu_page),
						  0, 0, NULL);
	if (!mmu_page_header_cache)
		goto nomem;

	return 0;

nomem:
	kvm_mmu_module_exit();
	return -ENOMEM;
}

/*
 * Calculate the number of mmu pages needed for the vm: a fixed
 * fraction (KVM_PERMILLE_MMU_PAGES per thousand) of the guest's
 * memory pages, with a floor of KVM_MIN_ALLOC_MMU_PAGES.
 */
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
{
	int i;
	unsigned int nr_mmu_pages;
	unsigned int nr_pages = 0;

	for (i = 0; i < kvm->nmemslots; i++)
		nr_pages += kvm->memslots[i].npages;

	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
	nr_mmu_pages = max(nr_mmu_pages,
			   (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);

	return nr_mmu_pages;
}

#ifdef AUDIT

static const char *audit_msg;

static gva_t canonicalize(gva_t gva)
{
#ifdef CONFIG_X86_64
	gva = (long long)(gva << 16) >> 16;
#endif
	return gva;
}

static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
				gva_t va, int level)
{
	u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
	int i;
	gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
		u64 ent = pt[i];

		if (ent == shadow_trap_nonpresent_pte)
			continue;

		va = canonicalize(va);
		if (level > 1) {
			if (ent == shadow_notrap_nonpresent_pte)
				printk(KERN_ERR "audit: (%s) nontrapping pte"
				       " in nonleaf level: levels %d gva %lx"
				       " level %d pte %llx\n", audit_msg,
				       vcpu->arch.mmu.root_level, va, level, ent);

			audit_mappings_page(vcpu, ent, va, level - 1);
		} else {
			gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
			struct page *page = gpa_to_page(vcpu, gpa);
			hpa_t hpa = page_to_phys(page);

			if (is_shadow_present_pte(ent)
			    && (ent & PT64_BASE_ADDR_MASK) != hpa)
				printk(KERN_ERR "xx audit error: (%s) levels %d"
				       " gva %lx gpa %llx hpa %llx ent %llx %d\n",
				       audit_msg, vcpu->arch.mmu.root_level,
				       va, gpa, hpa, ent,
				       is_shadow_present_pte(ent));
			else if (ent == shadow_notrap_nonpresent_pte
				 && !is_error_hpa(hpa))
				printk(KERN_ERR "audit: (%s) notrap shadow,"
				       " valid guest gva %lx\n", audit_msg, va);
			kvm_release_page_clean(page);
		}
	}
}

static void audit_mappings(struct kvm_vcpu *vcpu)
{
	unsigned i;

	if (vcpu->arch.mmu.root_level == 4)
		audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
	else
		for (i = 0; i < 4; ++i)
			if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
				audit_mappings_page(vcpu,
						    vcpu->arch.mmu.pae_root[i],
						    i << 30,
						    2);
}

static int count_rmaps(struct kvm_vcpu *vcpu)
{
	int nmaps = 0;
	int i, j, k;

	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
		struct kvm_rmap_desc *d;

		for (j = 0; j < m->npages; ++j) {
			unsigned long *rmapp = &m->rmap[j];

			if (!*rmapp)
				continue;
			if (!(*rmapp & 1)) {
				++nmaps;
				continue;
			}
			d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
			while (d) {
				for (k = 0; k < RMAP_EXT; ++k)
					if (d->shadow_ptes[k])
						++nmaps;
					else
						break;
				d = d->more;
			}
		}
	}
	return nmaps;
}

static int count_writable_mappings(struct kvm_vcpu *vcpu)
{
	int nmaps = 0;
	struct kvm_mmu_page *sp;
	int i;

	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
		u64 *pt = sp->spt;

		if (sp->role.level != PT_PAGE_TABLE_LEVEL)
			continue;

		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			u64 ent = pt[i];

			if (!(ent & PT_PRESENT_MASK))
				continue;
			if (!(ent & PT_WRITABLE_MASK))
				continue;
			++nmaps;
		}
	}
	return nmaps;
}

static void audit_rmap(struct kvm_vcpu *vcpu)
{
	int n_rmap = count_rmaps(vcpu);
	int n_actual = count_writable_mappings(vcpu);

	if (n_rmap != n_actual)
		printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
		       __FUNCTION__, audit_msg, n_rmap, n_actual);
}

static void audit_write_protection(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;
	struct kvm_memory_slot *slot;
	unsigned long *rmapp;
	gfn_t gfn;

	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
		if (sp->role.metaphysical)
			continue;

		slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
		gfn = unalias_gfn(vcpu->kvm, sp->gfn);
		rmapp = &slot->rmap[gfn - slot->base_gfn];
		if (*rmapp)
			printk(KERN_ERR "%s: (%s) shadow page has writable"
			       " mappings: gfn %lx role %x\n",
			       __FUNCTION__, audit_msg, sp->gfn,
			       sp->role.word);
	}
}

static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
{
	int olddbg = dbg;

	dbg = 0;
	audit_msg = msg;
	audit_rmap(vcpu);
	audit_write_protection(vcpu);
	audit_mappings(vcpu);
	dbg = olddbg;
}

#endif