/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "vmx.h"
#include "kvm.h"
#include "x86.h"

#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>

#include <asm/page.h>
#include <asm/cmpxchg.h>

#undef MMU_DEBUG

#undef AUDIT

#ifdef AUDIT
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
#endif

#ifdef MMU_DEBUG

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)

#else

#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)

#endif

#if defined(MMU_DEBUG) || defined(AUDIT)
static int dbg = 1;
#endif

#ifndef MMU_DEBUG
#define ASSERT(x) do { } while (0)
#else
#define ASSERT(x)							\
	if (!(x)) {							\
		printk(KERN_WARNING "assertion failed %s:%d: %s\n",	\
		       __FILE__, __LINE__, #x);				\
	}
#endif

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_MASK (1ULL << 5)
#define PT_DIRTY_MASK (1ULL << 6)
#define PT_PAGE_SIZE_MASK (1ULL << 7)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_MASK (1ULL << 63)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)


#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)

#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
	(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_LEVEL_MASK(level) \
	(((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))


#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
	(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LEVEL_MASK(level) \
	(((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))

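/*
 * Example: with 4KB pages (PAGE_SHIFT == 12) and 9 bits per level,
 * PT64_INDEX() picks successive 9-bit index fields out of an address:
 *
 *	PT64_INDEX(addr, 1) == (addr >> 12) & 511	(pte index)
 *	PT64_INDEX(addr, 2) == (addr >> 21) & 511	(pde index)
 *	PT64_INDEX(addr, 3) == (addr >> 30) & 511	(pdpte index)
 *	PT64_INDEX(addr, 4) == (addr >> 39) & 511	(pml4 index)
 */
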
#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#define PT64_DIR_BASE_ADDR_MASK \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))


#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)
#define PFERR_FETCH_MASK (1U << 4)

#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

#define RMAP_EXT 4

struct kvm_rmap_desc {
	u64 *shadow_ptes[RMAP_EXT];
	struct kvm_rmap_desc *more;
};

static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
static struct kmem_cache *mmu_page_header_cache;

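/*
 * Two flavors of "not present" shadow pte, supplied by the vendor
 * module through kvm_mmu_set_nonpresent_ptes(): a "trap" nonpresent
 * spte always faults into kvm, while a "notrap" nonpresent spte marks
 * entries known to be not present in the guest page table as well, so
 * that (where the hardware supports it) the resulting page fault can
 * be reflected to the guest without a vm exit.
 */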
static u64 __read_mostly shadow_trap_nonpresent_pte;
static u64 __read_mostly shadow_notrap_nonpresent_pte;

void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
{
	shadow_trap_nonpresent_pte = trap_pte;
	shadow_notrap_nonpresent_pte = notrap_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);

static int is_write_protection(struct kvm_vcpu *vcpu)
{
	return vcpu->cr0 & X86_CR0_WP;
}

static int is_cpuid_PSE36(void)
{
	return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
	return vcpu->shadow_efer & EFER_NX;
}

static int is_present_pte(unsigned long pte)
{
	return pte & PT_PRESENT_MASK;
}

static int is_shadow_present_pte(u64 pte)
{
	pte &= ~PT_SHADOW_IO_MARK;
	return pte != shadow_trap_nonpresent_pte
		&& pte != shadow_notrap_nonpresent_pte;
}

static int is_writeble_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static int is_dirty_pte(unsigned long pte)
{
	return pte & PT_DIRTY_MASK;
}

static int is_io_pte(unsigned long pte)
{
	return pte & PT_SHADOW_IO_MARK;
}

static int is_rmap_pte(u64 pte)
{
	return pte != shadow_trap_nonpresent_pte
		&& pte != shadow_notrap_nonpresent_pte;
}

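/*
 * Shadow ptes are written with set_64bit() so that the update is a
 * single 64-bit store even on 32-bit hosts; the hardware (and other
 * vcpus) may walk the shadow page tables concurrently and must never
 * see a half-written pte.
 */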
static void set_shadow_pte(u64 *sptep, u64 spte)
{
#ifdef CONFIG_X86_64
	set_64bit((unsigned long *)sptep, spte);
#else
	set_64bit((unsigned long long *)sptep, spte);
#endif
}

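/*
 * The caches below pre-allocate the objects the shadow mmu may need
 * (pte chains, rmap descriptors, shadow pages and their headers).
 * The intent, as far as the callers show, is that allocation happens
 * up front, where failure is easy to handle, so mmu update paths
 * never have to bail out halfway through because the allocator failed
 * or slept at an awkward point.
 */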
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  struct kmem_cache *base_cache, int min)
{
	void *obj;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
		if (!obj)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = obj;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		kfree(mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
				       int min)
{
	struct page *page;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		page = alloc_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		set_page_private(page, 0);
		cache->objects[cache->nobjs++] = page_address(page);
	}
	return 0;
}

static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
	int r;

	kvm_mmu_free_some_pages(vcpu);
	r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
				   pte_chain_cache, 4);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
				   rmap_desc_cache, 1);
	if (r)
		goto out;
	r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 8);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
				   mmu_page_header_cache, 4);
out:
	return r;
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
	mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
	mmu_free_memory_cache_page(&vcpu->mmu_page_cache);
	mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
				    size_t size)
{
	void *p;

	BUG_ON(!mc->nobjs);
	p = mc->objects[--mc->nobjs];
	memset(p, 0, size);
	return p;
}

static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache,
				      sizeof(struct kvm_pte_chain));
}

static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
{
	kfree(pc);
}

static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->mmu_rmap_desc_cache,
				      sizeof(struct kvm_rmap_desc));
}

static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
{
	kfree(rd);
}

/*
 * Take gfn and return the reverse mapping to it.
 * Note: gfn must be unaliased before this function gets called.
 */
static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	slot = gfn_to_memslot(kvm, gfn);
	return &slot->rmap[gfn - slot->base_gfn];
}

/*
 * Reverse mapping data structures:
 *
 * If rmapp bit zero is zero, then rmapp points to the shadow page
 * table entry that points to page_address(page).
 *
 * If rmapp bit zero is one, then (rmapp & ~1) points to a struct
 * kvm_rmap_desc containing more mappings.
 */
static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
	struct kvm_mmu_page *page;
	struct kvm_rmap_desc *desc;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	gfn = unalias_gfn(vcpu->kvm, gfn);
	page = page_header(__pa(spte));
	page->gfns[spte - page->spt] = gfn;
	rmapp = gfn_to_rmap(vcpu->kvm, gfn);
	if (!*rmapp) {
		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
		*rmapp = (unsigned long)spte;
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
		desc = mmu_alloc_rmap_desc(vcpu);
		desc->shadow_ptes[0] = (u64 *)*rmapp;
		desc->shadow_ptes[1] = spte;
		*rmapp = (unsigned long)desc | 1;
	} else {
		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
			desc = desc->more;
		if (desc->shadow_ptes[RMAP_EXT-1]) {
			desc->more = mmu_alloc_rmap_desc(vcpu);
			desc = desc->more;
		}
		for (i = 0; desc->shadow_ptes[i]; ++i)
			;
		desc->shadow_ptes[i] = spte;
	}
}

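/*
 * Worked example of the encoding above (addresses are made up): after
 * the first spte for a gfn is added, the rmap slot holds that spte
 * pointer directly, e.g. 0xffff810012345678.  Adding a second spte
 * allocates a kvm_rmap_desc d, moves the first pointer into
 * d->shadow_ptes[0], stores the new spte in d->shadow_ptes[1], and
 * sets the slot to (unsigned long)d | 1 -- bit zero now tags the slot
 * as a descriptor chain rather than a lone spte pointer.
 */
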
static void rmap_desc_remove_entry(unsigned long *rmapp,
				   struct kvm_rmap_desc *desc,
				   int i,
				   struct kvm_rmap_desc *prev_desc)
{
	int j;

	for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
		;
	desc->shadow_ptes[i] = desc->shadow_ptes[j];
	desc->shadow_ptes[j] = NULL;
	if (j != 0)
		return;
	if (!prev_desc && !desc->more)
		*rmapp = (unsigned long)desc->shadow_ptes[0];
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
			*rmapp = (unsigned long)desc->more | 1;
	mmu_free_rmap_desc(desc);
}

static void rmap_remove(struct kvm *kvm, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	struct kvm_mmu_page *page;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	page = page_header(__pa(spte));
	kvm_release_page(pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >>
			 PAGE_SHIFT));
	rmapp = gfn_to_rmap(kvm, page->gfns[spte - page->spt]);
	if (!*rmapp) {
		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
		BUG();
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte);
		if ((u64 *)*rmapp != spte) {
			printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n",
			       spte, *spte);
			BUG();
		}
		*rmapp = 0;
	} else {
		rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		prev_desc = NULL;
		while (desc) {
			for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
				if (desc->shadow_ptes[i] == spte) {
					rmap_desc_remove_entry(rmapp,
							       desc, i,
							       prev_desc);
					return;
				}
			prev_desc = desc;
			desc = desc->more;
		}
		BUG();
	}
}

static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	u64 *prev_spte;
	int i;

	if (!*rmapp)
		return NULL;
	else if (!(*rmapp & 1)) {
		if (!spte)
			return (u64 *)*rmapp;
		return NULL;
	}
	desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
	prev_desc = NULL;
	prev_spte = NULL;
	while (desc) {
		for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
			if (prev_spte == spte)
				return desc->shadow_ptes[i];
			prev_spte = desc->shadow_ptes[i];
		}
		desc = desc->more;
	}
	return NULL;
}

static void rmap_write_protect(struct kvm *kvm, u64 gfn)
{
	unsigned long *rmapp;
	u64 *spte;

	gfn = unalias_gfn(kvm, gfn);
	rmapp = gfn_to_rmap(kvm, gfn);

	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		BUG_ON(!spte);
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
		if (is_writeble_pte(*spte))
			set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
		kvm_flush_remote_tlbs(kvm);
		spte = rmap_next(kvm, rmapp, spte);
	}
}

#ifdef MMU_DEBUG
static int is_empty_shadow_page(u64 *spt)
{
	u64 *pos;
	u64 *end;

	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
		if ((*pos & ~PT_SHADOW_IO_MARK) != shadow_trap_nonpresent_pte) {
			printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
			       pos, *pos);
			return 0;
		}
	return 1;
}
#endif

static void kvm_mmu_free_page(struct kvm *kvm,
			      struct kvm_mmu_page *page_head)
{
	ASSERT(is_empty_shadow_page(page_head->spt));
	list_del(&page_head->link);
	__free_page(virt_to_page(page_head->spt));
	__free_page(virt_to_page(page_head->gfns));
	kfree(page_head);
	++kvm->n_free_mmu_pages;
}

static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
	return gfn;
}

static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
					       u64 *parent_pte)
{
	struct kvm_mmu_page *page;

	if (!vcpu->kvm->n_free_mmu_pages)
		return NULL;

	page = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache,
				      sizeof *page);
	page->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
	page->gfns = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
	set_page_private(virt_to_page(page->spt), (unsigned long)page);
	list_add(&page->link, &vcpu->kvm->active_mmu_pages);
	ASSERT(is_empty_shadow_page(page->spt));
	page->slot_bitmap = 0;
	page->multimapped = 0;
	page->parent_pte = parent_pte;
	--vcpu->kvm->n_free_mmu_pages;
	return page;
}

static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *page, u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!parent_pte)
		return;
	if (!page->multimapped) {
		u64 *old = page->parent_pte;

		if (!old) {
			page->parent_pte = parent_pte;
			return;
		}
		page->multimapped = 1;
		pte_chain = mmu_alloc_pte_chain(vcpu);
		INIT_HLIST_HEAD(&page->parent_ptes);
		hlist_add_head(&pte_chain->link, &page->parent_ptes);
		pte_chain->parent_ptes[0] = old;
	}
	hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link) {
		if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
			continue;
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
			if (!pte_chain->parent_ptes[i]) {
				pte_chain->parent_ptes[i] = parent_pte;
				return;
			}
	}
	pte_chain = mmu_alloc_pte_chain(vcpu);
	BUG_ON(!pte_chain);
	hlist_add_head(&pte_chain->link, &page->parent_ptes);
	pte_chain->parent_ptes[0] = parent_pte;
}

static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
				       u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!page->multimapped) {
		BUG_ON(page->parent_pte != parent_pte);
		page->parent_pte = NULL;
		return;
	}
	hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link)
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
			if (!pte_chain->parent_ptes[i])
				break;
			if (pte_chain->parent_ptes[i] != parent_pte)
				continue;
			while (i + 1 < NR_PTE_CHAIN_ENTRIES
				&& pte_chain->parent_ptes[i + 1]) {
				pte_chain->parent_ptes[i]
					= pte_chain->parent_ptes[i + 1];
				++i;
			}
			pte_chain->parent_ptes[i] = NULL;
			if (i == 0) {
				hlist_del(&pte_chain->link);
				mmu_free_pte_chain(pte_chain);
				if (hlist_empty(&page->parent_ptes)) {
					page->multimapped = 0;
					page->parent_pte = NULL;
				}
			}
			return;
		}
	BUG();
}

static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm,
						gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *page;
	struct hlist_node *node;

	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &kvm->mmu_page_hash[index];
	hlist_for_each_entry(page, node, bucket, hash_link)
		if (page->gfn == gfn && !page->role.metaphysical) {
			pgprintk("%s: found role %x\n",
				 __FUNCTION__, page->role.word);
			return page;
		}
	return NULL;
}

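/*
 * A note on role.quadrant, computed below: when a 32-bit guest
 * (role.glevels == 2) runs on pae shadow page tables, one guest page
 * table maps 4MB but a shadow page maps only 2MB, so each guest table
 * needs two shadow pages (and a guest root needs four).  The quadrant
 * records which half (or quarter) of the guest table a shadow page
 * covers, so otherwise-identical shadow pages can be told apart.
 */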
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
					     int metaphysical,
					     unsigned hugepage_access,
					     u64 *parent_pte)
{
	union kvm_mmu_page_role role;
	unsigned index;
	unsigned quadrant;
	struct hlist_head *bucket;
	struct kvm_mmu_page *page;
	struct hlist_node *node;

	role.word = 0;
	role.glevels = vcpu->mmu.root_level;
	role.level = level;
	role.metaphysical = metaphysical;
	role.hugepage_access = hugepage_access;
	if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}
	pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
		 gfn, role.word);
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &vcpu->kvm->mmu_page_hash[index];
	hlist_for_each_entry(page, node, bucket, hash_link)
		if (page->gfn == gfn && page->role.word == role.word) {
			mmu_page_add_parent_pte(vcpu, page, parent_pte);
			pgprintk("%s: found\n", __FUNCTION__);
			return page;
		}
	page = kvm_mmu_alloc_page(vcpu, parent_pte);
	if (!page)
		return page;
	pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
	page->gfn = gfn;
	page->role = role;
	hlist_add_head(&page->hash_link, bucket);
	vcpu->mmu.prefetch_page(vcpu, page);
	if (!metaphysical)
		rmap_write_protect(vcpu->kvm, gfn);
	return page;
}

static void kvm_mmu_page_unlink_children(struct kvm *kvm,
					 struct kvm_mmu_page *page)
{
	unsigned i;
	u64 *pt;
	u64 ent;

	pt = page->spt;

	if (page->role.level == PT_PAGE_TABLE_LEVEL) {
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			if (is_shadow_present_pte(pt[i]))
				rmap_remove(kvm, &pt[i]);
			pt[i] = shadow_trap_nonpresent_pte;
		}
		kvm_flush_remote_tlbs(kvm);
		return;
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		ent = pt[i];

		pt[i] = shadow_trap_nonpresent_pte;
		if (!is_shadow_present_pte(ent))
			continue;
		ent &= PT64_BASE_ADDR_MASK;
		mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
	}
	kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_put_page(struct kvm_mmu_page *page,
			     u64 *parent_pte)
{
	mmu_page_remove_parent_pte(page, parent_pte);
}

static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
{
	int i;

	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i])
			kvm->vcpus[i]->last_pte_updated = NULL;
}

static void kvm_mmu_zap_page(struct kvm *kvm,
			     struct kvm_mmu_page *page)
{
	u64 *parent_pte;

	while (page->multimapped || page->parent_pte) {
		if (!page->multimapped)
			parent_pte = page->parent_pte;
		else {
			struct kvm_pte_chain *chain;

			chain = container_of(page->parent_ptes.first,
					     struct kvm_pte_chain, link);
			parent_pte = chain->parent_ptes[0];
		}
		BUG_ON(!parent_pte);
		kvm_mmu_put_page(page, parent_pte);
		set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
	}
	kvm_mmu_page_unlink_children(kvm, page);
	if (!page->root_count) {
		hlist_del(&page->hash_link);
		kvm_mmu_free_page(kvm, page);
	} else
		list_move(&page->link, &kvm->active_mmu_pages);
	kvm_mmu_reset_last_pte_updated(kvm);
}

/*
 * Change the number of mmu pages allocated to the vm.
 * Note: if kvm_nr_mmu_pages is too small, we will get a deadlock.
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
{
	/*
	 * If we set the number of mmu pages to be smaller than the
	 * number of active pages, we must free some mmu pages before we
	 * change the value.
	 */

	if ((kvm->n_alloc_mmu_pages - kvm->n_free_mmu_pages) >
	    kvm_nr_mmu_pages) {
		int n_used_mmu_pages = kvm->n_alloc_mmu_pages
				       - kvm->n_free_mmu_pages;

		while (n_used_mmu_pages > kvm_nr_mmu_pages) {
			struct kvm_mmu_page *page;

			page = container_of(kvm->active_mmu_pages.prev,
					    struct kvm_mmu_page, link);
			kvm_mmu_zap_page(kvm, page);
			n_used_mmu_pages--;
		}
		kvm->n_free_mmu_pages = 0;
	} else
		kvm->n_free_mmu_pages += kvm_nr_mmu_pages
					 - kvm->n_alloc_mmu_pages;

	kvm->n_alloc_mmu_pages = kvm_nr_mmu_pages;
}

static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *page;
	struct hlist_node *node, *n;
	int r;

	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
	r = 0;
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &kvm->mmu_page_hash[index];
	hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
		if (page->gfn == gfn && !page->role.metaphysical) {
			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
				 page->role.word);
			kvm_mmu_zap_page(kvm, page);
			r = 1;
		}
	return r;
}

static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_mmu_page *page;

	while ((page = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
		pgprintk("%s: zap %lx %x\n",
			 __FUNCTION__, gfn, page->role.word);
		kvm_mmu_zap_page(kvm, page);
	}
}

static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
{
	int slot = memslot_id(kvm, gfn_to_memslot(kvm, gpa >> PAGE_SHIFT));
	struct kvm_mmu_page *page_head = page_header(__pa(pte));

	__set_bit(slot, &page_head->slot_bitmap);
}

hpa_t gpa_to_hpa(struct kvm *kvm, gpa_t gpa)
{
	struct page *page;
	hpa_t hpa;

	ASSERT((gpa & HPA_ERR_MASK) == 0);
	page = gfn_to_page(kvm, gpa >> PAGE_SHIFT);
	hpa = ((hpa_t)page_to_pfn(page) << PAGE_SHIFT) | (gpa & (PAGE_SIZE-1));
	if (is_error_page(page))
		return hpa | HPA_ERR_MASK;
	return hpa;
}

hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

	if (gpa == UNMAPPED_GVA)
		return UNMAPPED_GVA;
	return gpa_to_hpa(vcpu->kvm, gpa);
}

struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

	if (gpa == UNMAPPED_GVA)
		return NULL;
	return pfn_to_page(gpa_to_hpa(vcpu->kvm, gpa) >> PAGE_SHIFT);
}

static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}

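/*
 * Walk the shadow page tables top-down for a real-mode/non-paging
 * guest, allocating intermediate shadow pages on demand, until level
 * 1 is reached and the final pte can be installed.  The equivalents
 * for paging guests are generated from paging_tmpl.h.
 */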
static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
{
	int level = PT32E_ROOT_LEVEL;
	hpa_t table_addr = vcpu->mmu.root_hpa;

	for (; ; level--) {
		u32 index = PT64_INDEX(v, level);
		u64 *table;
		u64 pte;

		ASSERT(VALID_PAGE(table_addr));
		table = __va(table_addr);

		if (level == 1) {
			int was_rmapped;

			pte = table[index];
			was_rmapped = is_rmap_pte(pte);
			if (is_shadow_present_pte(pte) && is_writeble_pte(pte))
				return 0;
			mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
			page_header_update_slot(vcpu->kvm, table, v);
			table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK |
								PT_USER_MASK;
			if (!was_rmapped)
				rmap_add(vcpu, &table[index], v >> PAGE_SHIFT);
			else
				kvm_release_page(pfn_to_page(p >> PAGE_SHIFT));
			return 0;
		}

		if (table[index] == shadow_trap_nonpresent_pte) {
			struct kvm_mmu_page *new_table;
			gfn_t pseudo_gfn;

			pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
						     v, level - 1,
						     1, 3, &table[index]);
			if (!new_table) {
				pgprintk("nonpaging_map: ENOMEM\n");
				kvm_release_page(pfn_to_page(p >> PAGE_SHIFT));
				return -ENOMEM;
			}

			table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
				| PT_WRITABLE_MASK | PT_USER_MASK;
		}
		table_addr = table[index] & PT64_BASE_ADDR_MASK;
	}
}

static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp)
{
	int i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		sp->spt[i] = shadow_trap_nonpresent_pte;
}

static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *page;

	if (!VALID_PAGE(vcpu->mmu.root_hpa))
		return;
#ifdef CONFIG_X86_64
	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->mmu.root_hpa;

		page = page_header(root);
		--page->root_count;
		vcpu->mmu.root_hpa = INVALID_PAGE;
		return;
	}
#endif
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->mmu.pae_root[i];

		if (root) {
			root &= PT64_BASE_ADDR_MASK;
			page = page_header(root);
			--page->root_count;
		}
		vcpu->mmu.pae_root[i] = INVALID_PAGE;
	}
	vcpu->mmu.root_hpa = INVALID_PAGE;
}

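/*
 * With a 64-bit shadow there is a single root shadow page; with a pae
 * shadow there are four root entries in pae_root[], one per gigabyte
 * of guest address space, each backed by its own shadow page.
 */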
static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
	int i;
	gfn_t root_gfn;
	struct kvm_mmu_page *page;

	root_gfn = vcpu->cr3 >> PAGE_SHIFT;

#ifdef CONFIG_X86_64
	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->mmu.root_hpa;

		ASSERT(!VALID_PAGE(root));
		page = kvm_mmu_get_page(vcpu, root_gfn, 0,
					PT64_ROOT_LEVEL, 0, 0, NULL);
		root = __pa(page->spt);
		++page->root_count;
		vcpu->mmu.root_hpa = root;
		return;
	}
#endif
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->mmu.pae_root[i];

		ASSERT(!VALID_PAGE(root));
		if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL) {
			if (!is_present_pte(vcpu->pdptrs[i])) {
				vcpu->mmu.pae_root[i] = 0;
				continue;
			}
			root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
		} else if (vcpu->mmu.root_level == 0)
			root_gfn = 0;
		page = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
					PT32_ROOT_LEVEL, !is_paging(vcpu),
					0, NULL);
		root = __pa(page->spt);
		++page->root_count;
		vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
	}
	vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
}

static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	return vaddr;
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
				u32 error_code)
{
	gpa_t addr = gva;
	hpa_t paddr;
	int r;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));

	paddr = gpa_to_hpa(vcpu->kvm, addr & PT64_BASE_ADDR_MASK);

	if (is_error_hpa(paddr)) {
		kvm_release_page(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
				 >> PAGE_SHIFT));
		return 1;
	}

	return nonpaging_map(vcpu, addr & PAGE_MASK, paddr);
}

static void nonpaging_free(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static int nonpaging_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->free = nonpaging_free;
	context->prefetch_page = nonpaging_prefetch_page;
	context->root_level = 0;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;
	kvm_x86_ops->tlb_flush(vcpu);
}

static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
	pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
	mmu_free_roots(vcpu);
}

static void inject_page_fault(struct kvm_vcpu *vcpu,
			      u64 addr,
			      u32 err_code)
{
	kvm_x86_ops->inject_page_fault(vcpu, addr, err_code);
}

static void paging_free(struct kvm_vcpu *vcpu)
{
	nonpaging_free(vcpu);
}

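/*
 * paging_tmpl.h is a template: including it twice with different
 * PTTYPE values expands into two sets of functions, paging64_*() for
 * 64-bit/pae guest page tables and paging32_*() for 32-bit ones.
 */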
#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
{
	struct kvm_mmu *context = &vcpu->mmu;

	ASSERT(is_pae(vcpu));
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
	context->prefetch_page = paging64_prefetch_page;
	context->free = paging_free;
	context->root_level = level;
	context->shadow_root_level = level;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging64_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
}

static int paging32_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->mmu;

	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
	context->free = paging_free;
	context->prefetch_page = paging32_prefetch_page;
	context->root_level = PT32_ROOT_LEVEL;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging32E_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
}

static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

	if (!is_paging(vcpu))
		return nonpaging_init_context(vcpu);
	else if (is_long_mode(vcpu))
		return paging64_init_context(vcpu);
	else if (is_pae(vcpu))
		return paging32E_init_context(vcpu);
	else
		return paging32_init_context(vcpu);
}

static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	if (VALID_PAGE(vcpu->mmu.root_hpa)) {
		vcpu->mmu.free(vcpu);
		vcpu->mmu.root_hpa = INVALID_PAGE;
	}
}

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
	destroy_kvm_mmu(vcpu);
	return init_kvm_mmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);

int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
	int r;

	mutex_lock(&vcpu->kvm->lock);
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;
	mmu_alloc_roots(vcpu);
	kvm_x86_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
	kvm_mmu_flush_tlb(vcpu);
out:
	mutex_unlock(&vcpu->kvm->lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_load);

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *page,
				  u64 *spte)
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
	if (is_shadow_present_pte(pte)) {
		if (page->role.level == PT_PAGE_TABLE_LEVEL)
			rmap_remove(vcpu->kvm, spte);
		else {
			child = page_header(pte & PT64_BASE_ADDR_MASK);
			mmu_page_remove_parent_pte(child, spte);
		}
	}
	set_shadow_pte(spte, shadow_trap_nonpresent_pte);
	kvm_flush_remote_tlbs(vcpu->kvm);
}

static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *page,
				  u64 *spte,
				  const void *new, int bytes,
				  int offset_in_pte)
{
	if (page->role.level != PT_PAGE_TABLE_LEVEL)
		return;

	if (page->role.glevels == PT32_ROOT_LEVEL)
		paging32_update_pte(vcpu, page, spte, new, bytes,
				    offset_in_pte);
	else
		paging64_update_pte(vcpu, page, spte, new, bytes,
				    offset_in_pte);
}

static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
{
	u64 *spte = vcpu->last_pte_updated;

	return !!(spte && (*spte & PT_ACCESSED_MASK));
}

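/*
 * kvm_mmu_pte_write() keeps shadow ptes in sync when the guest writes
 * to a shadowed guest page table through emulation.  Two heuristics
 * guard it: a misaligned or undersized write suggests the page is not
 * really in use as a page table, and repeated writes to the same gfn
 * with no intervening access (the "flood" counter) suggest the page
 * is being reused for data; in both cases the shadow page is zapped
 * instead of patched.
 */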
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_mmu_page *page;
	struct hlist_node *node, *n;
	struct hlist_head *bucket;
	unsigned index;
	u64 *spte;
	unsigned offset = offset_in_page(gpa);
	unsigned pte_size;
	unsigned page_offset;
	unsigned misaligned;
	unsigned quadrant;
	int level;
	int flooded = 0;
	int npte;

	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
	kvm_mmu_audit(vcpu, "pre pte write");
	if (gfn == vcpu->last_pt_write_gfn
	    && !last_updated_pte_accessed(vcpu)) {
		++vcpu->last_pt_write_count;
		if (vcpu->last_pt_write_count >= 3)
			flooded = 1;
	} else {
		vcpu->last_pt_write_gfn = gfn;
		vcpu->last_pt_write_count = 1;
		vcpu->last_pte_updated = NULL;
	}
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &vcpu->kvm->mmu_page_hash[index];
	hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
		if (page->gfn != gfn || page->role.metaphysical)
			continue;
		pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
		misaligned |= bytes < 4;
		if (misaligned || flooded) {
			/*
			 * Misaligned accesses are too much trouble to fix
			 * up; also, they usually indicate a page is not used
			 * as a page table.
			 *
			 * If we're seeing too many writes to a page,
			 * it may no longer be a page table, or we may be
			 * forking, in which case it is better to unmap the
			 * page.
			 */
			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
				 gpa, bytes, page->role.word);
			kvm_mmu_zap_page(vcpu->kvm, page);
			continue;
		}
		page_offset = offset;
		level = page->role.level;
		npte = 1;
		if (page->role.glevels == PT32_ROOT_LEVEL) {
			page_offset <<= 1;	/* 32->64 */
			/*
			 * A 32-bit pde maps 4MB while the shadow pdes map
			 * only 2MB.  So we need to double the offset again
			 * and zap two pdes instead of one.
			 */
			if (level == PT32_ROOT_LEVEL) {
				page_offset &= ~7; /* kill rounding error */
				page_offset <<= 1;
				npte = 2;
			}
			quadrant = page_offset >> PAGE_SHIFT;
			page_offset &= ~PAGE_MASK;
			if (quadrant != page->role.quadrant)
				continue;
		}
		spte = &page->spt[page_offset / sizeof(*spte)];
		while (npte--) {
			mmu_pte_write_zap_pte(vcpu, page, spte);
			mmu_pte_write_new_pte(vcpu, page, spte, new, bytes,
					      page_offset & (pte_size - 1));
			++spte;
		}
	}
	kvm_mmu_audit(vcpu, "post pte write");
}

int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

	return kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
}

void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
	while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
		struct kvm_mmu_page *page;

		page = container_of(vcpu->kvm->active_mmu_pages.prev,
				    struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu->kvm, page);
	}
}

static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *page;

	while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
		page = container_of(vcpu->kvm->active_mmu_pages.next,
				    struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu->kvm, page);
	}
	free_page((unsigned long)vcpu->mmu.pae_root);
}

static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct page *page;
	int i;

	ASSERT(vcpu);

	if (vcpu->kvm->n_requested_mmu_pages)
		vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_requested_mmu_pages;
	else
		vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_alloc_mmu_pages;
	/*
	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
	 * Therefore we need to allocate shadow page tables in the first
	 * 4GB of memory, which happens to fit the DMA32 zone.
	 */
	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!page)
		goto error_1;
	vcpu->mmu.pae_root = page_address(page);
	for (i = 0; i < 4; ++i)
		vcpu->mmu.pae_root[i] = INVALID_PAGE;

	return 0;

error_1:
	free_mmu_pages(vcpu);
	return -ENOMEM;
}

int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

	return alloc_mmu_pages(vcpu);
}

int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

	return init_kvm_mmu(vcpu);
}

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);

	destroy_kvm_mmu(vcpu);
	free_mmu_pages(vcpu);
	mmu_free_memory_caches(vcpu);
}

void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
	struct kvm_mmu_page *page;

	list_for_each_entry(page, &kvm->active_mmu_pages, link) {
		int i;
		u64 *pt;

		if (!test_bit(slot, &page->slot_bitmap))
			continue;

		pt = page->spt;
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
			/* avoid RMW */
			if (pt[i] & PT_WRITABLE_MASK)
				pt[i] &= ~PT_WRITABLE_MASK;
	}
}

void kvm_mmu_zap_all(struct kvm *kvm)
{
	struct kvm_mmu_page *page, *node;

	list_for_each_entry_safe(page, node, &kvm->active_mmu_pages, link)
		kvm_mmu_zap_page(kvm, page);

	kvm_flush_remote_tlbs(kvm);
}

void kvm_mmu_module_exit(void)
{
	if (pte_chain_cache)
		kmem_cache_destroy(pte_chain_cache);
	if (rmap_desc_cache)
		kmem_cache_destroy(rmap_desc_cache);
	if (mmu_page_header_cache)
		kmem_cache_destroy(mmu_page_header_cache);
}

int kvm_mmu_module_init(void)
{
	pte_chain_cache = kmem_cache_create("kvm_pte_chain",
					    sizeof(struct kvm_pte_chain),
					    0, 0, NULL);
	if (!pte_chain_cache)
		goto nomem;
	rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
					    sizeof(struct kvm_rmap_desc),
					    0, 0, NULL);
	if (!rmap_desc_cache)
		goto nomem;

	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
						  sizeof(struct kvm_mmu_page),
						  0, 0, NULL);
	if (!mmu_page_header_cache)
		goto nomem;

	return 0;

nomem:
	kvm_mmu_module_exit();
	return -ENOMEM;
}

#ifdef AUDIT

static const char *audit_msg;

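/*
 * Sign-extend bit 47 of a gva into bits 48-63; for example
 * 0x0000800000000000 becomes 0xffff800000000000.  Computed gvas then
 * compare equal to the canonical addresses the hardware reports.
 */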
static gva_t canonicalize(gva_t gva)
{
#ifdef CONFIG_X86_64
	gva = (long long)(gva << 16) >> 16;
#endif
	return gva;
}

static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
				gva_t va, int level)
{
	u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
	int i;
	gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
		u64 ent = pt[i];

		if (ent == shadow_trap_nonpresent_pte)
			continue;

		va = canonicalize(va);
		if (level > 1) {
			if (ent == shadow_notrap_nonpresent_pte)
				printk(KERN_ERR "audit: (%s) nontrapping pte"
				       " in nonleaf level: levels %d gva %lx"
				       " level %d pte %llx\n", audit_msg,
				       vcpu->mmu.root_level, va, level, ent);

			audit_mappings_page(vcpu, ent, va, level - 1);
		} else {
			gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, va);
			hpa_t hpa = gpa_to_hpa(vcpu->kvm, gpa);
			struct page *page;

			if (is_shadow_present_pte(ent)
			    && (ent & PT64_BASE_ADDR_MASK) != hpa)
				printk(KERN_ERR "xx audit error: (%s) levels %d"
				       " gva %lx gpa %llx hpa %llx ent %llx %d\n",
				       audit_msg, vcpu->mmu.root_level,
				       va, gpa, hpa, ent,
				       is_shadow_present_pte(ent));
			else if (ent == shadow_notrap_nonpresent_pte
				 && !is_error_hpa(hpa))
				printk(KERN_ERR "audit: (%s) notrap shadow,"
				       " valid guest gva %lx\n", audit_msg, va);
			page = pfn_to_page((gpa & PT64_BASE_ADDR_MASK)
					   >> PAGE_SHIFT);
			kvm_release_page(page);
		}
	}
}

static void audit_mappings(struct kvm_vcpu *vcpu)
{
	unsigned i;

	if (vcpu->mmu.root_level == 4)
		audit_mappings_page(vcpu, vcpu->mmu.root_hpa, 0, 4);
	else
		for (i = 0; i < 4; ++i)
			if (vcpu->mmu.pae_root[i] & PT_PRESENT_MASK)
				audit_mappings_page(vcpu,
						    vcpu->mmu.pae_root[i],
						    i << 30,
						    2);
}

static int count_rmaps(struct kvm_vcpu *vcpu)
{
	int nmaps = 0;
	int i, j, k;

	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
		struct kvm_rmap_desc *d;

		for (j = 0; j < m->npages; ++j) {
			unsigned long *rmapp = &m->rmap[j];

			if (!*rmapp)
				continue;
			if (!(*rmapp & 1)) {
				++nmaps;
				continue;
			}
			d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
			while (d) {
				for (k = 0; k < RMAP_EXT; ++k)
					if (d->shadow_ptes[k])
						++nmaps;
					else
						break;
				d = d->more;
			}
		}
	}
	return nmaps;
}

static int count_writable_mappings(struct kvm_vcpu *vcpu)
{
	int nmaps = 0;
	struct kvm_mmu_page *page;
	int i;

	list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
		u64 *pt = page->spt;

		if (page->role.level != PT_PAGE_TABLE_LEVEL)
			continue;

		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			u64 ent = pt[i];

			if (!(ent & PT_PRESENT_MASK))
				continue;
			if (!(ent & PT_WRITABLE_MASK))
				continue;
			++nmaps;
		}
	}
	return nmaps;
}

static void audit_rmap(struct kvm_vcpu *vcpu)
{
	int n_rmap = count_rmaps(vcpu);
	int n_actual = count_writable_mappings(vcpu);

	if (n_rmap != n_actual)
		printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
		       __FUNCTION__, audit_msg, n_rmap, n_actual);
}

static void audit_write_protection(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *page;
	struct kvm_memory_slot *slot;
	unsigned long *rmapp;
	gfn_t gfn;

	list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
		if (page->role.metaphysical)
			continue;

		slot = gfn_to_memslot(vcpu->kvm, page->gfn);
		gfn = unalias_gfn(vcpu->kvm, page->gfn);
		rmapp = &slot->rmap[gfn - slot->base_gfn];
		if (*rmapp)
			printk(KERN_ERR "%s: (%s) shadow page has writable"
			       " mappings: gfn %lx role %x\n",
			       __FUNCTION__, audit_msg, page->gfn,
			       page->role.word);
	}
}

static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
{
	int olddbg = dbg;

	dbg = 0;
	audit_msg = msg;
	audit_rmap(vcpu);
	audit_write_protection(vcpu);
	audit_mappings(vcpu);
	dbg = olddbg;
}

#endif