arch/arm/kvm/mmu.c
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <trace/events/kvm.h>
#include <asm/idmap.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>

#include "trace.h"

extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];

static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

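/* Flush the stage-2 TLB entries tagged with this VM's VMID via a Hyp call. */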
static void kvm_tlb_flush_vmid(struct kvm *kvm)
{
        kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
}

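/*
 * The mmu_memory_cache helpers pre-allocate pages for stage-2 page tables so
 * that table allocations never have to sleep while mmu_lock is held.
 */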
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
                                  int min, int max)
{
        void *page;

        BUG_ON(max > KVM_NR_MEM_OBJS);
        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < max) {
                page = (void *)__get_free_page(PGALLOC_GFP);
                if (!page)
                        return -ENOMEM;
                cache->objects[cache->nobjs++] = page;
        }
        return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
        while (mc->nobjs)
                free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
        void *p;

        BUG_ON(!mc || !mc->nobjs);
        p = mc->objects[--mc->nobjs];
        return p;
}

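/* Free every level-3 (pte) table hanging off the given Hyp-mode pmd table. */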
static void free_ptes(pmd_t *pmd, unsigned long addr)
{
        pte_t *pte;
        unsigned int i;

        for (i = 0; i < PTRS_PER_PMD; i++, addr += PMD_SIZE) {
                if (!pmd_none(*pmd) && pmd_table(*pmd)) {
                        pte = pte_offset_kernel(pmd, addr);
                        pte_free_kernel(NULL, pte);
                }
                pmd++;
        }
}

/**
 * free_hyp_pmds - free the Hyp-mode level-2 tables and child level-3 tables
 *
 * Assumes this is a page table used strictly in Hyp-mode and therefore contains
 * only mappings in the kernel memory area, which is above PAGE_OFFSET.
 */
void free_hyp_pmds(void)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        unsigned long addr;

        mutex_lock(&kvm_hyp_pgd_mutex);
        for (addr = PAGE_OFFSET; addr != 0; addr += PGDIR_SIZE) {
                pgd = hyp_pgd + pgd_index(addr);
                pud = pud_offset(pgd, addr);

                if (pud_none(*pud))
                        continue;
                BUG_ON(pud_bad(*pud));

                pmd = pmd_offset(pud, addr);
                free_ptes(pmd, addr);
                pmd_free(NULL, pmd);
                pud_clear(pud);
        }
        mutex_unlock(&kvm_hyp_pgd_mutex);
}

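/* Map a range of kernel pages into Hyp mode at the same virtual addresses. */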
static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
                                    unsigned long end)
{
        pte_t *pte;
        unsigned long addr;
        struct page *page;

        for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
                pte = pte_offset_kernel(pmd, addr);
                BUG_ON(!virt_addr_valid(addr));
                page = virt_to_page(addr);
                kvm_set_pte(pte, mk_pte(page, PAGE_HYP));
        }
}

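/*
 * Map a physically contiguous device range into Hyp mode with PAGE_HYP_DEVICE
 * attributes, advancing *pfn_base as each page is mapped.
 */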
static void create_hyp_io_pte_mappings(pmd_t *pmd, unsigned long start,
                                       unsigned long end,
                                       unsigned long *pfn_base)
{
        pte_t *pte;
        unsigned long addr;

        for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
                pte = pte_offset_kernel(pmd, addr);
                BUG_ON(pfn_valid(*pfn_base));
                kvm_set_pte(pte, pfn_pte(*pfn_base, PAGE_HYP_DEVICE));
                (*pfn_base)++;
        }
}

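/* Walk the pmd level, allocating pte tables on demand for the Hyp mapping. */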
static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
                                   unsigned long end, unsigned long *pfn_base)
{
        pmd_t *pmd;
        pte_t *pte;
        unsigned long addr, next;

        for (addr = start; addr < end; addr = next) {
                pmd = pmd_offset(pud, addr);

                BUG_ON(pmd_sect(*pmd));

                if (pmd_none(*pmd)) {
                        pte = pte_alloc_one_kernel(NULL, addr);
                        if (!pte) {
                                kvm_err("Cannot allocate Hyp pte\n");
                                return -ENOMEM;
                        }
                        pmd_populate_kernel(NULL, pmd, pte);
                }

                next = pmd_addr_end(addr, end);

                /*
                 * If pfn_base is NULL, we map kernel pages into HYP with the
                 * virtual address. Otherwise, this is considered an I/O
                 * mapping and we map the physical region starting at
                 * *pfn_base to [start, end[.
                 */
                if (!pfn_base)
                        create_hyp_pte_mappings(pmd, addr, next);
                else
                        create_hyp_io_pte_mappings(pmd, addr, next, pfn_base);
        }

        return 0;
}

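/*
 * Common worker for create_hyp_mappings() and create_hyp_io_mappings():
 * walks the Hyp pgd under kvm_hyp_pgd_mutex, allocating pmd tables on demand
 * before handing each pgd-sized chunk to create_hyp_pmd_mappings().
 */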
static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base)
{
        unsigned long start = (unsigned long)from;
        unsigned long end = (unsigned long)to;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        unsigned long addr, next;
        int err = 0;

        BUG_ON(start > end);
        if (start < PAGE_OFFSET)
                return -EINVAL;

        mutex_lock(&kvm_hyp_pgd_mutex);
        for (addr = start; addr < end; addr = next) {
                pgd = hyp_pgd + pgd_index(addr);
                pud = pud_offset(pgd, addr);

                if (pud_none_or_clear_bad(pud)) {
                        pmd = pmd_alloc_one(NULL, addr);
                        if (!pmd) {
                                kvm_err("Cannot allocate Hyp pmd\n");
                                err = -ENOMEM;
                                goto out;
                        }
                        pud_populate(NULL, pud, pmd);
                }

                next = pgd_addr_end(addr, end);
                err = create_hyp_pmd_mappings(pud, addr, next, pfn_base);
                if (err)
                        goto out;
        }
out:
        mutex_unlock(&kvm_hyp_pgd_mutex);
        return err;
}

/**
 * create_hyp_mappings - map a kernel virtual address range in Hyp mode
 * @from: The virtual kernel start address of the range
 * @to: The virtual kernel end address of the range (exclusive)
 *
 * The same virtual address as the kernel virtual address is also used in
 * Hyp-mode, mapping to the same underlying physical pages.
 *
 * Note: Wrapping around zero in the "to" address is not supported.
 */
int create_hyp_mappings(void *from, void *to)
{
        return __create_hyp_mappings(from, to, NULL);
}

/**
 * create_hyp_io_mappings - map a physical IO range in Hyp mode
 * @from: The virtual HYP start address of the range
 * @to: The virtual HYP end address of the range (exclusive)
 * @addr: The physical start address which gets mapped
 */
int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr)
{
        unsigned long pfn = __phys_to_pfn(addr);
        return __create_hyp_mappings(from, to, &pfn);
}

/**
 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
 * @kvm: The KVM struct pointer for the VM.
 *
 * Allocates the 1st level table only of size defined by S2_PGD_ORDER (can
 * support either full 40-bit input addresses or limited to 32-bit input
 * addresses). Clears the allocated pages.
 *
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
int kvm_alloc_stage2_pgd(struct kvm *kvm)
{
        pgd_t *pgd;

        if (kvm->arch.pgd != NULL) {
                kvm_err("kvm_arch already initialized?\n");
                return -EINVAL;
        }

        pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, S2_PGD_ORDER);
        if (!pgd)
                return -ENOMEM;

        /* stage-2 pgd must be aligned to its size */
        VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1));

        memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
        kvm_clean_pgd(pgd);
        kvm->arch.pgd = pgd;

        return 0;
}

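/*
 * Helpers for unmap_stage2_range(): the clear_*_entry() functions tear down
 * an entry and drop a page reference, while pmd_empty()/pte_empty() report
 * whether a table page's refcount has fallen back to 1, meaning it holds no
 * valid entries and its parent entry can be cleared as well.
 */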
static void clear_pud_entry(pud_t *pud)
{
        pmd_t *pmd_table = pmd_offset(pud, 0);
        pud_clear(pud);
        pmd_free(NULL, pmd_table);
        put_page(virt_to_page(pud));
}

static void clear_pmd_entry(pmd_t *pmd)
{
        pte_t *pte_table = pte_offset_kernel(pmd, 0);
        pmd_clear(pmd);
        pte_free_kernel(NULL, pte_table);
        put_page(virt_to_page(pmd));
}

static bool pmd_empty(pmd_t *pmd)
{
        struct page *pmd_page = virt_to_page(pmd);
        return page_count(pmd_page) == 1;
}

static void clear_pte_entry(pte_t *pte)
{
        if (pte_present(*pte)) {
                kvm_set_pte(pte, __pte(0));
                put_page(virt_to_page(pte));
        }
}

static bool pte_empty(pte_t *pte)
{
        struct page *pte_page = virt_to_page(pte);
        return page_count(pte_page) == 1;
}

/**
 * unmap_stage2_range - Clear stage2 page table entries to unmap a range
 * @kvm:   The VM pointer
 * @start: The intermediate physical base address of the range to unmap
 * @size:  The size of the area to unmap
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * with things behind our backs.
 */
static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        phys_addr_t addr = start, end = start + size;
        u64 range;

        while (addr < end) {
                pgd = kvm->arch.pgd + pgd_index(addr);
                pud = pud_offset(pgd, addr);
                if (pud_none(*pud)) {
                        addr += PUD_SIZE;
                        continue;
                }

                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
                        addr += PMD_SIZE;
                        continue;
                }

                pte = pte_offset_kernel(pmd, addr);
                clear_pte_entry(pte);
                range = PAGE_SIZE;

                /* If we emptied the pte, walk back up the ladder */
                if (pte_empty(pte)) {
                        clear_pmd_entry(pmd);
                        range = PMD_SIZE;
                        if (pmd_empty(pmd)) {
                                clear_pud_entry(pud);
                                range = PUD_SIZE;
                        }
                }

                addr += range;
        }
}

/**
 * kvm_free_stage2_pgd - free all stage-2 tables
 * @kvm: The KVM struct pointer for the VM.
 *
 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
 * underlying level-2 and level-3 tables before freeing the actual level-1 table
 * and setting the struct pointer to NULL.
 *
 * Note we don't need locking here as this is only called when the VM is
 * destroyed, which can only be done once.
 */
void kvm_free_stage2_pgd(struct kvm *kvm)
{
        if (kvm->arch.pgd == NULL)
                return;

        unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
        free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
        kvm->arch.pgd = NULL;
}

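/*
 * Install a pte in the stage-2 tables for @addr, allocating intermediate
 * tables from @cache as needed. Called with mmu_lock held; a NULL @cache
 * means the caller (the MMU notifier path) must not allocate, so missing
 * intermediate tables are simply skipped.
 */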
static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
                          phys_addr_t addr, const pte_t *new_pte, bool iomap)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte, old_pte;

        /* Create 2nd stage page table mapping - Level 1 */
        pgd = kvm->arch.pgd + pgd_index(addr);
        pud = pud_offset(pgd, addr);
        if (pud_none(*pud)) {
                if (!cache)
                        return 0; /* ignore calls from kvm_set_spte_hva */
                pmd = mmu_memory_cache_alloc(cache);
                pud_populate(NULL, pud, pmd);
                get_page(virt_to_page(pud));
        }

        pmd = pmd_offset(pud, addr);

        /* Create 2nd stage page table mapping - Level 2 */
        if (pmd_none(*pmd)) {
                if (!cache)
                        return 0; /* ignore calls from kvm_set_spte_hva */
                pte = mmu_memory_cache_alloc(cache);
                kvm_clean_pte(pte);
                pmd_populate_kernel(NULL, pmd, pte);
                get_page(virt_to_page(pmd));
        }

        pte = pte_offset_kernel(pmd, addr);

        if (iomap && pte_present(*pte))
                return -EFAULT;

        /* Create 2nd stage page table mapping - Level 3 */
        old_pte = *pte;
        kvm_set_pte(pte, *new_pte);
        if (pte_present(old_pte))
                kvm_tlb_flush_vmid(kvm);
        else
                get_page(virt_to_page(pte));

        return 0;
}

/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 *
 * @kvm: The KVM pointer
 * @guest_ipa: The IPA at which to insert the mapping
 * @pa: The physical address of the device
 * @size: The size of the mapping
 */
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
                          phys_addr_t pa, unsigned long size)
{
        phys_addr_t addr, end;
        int ret = 0;
        unsigned long pfn;
        struct kvm_mmu_memory_cache cache = { 0, };

        end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
        pfn = __phys_to_pfn(pa);

        for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
                pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
                kvm_set_s2pte_writable(&pte);

                ret = mmu_topup_memory_cache(&cache, 2, 2);
                if (ret)
                        goto out;
                spin_lock(&kvm->mmu_lock);
                ret = stage2_set_pte(kvm, &cache, addr, &pte, true);
                spin_unlock(&kvm->mmu_lock);
                if (ret)
                        goto out;

                pfn++;
        }

out:
        mmu_free_memory_cache(&cache);
        return ret;
}

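/*
 * Resolve a stage-2 fault on ordinary guest RAM: pin the backing page with
 * gfn_to_pfn_prot(), then install a stage-2 mapping for it, re-checking the
 * MMU notifier sequence count under mmu_lock to avoid racing with an unmap.
 */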
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                          gfn_t gfn, struct kvm_memory_slot *memslot,
                          unsigned long fault_status)
{
        pte_t new_pte;
        pfn_t pfn;
        int ret;
        bool write_fault, writable;
        unsigned long mmu_seq;
        struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;

        write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
        if (fault_status == FSC_PERM && !write_fault) {
                kvm_err("Unexpected L2 read permission error\n");
                return -EFAULT;
        }

        /* We need minimum second+third level pages */
        ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS);
        if (ret)
                return ret;

        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        /*
         * Ensure the read of mmu_notifier_seq happens before we call
         * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
         * the page we just got a reference to getting unmapped before we have
         * a chance to grab the mmu_lock, which ensures that if the page gets
         * unmapped afterwards, the call to kvm_unmap_hva will take it away
         * from us again properly. This smp_rmb() interacts with the smp_wmb()
         * in kvm_mmu_notifier_invalidate_<page|range_end>.
         */
        smp_rmb();

        pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write_fault, &writable);
        if (is_error_pfn(pfn))
                return -EFAULT;

        new_pte = pfn_pte(pfn, PAGE_S2);
        coherent_icache_guest_page(vcpu->kvm, gfn);

        spin_lock(&vcpu->kvm->mmu_lock);
        if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
                goto out_unlock;
        if (writable) {
                kvm_set_s2pte_writable(&new_pte);
                kvm_set_pfn_dirty(pfn);
        }
        stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);

out_unlock:
        spin_unlock(&vcpu->kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
        return 0;
}

/**
 * kvm_handle_guest_abort - handles all 2nd stage aborts
 * @vcpu: the VCPU pointer
 * @run: the kvm_run structure
 *
 * Any abort that gets to the host is almost guaranteed to be caused by a
 * missing second stage translation table entry, which can mean either that the
 * guest simply needs more memory and we must allocate an appropriate page, or
 * that the guest tried to access I/O memory, which is emulated by user space.
 * The distinction is based on the IPA causing the fault and whether this
 * memory region has been registered as standard RAM by user space.
 */
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        unsigned long fault_status;
        phys_addr_t fault_ipa;
        struct kvm_memory_slot *memslot;
        bool is_iabt;
        gfn_t gfn;
        int ret, idx;

        is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
        fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);

        trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
                              kvm_vcpu_get_hfar(vcpu), fault_ipa);

        /* Check that the stage-2 fault is a translation or permission fault */
        fault_status = kvm_vcpu_trap_get_fault(vcpu);
        if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
                kvm_err("Unsupported fault status: EC=%#x DFCS=%#lx\n",
                        kvm_vcpu_trap_get_class(vcpu), fault_status);
                return -EFAULT;
        }

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        gfn = fault_ipa >> PAGE_SHIFT;
        if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) {
                if (is_iabt) {
                        /* Prefetch Abort on I/O address */
                        kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
                        ret = 1;
                        goto out_unlock;
                }

                if (fault_status != FSC_FAULT) {
                        kvm_err("Unsupported fault status on io memory: %#lx\n",
                                fault_status);
                        ret = -EFAULT;
                        goto out_unlock;
                }

                /* Adjust page offset */
                fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ~PAGE_MASK;
                ret = io_mem_abort(vcpu, run, fault_ipa);
                goto out_unlock;
        }

        memslot = gfn_to_memslot(vcpu->kvm, gfn);

        ret = user_mem_abort(vcpu, fault_ipa, gfn, memslot, fault_status);
        if (ret == 0)
                ret = 1;
out_unlock:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
        return ret;
}

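/*
 * Translate a host virtual address range into guest physical addresses by
 * walking the memslots, invoking @handler once per page that the guest maps.
 */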
static void handle_hva_to_gpa(struct kvm *kvm,
                              unsigned long start,
                              unsigned long end,
                              void (*handler)(struct kvm *kvm,
                                              gpa_t gpa, void *data),
                              void *data)
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        slots = kvm_memslots(kvm);

        /* we only care about the pages that the guest sees */
        kvm_for_each_memslot(memslot, slots) {
                unsigned long hva_start, hva_end;
                gfn_t gfn, gfn_end;

                hva_start = max(start, memslot->userspace_addr);
                hva_end = min(end, memslot->userspace_addr +
                                   (memslot->npages << PAGE_SHIFT));
                if (hva_start >= hva_end)
                        continue;

                /*
                 * {gfn(page) | page intersects with [hva_start, hva_end)} =
                 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
                 */
                gfn = hva_to_gfn_memslot(hva_start, memslot);
                gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

                for (; gfn < gfn_end; ++gfn) {
                        gpa_t gpa = gfn << PAGE_SHIFT;
                        handler(kvm, gpa, data);
                }
        }
}

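/*
 * MMU notifier unmap callbacks: when the host unmaps a page backing guest
 * memory, tear down the corresponding stage-2 mapping and flush the TLB for
 * this VMID.
 */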
static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
        unmap_stage2_range(kvm, gpa, PAGE_SIZE);
        kvm_tlb_flush_vmid(kvm);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
        unsigned long end = hva + PAGE_SIZE;

        if (!kvm->arch.pgd)
                return 0;

        trace_kvm_unmap_hva(hva);
        handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
        return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm,
                        unsigned long start, unsigned long end)
{
        if (!kvm->arch.pgd)
                return 0;

        trace_kvm_unmap_hva_range(start, end);
        handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
        return 0;
}

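/*
 * change_pte MMU notifier callback: the host moved a page, so rewrite the
 * stage-2 pte to point at the new pfn without allocating any new tables.
 */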
static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
        pte_t *pte = (pte_t *)data;

        stage2_set_pte(kvm, NULL, gpa, pte, false);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        unsigned long end = hva + PAGE_SIZE;
        pte_t stage2_pte;

        if (!kvm->arch.pgd)
                return;

        trace_kvm_set_spte_hva(hva);
        stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
        handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
}

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
        mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

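/* Return the physical address of the Hyp-mode pgd, for loading into HTTBR. */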
phys_addr_t kvm_mmu_get_httbr(void)
{
        VM_BUG_ON(!virt_addr_valid(hyp_pgd));
        return virt_to_phys(hyp_pgd);
}

int kvm_mmu_init(void)
{
        if (!hyp_pgd) {
                kvm_err("Hyp mode PGD not allocated\n");
                return -ENOMEM;
        }

        return 0;
}

/**
 * kvm_clear_hyp_idmap - remove all idmaps from the hyp pgd
 *
 * Free the underlying pmds for all pgds in range and clear the pgds (but
 * don't free them) afterwards.
 */
void kvm_clear_hyp_idmap(void)
{
        unsigned long addr, end;
        unsigned long next;
        pgd_t *pgd = hyp_pgd;
        pud_t *pud;
        pmd_t *pmd;

        addr = virt_to_phys(__hyp_idmap_text_start);
        end = virt_to_phys(__hyp_idmap_text_end);

        pgd += pgd_index(addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                pud = pud_offset(pgd, addr);
                pmd = pmd_offset(pud, addr);

                pud_clear(pud);
                kvm_clean_pmd_entry(pmd);
                pmd_free(NULL, (pmd_t *)((unsigned long)pmd & PAGE_MASK));
        } while (pgd++, addr = next, addr < end);
}