/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <trace/events/kvm.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];

static pgd_t *boot_hyp_pgd;
static pgd_t *hyp_pgd;
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

static void *init_bounce_page;
static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;

#define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))

#define kvm_pmd_huge(_x)	(pmd_huge(_x) || pmd_trans_huge(_x))
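/*
 * Editorial note (added, not from the original source): hyp_pgd_order is
 * the allocation order for one full first-level table, e.g. with 512 pgd
 * entries of 8 bytes each it is get_order(512 * 8), i.e. a single page.
 * kvm_pmd_huge() is true for both hugetlbfs-backed (pmd_huge) and
 * transparent-huge-page (pmd_trans_huge) section mappings, so callers can
 * treat either kind of PMD block mapping uniformly.
 */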
static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
	/*
	 * This function also gets called when dealing with HYP page
	 * tables. As HYP doesn't have an associated struct kvm (and
	 * the HYP page tables are fairly static), we don't do
	 * anything there.
	 */
	if (kvm)
		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}
/*
 * D-Cache management functions. They take the page table entries by
 * value, as they are flushing the cache using the kernel mapping (or
 * kmap on 32bit).
 */
static void kvm_flush_dcache_pte(pte_t pte)
{
	__kvm_flush_dcache_pte(pte);
}

static void kvm_flush_dcache_pmd(pmd_t pmd)
{
	__kvm_flush_dcache_pmd(pmd);
}

static void kvm_flush_dcache_pud(pud_t pud)
{
	__kvm_flush_dcache_pud(pud);
}
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  int min, int max)
{
	void *page;

	BUG_ON(max > KVM_NR_MEM_OBJS);
	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < max) {
		page = (void *)__get_free_page(PGALLOC_GFP);
		if (!page)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = page;
	}
	return 0;
}
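/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * callers top up the per-VCPU cache while they may still sleep, then
 * consume pages from it with mmu_memory_cache_alloc() once they hold
 * mmu_lock and can no longer call the page allocator, roughly:
 *
 *	ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
 *				     KVM_NR_MEM_OBJS);
 *	if (ret)
 *		return ret;
 *	spin_lock(&kvm->mmu_lock);
 *	new_table = mmu_memory_cache_alloc(memcache);	// cannot fail here
 *
 * "new_table" is only a placeholder name for this example.
 */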
static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}
static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	BUG_ON(!mc || !mc->nobjs);
	p = mc->objects[--mc->nobjs];
	return p;
}
static void clear_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
{
	pud_t *pud_table __maybe_unused = pud_offset(pgd, 0);

	pgd_clear(pgd);
	kvm_tlb_flush_vmid_ipa(kvm, addr);
	pud_free(NULL, pud_table);
	put_page(virt_to_page(pgd));
}
static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
{
	pmd_t *pmd_table = pmd_offset(pud, 0);

	VM_BUG_ON(pud_huge(*pud));
	pud_clear(pud);
	kvm_tlb_flush_vmid_ipa(kvm, addr);
	pmd_free(NULL, pmd_table);
	put_page(virt_to_page(pud));
}
static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
{
	pte_t *pte_table = pte_offset_kernel(pmd, 0);

	VM_BUG_ON(kvm_pmd_huge(*pmd));
	pmd_clear(pmd);
	kvm_tlb_flush_vmid_ipa(kvm, addr);
	pte_free_kernel(NULL, pte_table);
	put_page(virt_to_page(pmd));
}
/*
 * Unmapping vs dcache management:
 *
 * If a guest maps certain memory pages as uncached, all writes will
 * bypass the data cache and go directly to RAM. However, the CPUs
 * can still speculate reads (not writes) and fill cache lines with
 * data.
 *
 * Those cache lines will be *clean* cache lines though, so a
 * clean+invalidate operation is equivalent to an invalidate
 * operation, because no cache lines are marked dirty.
 *
 * Those clean cache lines could be filled prior to an uncached write
 * by the guest, and the cache coherent IO subsystem would therefore
 * end up writing old data to disk.
 *
 * This is why right after unmapping a page/section and invalidating
 * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
 * the IO subsystem will never hit in the cache.
 */
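/*
 * Added illustration (not in the original source): for a single stage-2
 * page the sequence performed by unmap_ptes() below is roughly
 *
 *	old_pte = *pte;
 *	kvm_set_pte(pte, __pte(0));		// 1. remove the mapping
 *	kvm_tlb_flush_vmid_ipa(kvm, addr);	// 2. invalidate the TLB entry
 *	kvm_flush_dcache_pte(old_pte);		// 3. clean+invalidate the lines
 *
 * so that no stale, speculatively filled cache line can be hit by the
 * coherent IO subsystem after the page is gone.
 */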
static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
		       phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t start_addr = addr;
	pte_t *pte, *start_pte;

	start_pte = pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_none(*pte)) {
			pte_t old_pte = *pte;

			kvm_set_pte(pte, __pte(0));
			kvm_tlb_flush_vmid_ipa(kvm, addr);

			/* No need to invalidate the cache for device mappings */
			if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
				kvm_flush_dcache_pte(old_pte);

			put_page(virt_to_page(pte));
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);

	if (kvm_pte_table_empty(kvm, start_pte))
		clear_pmd_entry(kvm, pmd, start_addr);
}
static void unmap_pmds(struct kvm *kvm, pud_t *pud,
		       phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t next, start_addr = addr;
	pmd_t *pmd, *start_pmd;

	start_pmd = pmd = pmd_offset(pud, addr);
	do {
		next = kvm_pmd_addr_end(addr, end);
		if (!pmd_none(*pmd)) {
			if (kvm_pmd_huge(*pmd)) {
				pmd_t old_pmd = *pmd;

				pmd_clear(pmd);
				kvm_tlb_flush_vmid_ipa(kvm, addr);

				kvm_flush_dcache_pmd(old_pmd);

				put_page(virt_to_page(pmd));
			} else {
				unmap_ptes(kvm, pmd, addr, next);
			}
		}
	} while (pmd++, addr = next, addr != end);

	if (kvm_pmd_table_empty(kvm, start_pmd))
		clear_pud_entry(kvm, pud, start_addr);
}
static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
		       phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t next, start_addr = addr;
	pud_t *pud, *start_pud;

	start_pud = pud = pud_offset(pgd, addr);
	do {
		next = kvm_pud_addr_end(addr, end);
		if (!pud_none(*pud)) {
			if (pud_huge(*pud)) {
				pud_t old_pud = *pud;

				pud_clear(pud);
				kvm_tlb_flush_vmid_ipa(kvm, addr);

				kvm_flush_dcache_pud(old_pud);

				put_page(virt_to_page(pud));
			} else {
				unmap_pmds(kvm, pud, addr, next);
			}
		}
	} while (pud++, addr = next, addr != end);

	if (kvm_pud_table_empty(kvm, start_pud))
		clear_pgd_entry(kvm, pgd, start_addr);
}
static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
			phys_addr_t start, u64 size)
{
	pgd_t *pgd;
	phys_addr_t addr = start, end = start + size;
	phys_addr_t next;

	pgd = pgdp + pgd_index(addr);
	do {
		next = kvm_pgd_addr_end(addr, end);
		unmap_puds(kvm, pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}
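/*
 * Added note (editorial, not from the original source): unmap_range() is
 * used both for stage-2 tables (kvm != NULL) and for the HYP tables
 * (kvm == NULL, see free_boot_hyp_pgd()/free_hyp_pgds() below), which is
 * why the clear_*_entry() helpers pass NULL as the mm to the p*_free()
 * calls and why kvm_tlb_flush_vmid_ipa() tolerates a NULL kvm.
 */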
static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
			      phys_addr_t addr, phys_addr_t end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_none(*pte) &&
		    (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
			kvm_flush_dcache_pte(*pte);
	} while (pte++, addr += PAGE_SIZE, addr != end);
}
static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
			      phys_addr_t addr, phys_addr_t end)
{
	pmd_t *pmd;
	phys_addr_t next;

	pmd = pmd_offset(pud, addr);
	do {
		next = kvm_pmd_addr_end(addr, end);
		if (!pmd_none(*pmd)) {
			if (kvm_pmd_huge(*pmd))
				kvm_flush_dcache_pmd(*pmd);
			else
				stage2_flush_ptes(kvm, pmd, addr, next);
		}
	} while (pmd++, addr = next, addr != end);
}
static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
			      phys_addr_t addr, phys_addr_t end)
{
	pud_t *pud;
	phys_addr_t next;

	pud = pud_offset(pgd, addr);
	do {
		next = kvm_pud_addr_end(addr, end);
		if (!pud_none(*pud)) {
			if (pud_huge(*pud))
				kvm_flush_dcache_pud(*pud);
			else
				stage2_flush_pmds(kvm, pud, addr, next);
		}
	} while (pud++, addr = next, addr != end);
}
static void stage2_flush_memslot(struct kvm *kvm,
				 struct kvm_memory_slot *memslot)
{
	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
	phys_addr_t next;
	pgd_t *pgd;

	pgd = kvm->arch.pgd + pgd_index(addr);
	do {
		next = kvm_pgd_addr_end(addr, end);
		stage2_flush_puds(kvm, pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}
/**
 * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
 * @kvm: The struct kvm pointer
 *
 * Go through the stage 2 page tables and invalidate any cache lines
 * backing memory already mapped to the VM.
 */
static void stage2_flush_vm(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots)
		stage2_flush_memslot(kvm, memslot);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}
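/*
 * Added note (editorial): stage2_flush_vm() takes the srcu read lock to
 * keep the memslot array stable and mmu_lock to keep the stage-2 tables
 * stable while they are walked; the same pattern is used by
 * stage2_unmap_vm() further down.
 */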
/**
 * free_boot_hyp_pgd - free HYP boot page tables
 *
 * Free the HYP boot page tables. The bounce page is also freed.
 */
void free_boot_hyp_pgd(void)
{
	mutex_lock(&kvm_hyp_pgd_mutex);

	if (boot_hyp_pgd) {
		unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
		unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
		free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
		boot_hyp_pgd = NULL;
	}

	if (hyp_pgd)
		unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);

	free_page((unsigned long)init_bounce_page);
	init_bounce_page = NULL;

	mutex_unlock(&kvm_hyp_pgd_mutex);
}
/**
 * free_hyp_pgds - free Hyp-mode page tables
 *
 * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
 * therefore contains either mappings in the kernel memory area (above
 * PAGE_OFFSET), or device mappings in the vmalloc range (from
 * VMALLOC_START to VMALLOC_END).
 *
 * boot_hyp_pgd should only map two pages for the init code.
 */
void free_hyp_pgds(void)
{
	unsigned long addr;

	free_boot_hyp_pgd();

	mutex_lock(&kvm_hyp_pgd_mutex);

	if (hyp_pgd) {
		for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
			unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
		for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
			unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);

		free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
		hyp_pgd = NULL;
	}

	mutex_unlock(&kvm_hyp_pgd_mutex);
}
static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
				    unsigned long end, unsigned long pfn,
				    pgprot_t prot)
{
	pte_t *pte;
	unsigned long addr;

	addr = start;
	do {
		pte = pte_offset_kernel(pmd, addr);
		kvm_set_pte(pte, pfn_pte(pfn, prot));
		get_page(virt_to_page(pte));
		kvm_flush_dcache_to_poc(pte, sizeof(*pte));
		pfn++;
	} while (addr += PAGE_SIZE, addr != end);
}
static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
				   unsigned long end, unsigned long pfn,
				   pgprot_t prot)
{
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, next;

	addr = start;
	do {
		pmd = pmd_offset(pud, addr);

		BUG_ON(pmd_sect(*pmd));

		if (pmd_none(*pmd)) {
			pte = pte_alloc_one_kernel(NULL, addr);
			if (!pte) {
				kvm_err("Cannot allocate Hyp pte\n");
				return -ENOMEM;
			}
			pmd_populate_kernel(NULL, pmd, pte);
			get_page(virt_to_page(pmd));
			kvm_flush_dcache_to_poc(pmd, sizeof(*pmd));
		}

		next = pmd_addr_end(addr, end);

		create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);

	return 0;
}
static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
				   unsigned long end, unsigned long pfn,
				   pgprot_t prot)
{
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr, next;
	int ret;

	addr = start;
	do {
		pud = pud_offset(pgd, addr);

		if (pud_none_or_clear_bad(pud)) {
			pmd = pmd_alloc_one(NULL, addr);
			if (!pmd) {
				kvm_err("Cannot allocate Hyp pmd\n");
				return -ENOMEM;
			}
			pud_populate(NULL, pud, pmd);
			get_page(virt_to_page(pud));
			kvm_flush_dcache_to_poc(pud, sizeof(*pud));
		}

		next = pud_addr_end(addr, end);
		ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
		if (ret)
			return ret;
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);

	return 0;
}
static int __create_hyp_mappings(pgd_t *pgdp,
				 unsigned long start, unsigned long end,
				 unsigned long pfn, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	unsigned long addr, next;
	int err = 0;

	mutex_lock(&kvm_hyp_pgd_mutex);
	addr = start & PAGE_MASK;
	end = PAGE_ALIGN(end);
	do {
		pgd = pgdp + pgd_index(addr);

		if (pgd_none(*pgd)) {
			pud = pud_alloc_one(NULL, addr);
			if (!pud) {
				kvm_err("Cannot allocate Hyp pud\n");
				err = -ENOMEM;
				goto out;
			}
			pgd_populate(NULL, pgd, pud);
			get_page(virt_to_page(pgd));
			kvm_flush_dcache_to_poc(pgd, sizeof(*pgd));
		}

		next = pgd_addr_end(addr, end);
		err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
		if (err)
			goto out;
		pfn += (next - addr) >> PAGE_SHIFT;
	} while (addr = next, addr != end);
out:
	mutex_unlock(&kvm_hyp_pgd_mutex);
	return err;
}
static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
{
	if (!is_vmalloc_addr(kaddr)) {
		BUG_ON(!virt_addr_valid(kaddr));
		return __pa(kaddr);
	} else {
		return page_to_phys(vmalloc_to_page(kaddr)) +
		       offset_in_page(kaddr);
	}
}
/**
 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
 * @from:	The virtual kernel start address of the range
 * @to:		The virtual kernel end address of the range (exclusive)
 *
 * The same virtual address as the kernel virtual address is also used
 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
 * physical pages.
 */
int create_hyp_mappings(void *from, void *to)
{
	phys_addr_t phys_addr;
	unsigned long virt_addr;
	unsigned long start = KERN_TO_HYP((unsigned long)from);
	unsigned long end = KERN_TO_HYP((unsigned long)to);

	start = start & PAGE_MASK;
	end = PAGE_ALIGN(end);

	for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
		int err;

		phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
		err = __create_hyp_mappings(hyp_pgd, virt_addr,
					    virt_addr + PAGE_SIZE,
					    __phys_to_pfn(phys_addr),
					    PAGE_HYP);
		if (err)
			return err;
	}

	return 0;
}
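/*
 * Illustrative usage (added, hypothetical): KVM init code maps kernel VA
 * ranges such as its own text into HYP with calls along the lines of
 *
 *	err = create_hyp_mappings(some_start, some_end);
 *
 * where "some_start"/"some_end" are placeholders for a kernel VA range;
 * the real call sites live in the arch KVM init code (arch/arm/kvm/arm.c).
 */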
/**
 * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
 * @from:	The kernel start VA of the range
 * @to:		The kernel end VA of the range (exclusive)
 * @phys_addr:	The physical start address which gets mapped
 *
 * The resulting HYP VA is the same as the kernel VA, modulo
 * HYP_PAGE_OFFSET.
 */
int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
{
	unsigned long start = KERN_TO_HYP((unsigned long)from);
	unsigned long end = KERN_TO_HYP((unsigned long)to);

	/* Check for a valid kernel IO mapping */
	if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
		return -EINVAL;

	return __create_hyp_mappings(hyp_pgd, start, end,
				     __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
}
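/*
 * Illustrative usage (added, hypothetical): a device mapping obtained with
 * ioremap() can be made visible to HYP at the same VA (modulo
 * HYP_PAGE_OFFSET), roughly:
 *
 *	void __iomem *base = ioremap(phys, size);
 *	err = create_hyp_io_mappings(base, base + size, phys);
 *
 * "phys", "size" and "base" are placeholders, not names from this file.
 */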
/**
 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Allocates the 1st level table only of size defined by S2_PGD_ORDER (can
 * support either full 40-bit input addresses or limited to 32-bit input
 * addresses). Clears the allocated pages.
 *
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
int kvm_alloc_stage2_pgd(struct kvm *kvm)
{
	int ret;
	pgd_t *pgd;

	if (kvm->arch.pgd != NULL) {
		kvm_err("kvm_arch already initialized?\n");
		return -EINVAL;
	}

	if (KVM_PREALLOC_LEVEL > 0) {
		/*
		 * Allocate fake pgd for the page table manipulation macros to
		 * work. This is not used by the hardware and we have no
		 * alignment requirement for this allocation.
		 */
		pgd = (pgd_t *)kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t),
				       GFP_KERNEL | __GFP_ZERO);
	} else {
		/*
		 * Allocate actual first-level Stage-2 page table used by the
		 * hardware for Stage-2 page table walks.
		 */
		pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, S2_PGD_ORDER);
	}

	if (!pgd)
		return -ENOMEM;

	ret = kvm_prealloc_hwpgd(kvm, pgd);
	if (ret)
		goto out_err;

	kvm->arch.pgd = pgd;
	return 0;
out_err:
	if (KVM_PREALLOC_LEVEL > 0)
		kfree(pgd);
	else
		free_pages((unsigned long)pgd, S2_PGD_ORDER);
	return ret;
}
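/*
 * Added note (editorial): when KVM_PREALLOC_LEVEL > 0 (arm64 configurations
 * where the stage-2 input size needs fewer levels than the host page-table
 * macros assume), the pgd allocated above is only a software-walkable shim;
 * the table the hardware actually walks is set up by kvm_prealloc_hwpgd().
 * On 32-bit ARM, KVM_PREALLOC_LEVEL is 0 and the __get_free_pages()
 * allocation is used directly.
 */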
/**
 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
 * @kvm:   The VM pointer
 * @start: The intermediate physical base address of the range to unmap
 * @size:  The size of the area to unmap
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * with things behind our backs.
 */
static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
{
	unmap_range(kvm, kvm->arch.pgd, start, size);
}
static void stage2_unmap_memslot(struct kvm *kvm,
				 struct kvm_memory_slot *memslot)
{
	hva_t hva = memslot->userspace_addr;
	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
	phys_addr_t size = PAGE_SIZE * memslot->npages;
	hva_t reg_end = hva + size;

	/*
	 * A memory region could potentially cover multiple VMAs, and any holes
	 * between them, so iterate over all of them to find out if we should
	 * unmap any of them.
	 *
	 *     +--------------------------------------------+
	 * +---------------+----------------+   +----------------+
	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
	 * +---------------+----------------+   +----------------+
	 *     |               memory region                |
	 *     +--------------------------------------------+
	 */
	do {
		struct vm_area_struct *vma = find_vma(current->mm, hva);
		hva_t vm_start, vm_end;

		if (!vma || vma->vm_start >= reg_end)
			break;

		/*
		 * Take the intersection of this VMA with the memory region
		 */
		vm_start = max(hva, vma->vm_start);
		vm_end = min(reg_end, vma->vm_end);

		if (!(vma->vm_flags & VM_PFNMAP)) {
			gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
			unmap_stage2_range(kvm, gpa, vm_end - vm_start);
		}
		hva = vm_end;
	} while (hva < reg_end);
}
/**
 * stage2_unmap_vm - Unmap Stage-2 RAM mappings
 * @kvm: The struct kvm pointer
 *
 * Go through the memregions and unmap any regular RAM
 * backing memory already mapped to the VM.
 */
void stage2_unmap_vm(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots)
		stage2_unmap_memslot(kvm, memslot);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
}
/**
 * kvm_free_stage2_pgd - free all stage-2 tables
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
 * underlying level-2 and level-3 tables before freeing the actual level-1 table
 * and setting the struct pointer to NULL.
 *
 * Note we don't need locking here as this is only called when the VM is
 * destroyed, which can only be done once.
 */
void kvm_free_stage2_pgd(struct kvm *kvm)
{
	if (kvm->arch.pgd == NULL)
		return;

	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
	if (KVM_PREALLOC_LEVEL > 0)
		kfree(kvm->arch.pgd);
	else
		free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
	kvm->arch.pgd = NULL;
}
static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			     phys_addr_t addr)
{
	pgd_t *pgd;
	pud_t *pud;

	pgd = kvm->arch.pgd + pgd_index(addr);
	if (WARN_ON(pgd_none(*pgd))) {
		if (!cache)
			return NULL;
		pud = mmu_memory_cache_alloc(cache);
		pgd_populate(NULL, pgd, pud);
		get_page(virt_to_page(pgd));
	}

	return pud_offset(pgd, addr);
}
static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			     phys_addr_t addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = stage2_get_pud(kvm, cache, addr);
	if (pud_none(*pud)) {
		if (!cache)
			return NULL;
		pmd = mmu_memory_cache_alloc(cache);
		pud_populate(NULL, pud, pmd);
		get_page(virt_to_page(pud));
	}

	return pmd_offset(pud, addr);
}
static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
			       *cache, phys_addr_t addr, const pmd_t *new_pmd)
{
	pmd_t *pmd, old_pmd;

	pmd = stage2_get_pmd(kvm, cache, addr);
	VM_BUG_ON(!pmd);

	/*
	 * Mapping in huge pages should only happen through a fault. If a
	 * page is merged into a transparent huge page, the individual
	 * subpages of that huge page should be unmapped through MMU
	 * notifiers before we get here.
	 *
	 * Merging of CompoundPages is not supported; they should be split
	 * first, unmapped, merged, and mapped back in on-demand.
	 */
	VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));

	old_pmd = *pmd;
	kvm_set_pmd(pmd, *new_pmd);
	if (pmd_present(old_pmd))
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	else
		get_page(virt_to_page(pmd));
	return 0;
}
static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			  phys_addr_t addr, const pte_t *new_pte, bool iomap)
{
	pmd_t *pmd;
	pte_t *pte, old_pte;

	/* Create stage-2 page table mapping - Levels 0 and 1 */
	pmd = stage2_get_pmd(kvm, cache, addr);
	if (!pmd) {
		/*
		 * Ignore calls from kvm_set_spte_hva for unallocated
		 * address ranges.
		 */
		return 0;
	}

	/* Create stage-2 page mappings - Level 2 */
	if (pmd_none(*pmd)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pte = mmu_memory_cache_alloc(cache);
		pmd_populate_kernel(NULL, pmd, pte);
		get_page(virt_to_page(pmd));
	}

	pte = pte_offset_kernel(pmd, addr);

	if (iomap && pte_present(*pte))
		return -EFAULT;

	/* Create 2nd stage page table mapping - Level 3 */
	old_pte = *pte;
	kvm_set_pte(pte, *new_pte);
	if (pte_present(old_pte))
		kvm_tlb_flush_vmid_ipa(kvm, addr);
	else
		get_page(virt_to_page(pte));

	return 0;
}
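/*
 * Added note (editorial): the "iomap" argument makes stage2_set_pte()
 * refuse (-EFAULT) to overwrite an already-present PTE, which is what
 * kvm_phys_addr_ioremap() below relies on; normal faults pass iomap=false
 * and simply replace the old entry after invalidating its TLB entry.
 */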
/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 *
 * @kvm:	The KVM pointer
 * @guest_ipa:	The IPA at which to insert the mapping
 * @pa:		The physical address of the device
 * @size:	The size of the mapping
 */
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable)
{
	phys_addr_t addr, end;
	int ret = 0;
	unsigned long pfn;
	struct kvm_mmu_memory_cache cache = { 0, };

	end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
	pfn = __phys_to_pfn(pa);

	for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);

		if (writable)
			kvm_set_s2pte_writable(&pte);

		ret = mmu_topup_memory_cache(&cache, KVM_MMU_CACHE_MIN_PAGES,
					     KVM_NR_MEM_OBJS);
		if (ret)
			goto out;
		spin_lock(&kvm->mmu_lock);
		ret = stage2_set_pte(kvm, &cache, addr, &pte, true);
		spin_unlock(&kvm->mmu_lock);
		if (ret)
			goto out;

		pfn++;
	}

out:
	mmu_free_memory_cache(&cache);
	return ret;
}
static bool transparent_hugepage_adjust(pfn_t *pfnp, phys_addr_t *ipap)
{
	pfn_t pfn = *pfnp;
	gfn_t gfn = *ipap >> PAGE_SHIFT;

	if (PageTransCompound(pfn_to_page(pfn))) {
		unsigned long mask;
		/*
		 * The address we faulted on is backed by a transparent huge
		 * page. However, because we map the compound huge page and
		 * not the individual tail page, we need to transfer the
		 * refcount to the head page. We have to be careful that the
		 * THP doesn't start to split while we are adjusting the
		 * refcounts.
		 *
		 * We are sure this doesn't happen, because mmu_notifier_retry
		 * was successful and we are holding the mmu_lock, so if this
		 * THP is trying to split, it will be blocked in the mmu
		 * notifier before touching any of the pages, specifically
		 * before being able to call __split_huge_page_refcount().
		 *
		 * We can therefore safely transfer the refcount from PG_tail
		 * to PG_head and switch the pfn from a tail page to the head
		 * page accordingly.
		 */
		mask = PTRS_PER_PMD - 1;
		VM_BUG_ON((gfn & mask) != (pfn & mask));
		if (pfn & mask) {
			*ipap &= PMD_MASK;
			kvm_release_pfn_clean(pfn);
			pfn &= ~mask;
			kvm_get_pfn(pfn);
			*pfnp = pfn;
		}

		return true;
	}

	return false;
}
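/*
 * Added illustration (not from the original source): if the faulting IPA
 * maps a tail page of a THP, the function rounds the IPA down to the PMD
 * boundary and moves the reference from the tail to the head page,
 * conceptually:
 *
 *	*ipap &= PMD_MASK;		// IPA of the head of the huge page
 *	pfn   &= ~(PTRS_PER_PMD - 1);	// pfn of the head page
 *
 * so the caller can install a single block mapping with
 * stage2_set_pmd_huge().
 */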
static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}
static bool kvm_is_device_pfn(unsigned long pfn)
{
	return !pfn_valid(pfn);
}
static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
				      unsigned long size, bool uncached)
{
	__coherent_cache_guest_page(vcpu, pfn, size, uncached);
}
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
			  struct kvm_memory_slot *memslot, unsigned long hva,
			  unsigned long fault_status)
{
	int ret;
	bool write_fault, writable, hugetlb = false, force_pte = false;
	unsigned long mmu_seq;
	gfn_t gfn = fault_ipa >> PAGE_SHIFT;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
	struct vm_area_struct *vma;
	pfn_t pfn;
	pgprot_t mem_type = PAGE_S2;
	bool fault_ipa_uncached;

	write_fault = kvm_is_write_fault(vcpu);
	if (fault_status == FSC_PERM && !write_fault) {
		kvm_err("Unexpected L2 read permission error\n");
		return -EFAULT;
	}

	/* Let's check if we will get back a huge page backed by hugetlbfs */
	down_read(&current->mm->mmap_sem);
	vma = find_vma_intersection(current->mm, hva, hva + 1);
	if (unlikely(!vma)) {
		kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
		up_read(&current->mm->mmap_sem);
		return -EFAULT;
	}

	if (is_vm_hugetlb_page(vma)) {
		hugetlb = true;
		gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
	} else {
		/*
		 * Pages belonging to memslots that don't have the same
		 * alignment for userspace and IPA cannot be mapped using
		 * block descriptors even if the pages belong to a THP for
		 * the process, because the stage-2 block descriptor will
		 * cover more than a single THP and we lose atomicity for
		 * unmapping, updates, and splits of the THP or other pages
		 * in the stage-2 block range.
		 */
		if ((memslot->userspace_addr & ~PMD_MASK) !=
		    ((memslot->base_gfn << PAGE_SHIFT) & ~PMD_MASK))
			force_pte = true;
	}
	up_read(&current->mm->mmap_sem);

	/* We need minimum second+third level pages */
	ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
				     KVM_NR_MEM_OBJS);
	if (ret)
		return ret;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	/*
	 * Ensure the read of mmu_notifier_seq happens before we call
	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
	 * the page we just got a reference to gets unmapped before we have a
	 * chance to grab the mmu_lock, which ensures that if the page gets
	 * unmapped afterwards, the call to kvm_unmap_hva will take it away
	 * from us again properly. This smp_rmb() interacts with the smp_wmb()
	 * in kvm_mmu_notifier_invalidate_<page|range_end>.
	 */
	smp_rmb();

	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
	if (is_error_pfn(pfn))
		return -EFAULT;

	if (kvm_is_device_pfn(pfn))
		mem_type = PAGE_S2_DEVICE;

	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(kvm, mmu_seq))
		goto out_unlock;
	if (!hugetlb && !force_pte)
		hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);

	fault_ipa_uncached = memslot->flags & KVM_MEMSLOT_INCOHERENT;

	if (hugetlb) {
		pmd_t new_pmd = pfn_pmd(pfn, mem_type);
		new_pmd = pmd_mkhuge(new_pmd);
		if (writable) {
			kvm_set_s2pmd_writable(&new_pmd);
			kvm_set_pfn_dirty(pfn);
		}
		coherent_cache_guest_page(vcpu, pfn, PMD_SIZE, fault_ipa_uncached);
		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
	} else {
		pte_t new_pte = pfn_pte(pfn, mem_type);
		if (writable) {
			kvm_set_s2pte_writable(&new_pte);
			kvm_set_pfn_dirty(pfn);
		}
		coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE, fault_ipa_uncached);
		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
			pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
	}

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return ret;
}
/**
 * kvm_handle_guest_abort - handles all 2nd stage aborts
 * @vcpu:	the VCPU pointer
 * @run:	the kvm_run structure
 *
 * Any abort that gets to the host is almost guaranteed to be caused by a
 * missing second stage translation table entry, which can mean that either the
 * guest simply needs more memory and we must allocate an appropriate page or it
 * can mean that the guest tried to access I/O memory, which is emulated by user
 * space. The distinction is based on the IPA causing the fault and whether this
 * memory region has been registered as standard RAM by user space.
 */
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long fault_status;
	phys_addr_t fault_ipa;
	struct kvm_memory_slot *memslot;
	unsigned long hva;
	bool is_iabt, write_fault, writable;
	gfn_t gfn;
	int ret, idx;

	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);

	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
			      kvm_vcpu_get_hfar(vcpu), fault_ipa);

	/* Check the stage-2 fault is trans. fault or write fault */
	fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
	if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
		kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
			kvm_vcpu_trap_get_class(vcpu),
			(unsigned long)kvm_vcpu_trap_get_fault(vcpu),
			(unsigned long)kvm_vcpu_get_hsr(vcpu));
		return -EFAULT;
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	gfn = fault_ipa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(vcpu->kvm, gfn);
	hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
	write_fault = kvm_is_write_fault(vcpu);
	if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
		if (is_iabt) {
			/* Prefetch Abort on I/O address */
			kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
			ret = 1;
			goto out_unlock;
		}

		/*
		 * The IPA is reported as [MAX:12], so we need to
		 * complement it with the bottom 12 bits from the
		 * faulting VA. This is always 12 bits, irrespective
		 * of the page size.
		 */
		fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
		ret = io_mem_abort(vcpu, run, fault_ipa);
		goto out_unlock;
	}

	/* Userspace should not be able to register out-of-bounds IPAs */
	VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);

	ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
	if (ret == 0)
		ret = 1;
out_unlock:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	return ret;
}
static void handle_hva_to_gpa(struct kvm *kvm,
			      unsigned long start,
			      unsigned long end,
			      void (*handler)(struct kvm *kvm,
					      gpa_t gpa, void *data),
			      void *data)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);

	/* we only care about the pages that the guest sees */
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;

		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

		for (; gfn < gfn_end; ++gfn) {
			gpa_t gpa = gfn << PAGE_SHIFT;
			handler(kvm, gpa, data);
		}
	}
}
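/*
 * Added note (editorial): handle_hva_to_gpa() is the generic "apply this
 * callback to every guest page backed by [start, end)" walker used by the
 * MMU-notifier entry points that follow: kvm_unmap_hva(),
 * kvm_unmap_hva_range() and kvm_set_spte_hva().
 */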
static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
	unmap_stage2_range(kvm, gpa, PAGE_SIZE);
}
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	unsigned long end = hva + PAGE_SIZE;

	if (!kvm->arch.pgd)
		return 0;

	trace_kvm_unmap_hva(hva);
	handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
	return 0;
}
int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end)
{
	if (!kvm->arch.pgd)
		return 0;

	trace_kvm_unmap_hva_range(start, end);
	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
	return 0;
}
static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
	pte_t *pte = (pte_t *)data;

	stage2_set_pte(kvm, NULL, gpa, pte, false);
}
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	unsigned long end = hva + PAGE_SIZE;
	pte_t stage2_pte;

	if (!kvm->arch.pgd)
		return;

	trace_kvm_set_spte_hva(hva);
	stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
}
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}
phys_addr_t kvm_mmu_get_httbr(void)
{
	return virt_to_phys(hyp_pgd);
}

phys_addr_t kvm_mmu_get_boot_httbr(void)
{
	return virt_to_phys(boot_hyp_pgd);
}

phys_addr_t kvm_get_idmap_vector(void)
{
	return hyp_idmap_vector;
}
int kvm_mmu_init(void)
{
	int err;

	hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start);
	hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
	hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);

	if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) {
		/*
		 * Our init code is crossing a page boundary. Allocate
		 * a bounce page, copy the code over and use that.
		 */
		size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
		phys_addr_t phys_base;

		init_bounce_page = (void *)__get_free_page(GFP_KERNEL);
		if (!init_bounce_page) {
			kvm_err("Couldn't allocate HYP init bounce page\n");
			err = -ENOMEM;
			goto out;
		}

		memcpy(init_bounce_page, __hyp_idmap_text_start, len);
		/*
		 * Warning: the code we just copied to the bounce page
		 * must be flushed to the point of coherency.
		 * Otherwise, the data may be sitting in L2, and HYP
		 * mode won't be able to observe it as it runs with
		 * caches off at that point.
		 */
		kvm_flush_dcache_to_poc(init_bounce_page, len);

		phys_base = kvm_virt_to_phys(init_bounce_page);
		hyp_idmap_vector += phys_base - hyp_idmap_start;
		hyp_idmap_start = phys_base;
		hyp_idmap_end = phys_base + len;

		kvm_info("Using HYP init bounce page @%lx\n",
			 (unsigned long)phys_base);
	}

	hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
	boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);

	if (!hyp_pgd || !boot_hyp_pgd) {
		kvm_err("Hyp mode PGD not allocated\n");
		err = -ENOMEM;
		goto out;
	}

	/* Create the idmap in the boot page tables */
	err = __create_hyp_mappings(boot_hyp_pgd,
				    hyp_idmap_start, hyp_idmap_end,
				    __phys_to_pfn(hyp_idmap_start),
				    PAGE_HYP);
	if (err) {
		kvm_err("Failed to idmap %lx-%lx\n",
			hyp_idmap_start, hyp_idmap_end);
		goto out;
	}

	/* Map the very same page at the trampoline VA */
	err = __create_hyp_mappings(boot_hyp_pgd,
				    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
				    __phys_to_pfn(hyp_idmap_start),
				    PAGE_HYP);
	if (err) {
		kvm_err("Failed to map trampoline @%lx into boot HYP pgd\n",
			TRAMPOLINE_VA);
		goto out;
	}

	/* Map the same page again into the runtime page tables */
	err = __create_hyp_mappings(hyp_pgd,
				    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
				    __phys_to_pfn(hyp_idmap_start),
				    PAGE_HYP);
	if (err) {
		kvm_err("Failed to map trampoline @%lx into runtime HYP pgd\n",
			TRAMPOLINE_VA);
		goto out;
	}

	return 0;
out:
	free_hyp_pgds();
	return err;
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	hva_t hva = mem->userspace_addr;
	hva_t reg_end = hva + mem->memory_size;
	bool writable = !(mem->flags & KVM_MEM_READONLY);
	int ret = 0;

	if (change != KVM_MR_CREATE && change != KVM_MR_MOVE)
		return 0;

	/*
	 * Prevent userspace from creating a memory region outside of the IPA
	 * space addressable by the KVM guest IPA space.
	 */
	if (memslot->base_gfn + memslot->npages >=
	    (KVM_PHYS_SIZE >> PAGE_SHIFT))
		return -EFAULT;

	/*
	 * A memory region could potentially cover multiple VMAs, and any holes
	 * between them, so iterate over all of them to find out if we can map
	 * any of them right now.
	 *
	 *     +--------------------------------------------+
	 * +---------------+----------------+   +----------------+
	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
	 * +---------------+----------------+   +----------------+
	 *     |               memory region                |
	 *     +--------------------------------------------+
	 */
	do {
		struct vm_area_struct *vma = find_vma(current->mm, hva);
		hva_t vm_start, vm_end;

		if (!vma || vma->vm_start >= reg_end)
			break;

		/*
		 * Mapping a read-only VMA is only allowed if the
		 * memory region is configured as read-only.
		 */
		if (writable && !(vma->vm_flags & VM_WRITE)) {
			ret = -EPERM;
			break;
		}

		/*
		 * Take the intersection of this VMA with the memory region
		 */
		vm_start = max(hva, vma->vm_start);
		vm_end = min(reg_end, vma->vm_end);

		if (vma->vm_flags & VM_PFNMAP) {
			gpa_t gpa = mem->guest_phys_addr +
				    (vm_start - mem->userspace_addr);
			phys_addr_t pa = (vma->vm_pgoff << PAGE_SHIFT) +
					 vm_start - vma->vm_start;

			ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
						    vm_end - vm_start,
						    writable);
			if (ret)
				break;
		}
		hva = vm_end;
	} while (hva < reg_end);

	spin_lock(&kvm->mmu_lock);
	if (ret)
		unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
	else
		stage2_flush_memslot(kvm, memslot);
	spin_unlock(&kvm->mmu_lock);
	return ret;
}
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	/*
	 * Readonly memslots are not incoherent with the caches by definition,
	 * but in practice, they are used mostly to emulate ROMs or NOR flashes
	 * that the guest may consider devices and hence map as uncached.
	 * To prevent incoherency issues in these cases, tag all readonly
	 * regions as incoherent.
	 */
	if (slot->flags & KVM_MEM_READONLY)
		slot->flags |= KVM_MEMSLOT_INCOHERENT;
	return 0;
}
void kvm_arch_memslots_updated(struct kvm *kvm)
{
}
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
	phys_addr_t size = slot->npages << PAGE_SHIFT;

	spin_lock(&kvm->mmu_lock);
	unmap_stage2_range(kvm, gpa, size);
	spin_unlock(&kvm->mmu_lock);
}
/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 *
 * Main problems:
 * - S/W ops are local to a CPU (not broadcast)
 * - We have line migration behind our back (speculation)
 * - System caches don't support S/W at all (damn!)
 *
 * In the face of the above, the best we can do is to try and convert
 * S/W ops to VA ops. Because the guest is not allowed to infer the
 * S/W to PA mapping, it can only use S/W to nuke the whole cache,
 * which is a rather good thing for us.
 *
 * Also, it is only used when turning caches on/off ("The expected
 * usage of the cache maintenance instructions that operate by set/way
 * is associated with the cache maintenance instructions associated
 * with the powerdown and powerup of caches, if this is required by
 * the implementation.").
 *
 * We use the following policy:
 *
 * - If we trap a S/W operation, we enable VM trapping to detect
 *   caches being turned on/off, and do a full clean.
 *
 * - We flush the caches on both caches being turned on and off.
 *
 * - Once the caches are enabled, we stop trapping VM ops.
 */
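/*
 * Added illustration (not from the original source): under the policy
 * above, a guest sequence such as
 *
 *	set/way clean op	-> trapped, kvm_set_way_flush():
 *				   stage2_flush_vm() + HCR_TVM set
 *	SCTLR write (MMU/cache	-> trapped via HCR_TVM, kvm_toggle_cache():
 *	turned off or back on)	   stage2_flush_vm(), then trapping dropped
 *
 * is converted into full stage-2 clean+invalidate passes at the points
 * where correctness actually requires them.
 */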
void kvm_set_way_flush(struct kvm_vcpu *vcpu)
{
	unsigned long hcr = vcpu_get_hcr(vcpu);

	/*
	 * If this is the first time we do a S/W operation
	 * (i.e. HCR_TVM not set) flush the whole memory, and set the
	 * VM trapping.
	 *
	 * Otherwise, rely on the VM trapping to wait for the MMU +
	 * Caches to be turned off. At that point, we'll be able to
	 * clean the caches again.
	 */
	if (!(hcr & HCR_TVM)) {
		trace_kvm_set_way_flush(*vcpu_pc(vcpu),
					vcpu_has_cache_enabled(vcpu));
		stage2_flush_vm(vcpu->kvm);
		vcpu_set_hcr(vcpu, hcr | HCR_TVM);
	}
}
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
{
	bool now_enabled = vcpu_has_cache_enabled(vcpu);

	/*
	 * If switching the MMU+caches on, need to invalidate the caches.
	 * If switching it off, need to clean the caches.
	 * Clean + invalidate does the trick always.
	 */
	if (now_enabled != was_enabled)
		stage2_flush_vm(vcpu->kvm);

	/* Caches are now on, stop trapping VM ops (until a S/W op) */
	if (now_enabled)
		vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM);

	trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
}