/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#ifndef __ARM_KVM_MMU_H__
#define __ARM_KVM_MMU_H__

#include <asm/memory.h>

/*
 * We directly use the kernel VA for the HYP, as we can directly share
 * the mapping (HTTBR "covers" TTBR1).
 */
#define HYP_PAGE_OFFSET_MASK	UL(~0)
#define HYP_PAGE_OFFSET		PAGE_OFFSET
#define KERN_TO_HYP(kva)	(kva)

/*
 * Our virtual mapping for the boot-time MMU-enable code. Must be
 * shared across all the page-tables. Conveniently, we use the vectors
 * page, where no kernel data will ever be shared with HYP.
 */
#define TRAMPOLINE_VA	UL(CONFIG_VECTORS_BASE)

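/*
 * Rationale (summarising the boot flow): the code at TRAMPOLINE_VA runs
 * while TTBR is switched from the boot HYP page-tables to the runtime
 * ones, so it must appear at the same VA in both; reusing the vectors
 * page guarantees that VA never collides with a kernel data mapping.
 */
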
/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation
 * levels.
 */
#define KVM_MMU_CACHE_MIN_PAGES	2

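/*
 * In other words: a stage-2 translation walks two levels of tables below
 * the pre-allocated pgd (a pmd table, then a pte table), so resolving a
 * single guest fault can require at most two fresh pages; the MMU memory
 * cache is topped up to this amount before each walk.
 */
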
#ifndef __ASSEMBLY__

#include <asm/cacheflush.h>
#include <asm/pgalloc.h>

int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);

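/*
 * Illustrative use (the callers live outside this header): KVM's init
 * code maps its own text and per-CPU data into HYP with
 * create_hyp_mappings(start, end), while device regions such as the
 * GIC's virtual interface control registers go through
 * create_hyp_io_mappings() so HYP code can reach them by VA.
 */
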
void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

static inline void kvm_set_pmd(pmd_t *pmd, pmd_t new_pmd)
{
	*pmd = new_pmd;
	flush_pmd_entry(pmd);
}

static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
{
	*pte = new_pte;
	/*
	 * flush_pmd_entry just takes a void pointer and cleans the necessary
	 * cache entries, so we can reuse the function for ptes.
	 */
	flush_pmd_entry(pte);
}

static inline void kvm_clean_pgd(pgd_t *pgd)
{
	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
}

static inline void kvm_clean_pmd(pmd_t *pmd)
{
	clean_dcache_area(pmd, PTRS_PER_PMD * sizeof(pmd_t));
}

static inline void kvm_clean_pmd_entry(pmd_t *pmd)
{
	clean_pmd_entry(pmd);
}

static inline void kvm_clean_pte(pte_t *pte)
{
	clean_pte_table(pte);
}

static inline void kvm_set_s2pte_writable(pte_t *pte)
{
	pte_val(*pte) |= L_PTE_S2_RDWR;
}

static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
{
	pmd_val(*pmd) |= L_PMD_S2_RDWR;
}

/* Open coded p*d_addr_end that can deal with 64bit addresses */
#define kvm_pgd_addr_end(addr, end)					\
({	u64 __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;		\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})

#define kvm_pud_addr_end(addr, end)	(end)

#define kvm_pmd_addr_end(addr, end)					\
({	u64 __boundary = ((addr) + PMD_SIZE) & PMD_MASK;		\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})

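/*
 * The u64 arithmetic matters here: stage-2 addresses are guest IPAs that
 * can exceed 32 bits under LPAE, so computing (addr + PMD_SIZE) in a
 * plain unsigned long could truncate and yield a boundary below addr.
 * The "- 1" on both sides of the comparison handles a boundary that
 * wraps to 0 at the very top of the address space: 0 - 1 underflows to
 * the maximum value, so (end) is correctly chosen instead.
 */
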
static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}

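/*
 * This works because the stage-2 code takes a page reference each time
 * it installs an entry in a table page and drops it when the entry is
 * torn down; a count of exactly one (the allocation's own reference)
 * therefore means the table maps nothing and may be freed.
 */
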
#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
#define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
#define kvm_pud_table_empty(kvm, pudp) (0)

#define KVM_PREALLOC_LEVEL	0

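/*
 * On 32-bit ARM the stage-2 tables have exactly the shape the hardware
 * walks, so nothing needs pre-allocating (level 0) and the hardware pgd
 * is just kvm->arch.pgd. The stubs below mirror the arm64 interface,
 * where folded page-table levels force a separately managed hardware
 * pgd.
 */
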
static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd)
{
	return 0;
}

static inline void kvm_free_hwpgd(struct kvm *kvm) { }

static inline void *kvm_get_hwpgd(struct kvm *kvm)
{
	return kvm->arch.pgd;
}

#define kvm_flush_dcache_to_poc(a,l)	__cpuc_flush_dcache_area((a), (l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
}

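/*
 * The 0b101 mask covers SCTLR.M (bit 0, MMU enable) and SCTLR.C (bit 2,
 * data cache enable): the guest's caches only count as on when both are
 * set.
 */
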
static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
					     unsigned long size,
					     bool ipa_uncached)
{
	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
		kvm_flush_dcache_to_poc((void *)hva, size);

	/*
	 * If we are going to insert an instruction page and the icache is
	 * either VIPT or PIPT, there is a potential problem where the host
	 * (or another VM) may have used the same page as this guest, and we
	 * read incorrect data from the icache.  If we're using a PIPT cache,
	 * we can invalidate just that page, but if we are using a VIPT cache
	 * we need to invalidate the entire icache - damn shame - as written
	 * in the ARM ARM (DDI 0406C.b - Page B3-1393).
	 *
	 * VIVT caches are tagged using both the ASID and the VMID and don't
	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
	 */
	if (icache_is_pipt()) {
		__cpuc_coherent_user_range(hva, hva + size);
	} else if (!icache_is_vivt_asid_tagged()) {
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}

#define kvm_virt_to_phys(x)		virt_to_idmap((unsigned long)(x))

void stage2_flush_vm(struct kvm *kvm);

#endif	/* !__ASSEMBLY__ */

#endif /* __ARM_KVM_MMU_H__ */