Commit | Line | Data |
---|---|---|
749cf76c CD |
1 | /* |
2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | |
3 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify | |
6 | * it under the terms of the GNU General Public License, version 2, as | |
7 | * published by the Free Software Foundation. | |
8 | * | |
9 | * This program is distributed in the hope that it will be useful, | |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | * GNU General Public License for more details. | |
13 | * | |
14 | * You should have received a copy of the GNU General Public License | |
15 | * along with this program; if not, write to the Free Software | |
16 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | |
17 | */ | |
342cd0ab CD |
18 | |
19 | #include <linux/mman.h> | |
20 | #include <linux/kvm_host.h> | |
21 | #include <linux/io.h> | |
ad361f09 | 22 | #include <linux/hugetlb.h> |
45e96ea6 | 23 | #include <trace/events/kvm.h> |
342cd0ab | 24 | #include <asm/pgalloc.h> |
94f8e641 | 25 | #include <asm/cacheflush.h> |
342cd0ab CD |
26 | #include <asm/kvm_arm.h> |
27 | #include <asm/kvm_mmu.h> | |
45e96ea6 | 28 | #include <asm/kvm_mmio.h> |
d5d8184d | 29 | #include <asm/kvm_asm.h> |
94f8e641 | 30 | #include <asm/kvm_emulate.h> |
d5d8184d CD |
31 | |
32 | #include "trace.h" | |
342cd0ab CD |
33 | |
34 | extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[]; | |
35 | ||
5a677ce0 | 36 | static pgd_t *boot_hyp_pgd; |
2fb41059 | 37 | static pgd_t *hyp_pgd; |
342cd0ab CD |
38 | static DEFINE_MUTEX(kvm_hyp_pgd_mutex); |
39 | ||
5a677ce0 MZ |
40 | static void *init_bounce_page; |
41 | static unsigned long hyp_idmap_start; | |
42 | static unsigned long hyp_idmap_end; | |
43 | static phys_addr_t hyp_idmap_vector; | |
44 | ||
38f791a4 | 45 | #define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t)) |
5d4e08c4 | 46 | |
9b5fdb97 | 47 | #define kvm_pmd_huge(_x) (pmd_huge(_x) || pmd_trans_huge(_x)) |
c6473555 | 48 | #define kvm_pud_huge(_x) pud_huge(_x) |
ad361f09 | 49 | |
15a49a44 MS |
50 | #define KVM_S2PTE_FLAG_IS_IOMAP (1UL << 0) |
51 | #define KVM_S2_FLAG_LOGGING_ACTIVE (1UL << 1) | |
52 | ||
53 | static bool memslot_is_logging(struct kvm_memory_slot *memslot) | |
54 | { | |
15a49a44 | 55 | return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY); |
7276030a MS |
56 | } |
57 | ||
58 | /** | |
59 | * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8 | |
60 | * @kvm: pointer to kvm structure. | |
61 | * | |
62 | * Interface to HYP function to flush all VM TLB entries | |
63 | */ | |
64 | void kvm_flush_remote_tlbs(struct kvm *kvm) | |
65 | { | |
66 | kvm_call_hyp(__kvm_tlb_flush_vmid, kvm); | |
15a49a44 MS |
67 | } |
68 | ||
48762767 | 69 | static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) |
d5d8184d | 70 | { |
d4cb9df5 MZ |
71 | /* |
72 | * This function also gets called when dealing with HYP page | |
73 | * tables. As HYP doesn't have an associated struct kvm (and | |
74 | * the HYP page tables are fairly static), we don't do | |
75 | * anything there. | |
76 | */ | |
77 | if (kvm) | |
78 | kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa); | |
d5d8184d CD |
79 | } |
80 | ||
15a49a44 MS |
81 | /** |
82 | * stage2_dissolve_pmd() - clear and flush huge PMD entry | |
83 | * @kvm: pointer to kvm structure. | |
84 | * @addr: IPA | |
85 | * @pmd: pmd pointer for IPA | |
86 | * | |
87 | * Clears the PMD entry, flushes the 1st and 2nd stage TLBs for addr, and marks | |
88 | * all pages in the range dirty. | |
89 | */ | |
90 | static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd) | |
91 | { | |
92 | if (!kvm_pmd_huge(*pmd)) | |
93 | return; | |
94 | ||
95 | pmd_clear(pmd); | |
96 | kvm_tlb_flush_vmid_ipa(kvm, addr); | |
97 | put_page(virt_to_page(pmd)); | |
98 | } | |
99 | ||
d5d8184d CD |
100 | static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, |
101 | int min, int max) | |
102 | { | |
103 | void *page; | |
104 | ||
105 | BUG_ON(max > KVM_NR_MEM_OBJS); | |
106 | if (cache->nobjs >= min) | |
107 | return 0; | |
108 | while (cache->nobjs < max) { | |
109 | page = (void *)__get_free_page(PGALLOC_GFP); | |
110 | if (!page) | |
111 | return -ENOMEM; | |
112 | cache->objects[cache->nobjs++] = page; | |
113 | } | |
114 | return 0; | |
115 | } | |
116 | ||
117 | static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc) | |
118 | { | |
119 | while (mc->nobjs) | |
120 | free_page((unsigned long)mc->objects[--mc->nobjs]); | |
121 | } | |
122 | ||
123 | static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) | |
124 | { | |
125 | void *p; | |
126 | ||
127 | BUG_ON(!mc || !mc->nobjs); | |
128 | p = mc->objects[--mc->nobjs]; | |
129 | return p; | |
130 | } | |
131 | ||
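/*
 * Illustrative, standalone sketch (plain user-space C, not part of this
 * file) of the discipline the three cache helpers above support: the cache
 * is topped up with allocations that may sleep *before* mmu_lock is taken,
 * and objects are then consumed under the lock without ever sleeping (see
 * user_mem_abort() further down).  All names, sizes and the use of calloc()
 * here are illustrative assumptions only.
 */
#include <stdio.h>
#include <stdlib.h>

#define EX_NR_MEM_OBJS 40

struct ex_memory_cache {
	int nobjs;
	void *objects[EX_NR_MEM_OBJS];
};

static int ex_topup(struct ex_memory_cache *cache, int min, int max)
{
	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < max) {
		void *page = calloc(1, 4096);	/* "may sleep": done unlocked */
		if (!page)
			return -1;
		cache->objects[cache->nobjs++] = page;
	}
	return 0;
}

static void *ex_alloc(struct ex_memory_cache *cache)
{
	return cache->objects[--cache->nobjs];	/* never fails, never sleeps */
}

int main(void)
{
	struct ex_memory_cache cache = { 0, };

	if (ex_topup(&cache, 2, 8))		/* phase 1: before taking the lock */
		return 1;

	/* phase 2: "under the lock", only pre-allocated pages are handed out */
	void *pud_table = ex_alloc(&cache);
	void *pmd_table = ex_alloc(&cache);
	printf("used 2 pages, %d left in the cache\n", cache.nobjs);

	free(pud_table);
	free(pmd_table);
	while (cache.nobjs)
		free(cache.objects[--cache.nobjs]);
	return 0;
}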
4f853a71 | 132 | static void clear_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr) |
979acd5e | 133 | { |
4f853a71 CD |
134 | pud_t *pud_table __maybe_unused = pud_offset(pgd, 0); |
135 | pgd_clear(pgd); | |
136 | kvm_tlb_flush_vmid_ipa(kvm, addr); | |
137 | pud_free(NULL, pud_table); | |
138 | put_page(virt_to_page(pgd)); | |
979acd5e MZ |
139 | } |
140 | ||
d4cb9df5 | 141 | static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr) |
342cd0ab | 142 | { |
4f853a71 CD |
143 | pmd_t *pmd_table = pmd_offset(pud, 0); |
144 | VM_BUG_ON(pud_huge(*pud)); | |
145 | pud_clear(pud); | |
146 | kvm_tlb_flush_vmid_ipa(kvm, addr); | |
147 | pmd_free(NULL, pmd_table); | |
4f728276 MZ |
148 | put_page(virt_to_page(pud)); |
149 | } | |
342cd0ab | 150 | |
d4cb9df5 | 151 | static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr) |
4f728276 | 152 | { |
4f853a71 CD |
153 | pte_t *pte_table = pte_offset_kernel(pmd, 0); |
154 | VM_BUG_ON(kvm_pmd_huge(*pmd)); | |
155 | pmd_clear(pmd); | |
156 | kvm_tlb_flush_vmid_ipa(kvm, addr); | |
157 | pte_free_kernel(NULL, pte_table); | |
4f728276 MZ |
158 | put_page(virt_to_page(pmd)); |
159 | } | |
160 | ||
4f853a71 CD |
161 | static void unmap_ptes(struct kvm *kvm, pmd_t *pmd, |
162 | phys_addr_t addr, phys_addr_t end) | |
4f728276 | 163 | { |
4f853a71 CD |
164 | phys_addr_t start_addr = addr; |
165 | pte_t *pte, *start_pte; | |
166 | ||
167 | start_pte = pte = pte_offset_kernel(pmd, addr); | |
168 | do { | |
169 | if (!pte_none(*pte)) { | |
170 | kvm_set_pte(pte, __pte(0)); | |
171 | put_page(virt_to_page(pte)); | |
172 | kvm_tlb_flush_vmid_ipa(kvm, addr); | |
173 | } | |
174 | } while (pte++, addr += PAGE_SIZE, addr != end); | |
175 | ||
38f791a4 | 176 | if (kvm_pte_table_empty(kvm, start_pte)) |
4f853a71 | 177 | clear_pmd_entry(kvm, pmd, start_addr); |
342cd0ab CD |
178 | } |
179 | ||
4f853a71 CD |
180 | static void unmap_pmds(struct kvm *kvm, pud_t *pud, |
181 | phys_addr_t addr, phys_addr_t end) | |
000d3996 | 182 | { |
4f853a71 CD |
183 | phys_addr_t next, start_addr = addr; |
184 | pmd_t *pmd, *start_pmd; | |
000d3996 | 185 | |
4f853a71 CD |
186 | start_pmd = pmd = pmd_offset(pud, addr); |
187 | do { | |
188 | next = kvm_pmd_addr_end(addr, end); | |
189 | if (!pmd_none(*pmd)) { | |
190 | if (kvm_pmd_huge(*pmd)) { | |
191 | pmd_clear(pmd); | |
192 | kvm_tlb_flush_vmid_ipa(kvm, addr); | |
193 | put_page(virt_to_page(pmd)); | |
194 | } else { | |
195 | unmap_ptes(kvm, pmd, addr, next); | |
196 | } | |
ad361f09 | 197 | } |
4f853a71 | 198 | } while (pmd++, addr = next, addr != end); |
ad361f09 | 199 | |
38f791a4 | 200 | if (kvm_pmd_table_empty(kvm, start_pmd)) |
4f853a71 CD |
201 | clear_pud_entry(kvm, pud, start_addr); |
202 | } | |
000d3996 | 203 | |
4f853a71 CD |
204 | static void unmap_puds(struct kvm *kvm, pgd_t *pgd, |
205 | phys_addr_t addr, phys_addr_t end) | |
206 | { | |
207 | phys_addr_t next, start_addr = addr; | |
208 | pud_t *pud, *start_pud; | |
4f728276 | 209 | |
4f853a71 CD |
210 | start_pud = pud = pud_offset(pgd, addr); |
211 | do { | |
212 | next = kvm_pud_addr_end(addr, end); | |
213 | if (!pud_none(*pud)) { | |
214 | if (pud_huge(*pud)) { | |
215 | pud_clear(pud); | |
216 | kvm_tlb_flush_vmid_ipa(kvm, addr); | |
217 | put_page(virt_to_page(pud)); | |
218 | } else { | |
219 | unmap_pmds(kvm, pud, addr, next); | |
4f728276 MZ |
220 | } |
221 | } | |
4f853a71 | 222 | } while (pud++, addr = next, addr != end); |
4f728276 | 223 | |
38f791a4 | 224 | if (kvm_pud_table_empty(kvm, start_pud)) |
4f853a71 CD |
225 | clear_pgd_entry(kvm, pgd, start_addr); |
226 | } | |
227 | ||
228 | ||
229 | static void unmap_range(struct kvm *kvm, pgd_t *pgdp, | |
230 | phys_addr_t start, u64 size) | |
231 | { | |
232 | pgd_t *pgd; | |
233 | phys_addr_t addr = start, end = start + size; | |
234 | phys_addr_t next; | |
235 | ||
236 | pgd = pgdp + pgd_index(addr); | |
237 | do { | |
238 | next = kvm_pgd_addr_end(addr, end); | |
7cbb87d6 MR |
239 | if (!pgd_none(*pgd)) |
240 | unmap_puds(kvm, pgd, addr, next); | |
4f853a71 | 241 | } while (pgd++, addr = next, addr != end); |
000d3996 MZ |
242 | } |
243 | ||
9d218a1f MZ |
244 | static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd, |
245 | phys_addr_t addr, phys_addr_t end) | |
246 | { | |
247 | pte_t *pte; | |
248 | ||
249 | pte = pte_offset_kernel(pmd, addr); | |
250 | do { | |
251 | if (!pte_none(*pte)) { | |
252 | hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT); | |
253 | kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE); | |
254 | } | |
255 | } while (pte++, addr += PAGE_SIZE, addr != end); | |
256 | } | |
257 | ||
258 | static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud, | |
259 | phys_addr_t addr, phys_addr_t end) | |
260 | { | |
261 | pmd_t *pmd; | |
262 | phys_addr_t next; | |
263 | ||
264 | pmd = pmd_offset(pud, addr); | |
265 | do { | |
266 | next = kvm_pmd_addr_end(addr, end); | |
267 | if (!pmd_none(*pmd)) { | |
268 | if (kvm_pmd_huge(*pmd)) { | |
269 | hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT); | |
270 | kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE); | |
271 | } else { | |
272 | stage2_flush_ptes(kvm, pmd, addr, next); | |
273 | } | |
274 | } | |
275 | } while (pmd++, addr = next, addr != end); | |
276 | } | |
277 | ||
278 | static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd, | |
279 | phys_addr_t addr, phys_addr_t end) | |
280 | { | |
281 | pud_t *pud; | |
282 | phys_addr_t next; | |
283 | ||
284 | pud = pud_offset(pgd, addr); | |
285 | do { | |
286 | next = kvm_pud_addr_end(addr, end); | |
287 | if (!pud_none(*pud)) { | |
288 | if (pud_huge(*pud)) { | |
289 | hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT); | |
290 | kvm_flush_dcache_to_poc((void*)hva, PUD_SIZE); | |
291 | } else { | |
292 | stage2_flush_pmds(kvm, pud, addr, next); | |
293 | } | |
294 | } | |
295 | } while (pud++, addr = next, addr != end); | |
296 | } | |
297 | ||
298 | static void stage2_flush_memslot(struct kvm *kvm, | |
299 | struct kvm_memory_slot *memslot) | |
300 | { | |
301 | phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; | |
302 | phys_addr_t end = addr + PAGE_SIZE * memslot->npages; | |
303 | phys_addr_t next; | |
304 | pgd_t *pgd; | |
305 | ||
306 | pgd = kvm->arch.pgd + pgd_index(addr); | |
307 | do { | |
308 | next = kvm_pgd_addr_end(addr, end); | |
309 | stage2_flush_puds(kvm, pgd, addr, next); | |
310 | } while (pgd++, addr = next, addr != end); | |
311 | } | |
312 | ||
313 | /** | |
314 | * stage2_flush_vm - Invalidate cache for pages mapped in stage 2 | |
315 | * @kvm: The struct kvm pointer | |
316 | * | |
317 | * Go through the stage 2 page tables and invalidate any cache lines | |
318 | * backing memory already mapped to the VM. | |
319 | */ | |
320 | void stage2_flush_vm(struct kvm *kvm) | |
321 | { | |
322 | struct kvm_memslots *slots; | |
323 | struct kvm_memory_slot *memslot; | |
324 | int idx; | |
325 | ||
326 | idx = srcu_read_lock(&kvm->srcu); | |
327 | spin_lock(&kvm->mmu_lock); | |
328 | ||
329 | slots = kvm_memslots(kvm); | |
330 | kvm_for_each_memslot(memslot, slots) | |
331 | stage2_flush_memslot(kvm, memslot); | |
332 | ||
333 | spin_unlock(&kvm->mmu_lock); | |
334 | srcu_read_unlock(&kvm->srcu, idx); | |
335 | } | |
336 | ||
d157f4a5 MZ |
337 | /** |
338 | * free_boot_hyp_pgd - free HYP boot page tables | |
339 | * | |
340 | * Free the HYP boot page tables. The bounce page is also freed. | |
341 | */ | |
342 | void free_boot_hyp_pgd(void) | |
343 | { | |
344 | mutex_lock(&kvm_hyp_pgd_mutex); | |
345 | ||
346 | if (boot_hyp_pgd) { | |
d4cb9df5 MZ |
347 | unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE); |
348 | unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); | |
38f791a4 | 349 | free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order); |
d157f4a5 MZ |
350 | boot_hyp_pgd = NULL; |
351 | } | |
352 | ||
353 | if (hyp_pgd) | |
d4cb9df5 | 354 | unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); |
d157f4a5 | 355 | |
5d4e08c4 | 356 | free_page((unsigned long)init_bounce_page); |
d157f4a5 MZ |
357 | init_bounce_page = NULL; |
358 | ||
359 | mutex_unlock(&kvm_hyp_pgd_mutex); | |
360 | } | |
361 | ||
342cd0ab | 362 | /** |
4f728276 | 363 | * free_hyp_pgds - free Hyp-mode page tables |
342cd0ab | 364 | * |
5a677ce0 MZ |
365 | * Assumes hyp_pgd is a page table used strictly in Hyp-mode and |
366 | * therefore contains either mappings in the kernel memory area (above | |
367 | * PAGE_OFFSET), or device mappings in the vmalloc range (from | |
368 | * VMALLOC_START to VMALLOC_END). | |
369 | * | |
370 | * boot_hyp_pgd should only map two pages for the init code. | |
342cd0ab | 371 | */ |
4f728276 | 372 | void free_hyp_pgds(void) |
342cd0ab | 373 | { |
342cd0ab CD |
374 | unsigned long addr; |
375 | ||
d157f4a5 | 376 | free_boot_hyp_pgd(); |
4f728276 | 377 | |
d157f4a5 | 378 | mutex_lock(&kvm_hyp_pgd_mutex); |
5a677ce0 | 379 | |
4f728276 MZ |
380 | if (hyp_pgd) { |
381 | for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE) | |
d4cb9df5 | 382 | unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); |
4f728276 | 383 | for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE) |
d4cb9df5 MZ |
384 | unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); |
385 | ||
38f791a4 | 386 | free_pages((unsigned long)hyp_pgd, hyp_pgd_order); |
d157f4a5 | 387 | hyp_pgd = NULL; |
4f728276 MZ |
388 | } |
389 | ||
342cd0ab CD |
390 | mutex_unlock(&kvm_hyp_pgd_mutex); |
391 | } | |
392 | ||
393 | static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start, | |
6060df84 MZ |
394 | unsigned long end, unsigned long pfn, |
395 | pgprot_t prot) | |
342cd0ab CD |
396 | { |
397 | pte_t *pte; | |
398 | unsigned long addr; | |
342cd0ab | 399 | |
3562c76d MZ |
400 | addr = start; |
401 | do { | |
6060df84 MZ |
402 | pte = pte_offset_kernel(pmd, addr); |
403 | kvm_set_pte(pte, pfn_pte(pfn, prot)); | |
4f728276 | 404 | get_page(virt_to_page(pte)); |
5a677ce0 | 405 | kvm_flush_dcache_to_poc(pte, sizeof(*pte)); |
6060df84 | 406 | pfn++; |
3562c76d | 407 | } while (addr += PAGE_SIZE, addr != end); |
342cd0ab CD |
408 | } |
409 | ||
410 | static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start, | |
6060df84 MZ |
411 | unsigned long end, unsigned long pfn, |
412 | pgprot_t prot) | |
342cd0ab CD |
413 | { |
414 | pmd_t *pmd; | |
415 | pte_t *pte; | |
416 | unsigned long addr, next; | |
417 | ||
3562c76d MZ |
418 | addr = start; |
419 | do { | |
6060df84 | 420 | pmd = pmd_offset(pud, addr); |
342cd0ab CD |
421 | |
422 | BUG_ON(pmd_sect(*pmd)); | |
423 | ||
424 | if (pmd_none(*pmd)) { | |
6060df84 | 425 | pte = pte_alloc_one_kernel(NULL, addr); |
342cd0ab CD |
426 | if (!pte) { |
427 | kvm_err("Cannot allocate Hyp pte\n"); | |
428 | return -ENOMEM; | |
429 | } | |
430 | pmd_populate_kernel(NULL, pmd, pte); | |
4f728276 | 431 | get_page(virt_to_page(pmd)); |
5a677ce0 | 432 | kvm_flush_dcache_to_poc(pmd, sizeof(*pmd)); |
342cd0ab CD |
433 | } |
434 | ||
435 | next = pmd_addr_end(addr, end); | |
436 | ||
6060df84 MZ |
437 | create_hyp_pte_mappings(pmd, addr, next, pfn, prot); |
438 | pfn += (next - addr) >> PAGE_SHIFT; | |
3562c76d | 439 | } while (addr = next, addr != end); |
342cd0ab CD |
440 | |
441 | return 0; | |
442 | } | |
443 | ||
38f791a4 CD |
444 | static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start, |
445 | unsigned long end, unsigned long pfn, | |
446 | pgprot_t prot) | |
447 | { | |
448 | pud_t *pud; | |
449 | pmd_t *pmd; | |
450 | unsigned long addr, next; | |
451 | int ret; | |
452 | ||
453 | addr = start; | |
454 | do { | |
455 | pud = pud_offset(pgd, addr); | |
456 | ||
457 | if (pud_none_or_clear_bad(pud)) { | |
458 | pmd = pmd_alloc_one(NULL, addr); | |
459 | if (!pmd) { | |
460 | kvm_err("Cannot allocate Hyp pmd\n"); | |
461 | return -ENOMEM; | |
462 | } | |
463 | pud_populate(NULL, pud, pmd); | |
464 | get_page(virt_to_page(pud)); | |
465 | kvm_flush_dcache_to_poc(pud, sizeof(*pud)); | |
466 | } | |
467 | ||
468 | next = pud_addr_end(addr, end); | |
469 | ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot); | |
470 | if (ret) | |
471 | return ret; | |
472 | pfn += (next - addr) >> PAGE_SHIFT; | |
473 | } while (addr = next, addr != end); | |
474 | ||
475 | return 0; | |
476 | } | |
477 | ||
6060df84 MZ |
478 | static int __create_hyp_mappings(pgd_t *pgdp, |
479 | unsigned long start, unsigned long end, | |
480 | unsigned long pfn, pgprot_t prot) | |
342cd0ab | 481 | { |
342cd0ab CD |
482 | pgd_t *pgd; |
483 | pud_t *pud; | |
342cd0ab CD |
484 | unsigned long addr, next; |
485 | int err = 0; | |
486 | ||
342cd0ab | 487 | mutex_lock(&kvm_hyp_pgd_mutex); |
3562c76d MZ |
488 | addr = start & PAGE_MASK; |
489 | end = PAGE_ALIGN(end); | |
490 | do { | |
6060df84 | 491 | pgd = pgdp + pgd_index(addr); |
342cd0ab | 492 | |
38f791a4 CD |
493 | if (pgd_none(*pgd)) { |
494 | pud = pud_alloc_one(NULL, addr); | |
495 | if (!pud) { | |
496 | kvm_err("Cannot allocate Hyp pud\n"); | |
342cd0ab CD |
497 | err = -ENOMEM; |
498 | goto out; | |
499 | } | |
38f791a4 CD |
500 | pgd_populate(NULL, pgd, pud); |
501 | get_page(virt_to_page(pgd)); | |
502 | kvm_flush_dcache_to_poc(pgd, sizeof(*pgd)); | |
342cd0ab CD |
503 | } |
504 | ||
505 | next = pgd_addr_end(addr, end); | |
38f791a4 | 506 | err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot); |
342cd0ab CD |
507 | if (err) |
508 | goto out; | |
6060df84 | 509 | pfn += (next - addr) >> PAGE_SHIFT; |
3562c76d | 510 | } while (addr = next, addr != end); |
342cd0ab CD |
511 | out: |
512 | mutex_unlock(&kvm_hyp_pgd_mutex); | |
513 | return err; | |
514 | } | |
515 | ||
40c2729b CD |
516 | static phys_addr_t kvm_kaddr_to_phys(void *kaddr) |
517 | { | |
518 | if (!is_vmalloc_addr(kaddr)) { | |
519 | BUG_ON(!virt_addr_valid(kaddr)); | |
520 | return __pa(kaddr); | |
521 | } else { | |
522 | return page_to_phys(vmalloc_to_page(kaddr)) + | |
523 | offset_in_page(kaddr); | |
524 | } | |
525 | } | |
526 | ||
342cd0ab | 527 | /** |
06e8c3b0 | 528 | * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode |
342cd0ab CD |
529 | * @from: The virtual kernel start address of the range |
530 | * @to: The virtual kernel end address of the range (exclusive) | |
531 | * | |
06e8c3b0 MZ |
532 | * The Hyp-mode mapping uses the same virtual address as the kernel virtual | |
533 | * address (modulo HYP_PAGE_OFFSET), pointing to the same underlying | |
534 | * physical pages. | |
342cd0ab CD |
535 | */ |
536 | int create_hyp_mappings(void *from, void *to) | |
537 | { | |
40c2729b CD |
538 | phys_addr_t phys_addr; |
539 | unsigned long virt_addr; | |
6060df84 MZ |
540 | unsigned long start = KERN_TO_HYP((unsigned long)from); |
541 | unsigned long end = KERN_TO_HYP((unsigned long)to); | |
542 | ||
40c2729b CD |
543 | start = start & PAGE_MASK; |
544 | end = PAGE_ALIGN(end); | |
6060df84 | 545 | |
40c2729b CD |
546 | for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) { |
547 | int err; | |
6060df84 | 548 | |
40c2729b CD |
549 | phys_addr = kvm_kaddr_to_phys(from + virt_addr - start); |
550 | err = __create_hyp_mappings(hyp_pgd, virt_addr, | |
551 | virt_addr + PAGE_SIZE, | |
552 | __phys_to_pfn(phys_addr), | |
553 | PAGE_HYP); | |
554 | if (err) | |
555 | return err; | |
556 | } | |
557 | ||
558 | return 0; | |
342cd0ab CD |
559 | } |
560 | ||
561 | /** | |
06e8c3b0 MZ |
562 | * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode |
563 | * @from: The kernel start VA of the range | |
564 | * @to: The kernel end VA of the range (exclusive) | |
6060df84 | 565 | * @phys_addr: The physical start address which gets mapped |
06e8c3b0 MZ |
566 | * |
567 | * The resulting HYP VA is the same as the kernel VA, modulo | |
568 | * HYP_PAGE_OFFSET. | |
342cd0ab | 569 | */ |
6060df84 | 570 | int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr) |
342cd0ab | 571 | { |
6060df84 MZ |
572 | unsigned long start = KERN_TO_HYP((unsigned long)from); |
573 | unsigned long end = KERN_TO_HYP((unsigned long)to); | |
574 | ||
575 | /* Check for a valid kernel IO mapping */ | |
576 | if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1)) | |
577 | return -EINVAL; | |
578 | ||
579 | return __create_hyp_mappings(hyp_pgd, start, end, | |
580 | __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE); | |
342cd0ab CD |
581 | } |
582 | ||
d5d8184d CD |
583 | /** |
584 | * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation. | |
585 | * @kvm: The KVM struct pointer for the VM. | |
586 | * | |
587 | * Allocates only the 1st level table, whose size is defined by S2_PGD_ORDER | |
588 | * (it can support either full 40-bit input addresses or be limited to 32-bit | |
589 | * input addresses). Clears the allocated pages. | |
590 | * | |
591 | * Note we don't need locking here as this is only called when the VM is | |
592 | * created, which can only be done once. | |
593 | */ | |
594 | int kvm_alloc_stage2_pgd(struct kvm *kvm) | |
595 | { | |
38f791a4 | 596 | int ret; |
d5d8184d CD |
597 | pgd_t *pgd; |
598 | ||
599 | if (kvm->arch.pgd != NULL) { | |
600 | kvm_err("kvm_arch already initialized?\n"); | |
601 | return -EINVAL; | |
602 | } | |
603 | ||
38f791a4 CD |
604 | if (KVM_PREALLOC_LEVEL > 0) { |
605 | /* | |
606 | * Allocate fake pgd for the page table manipulation macros to | |
607 | * work. This is not used by the hardware and we have no | |
608 | * alignment requirement for this allocation. | |
609 | */ | |
610 | pgd = (pgd_t *)kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t), | |
611 | GFP_KERNEL | __GFP_ZERO); | |
612 | } else { | |
613 | /* | |
614 | * Allocate actual first-level Stage-2 page table used by the | |
615 | * hardware for Stage-2 page table walks. | |
616 | */ | |
617 | pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, S2_PGD_ORDER); | |
618 | } | |
619 | ||
d5d8184d CD |
620 | if (!pgd) |
621 | return -ENOMEM; | |
622 | ||
38f791a4 CD |
623 | ret = kvm_prealloc_hwpgd(kvm, pgd); |
624 | if (ret) | |
625 | goto out_err; | |
626 | ||
c62ee2b2 | 627 | kvm_clean_pgd(pgd); |
d5d8184d | 628 | kvm->arch.pgd = pgd; |
d5d8184d | 629 | return 0; |
38f791a4 CD |
630 | out_err: |
631 | if (KVM_PREALLOC_LEVEL > 0) | |
632 | kfree(pgd); | |
633 | else | |
634 | free_pages((unsigned long)pgd, S2_PGD_ORDER); | |
635 | return ret; | |
d5d8184d CD |
636 | } |
637 | ||
d5d8184d CD |
638 | /** |
639 | * unmap_stage2_range -- Clear stage2 page table entries to unmap a range | |
640 | * @kvm: The VM pointer | |
641 | * @start: The intermediate physical base address of the range to unmap | |
642 | * @size: The size of the area to unmap | |
643 | * | |
644 | * Clear a range of stage-2 mappings, lowering the various ref-counts. Must | |
645 | * be called while holding mmu_lock (unless for freeing the stage2 pgd before | |
646 | * destroying the VM), otherwise another faulting VCPU may come in and mess | |
647 | * with things behind our backs. | |
648 | */ | |
649 | static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) | |
650 | { | |
d4cb9df5 | 651 | unmap_range(kvm, kvm->arch.pgd, start, size); |
d5d8184d CD |
652 | } |
653 | ||
957db105 CD |
654 | static void stage2_unmap_memslot(struct kvm *kvm, |
655 | struct kvm_memory_slot *memslot) | |
656 | { | |
657 | hva_t hva = memslot->userspace_addr; | |
658 | phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; | |
659 | phys_addr_t size = PAGE_SIZE * memslot->npages; | |
660 | hva_t reg_end = hva + size; | |
661 | ||
662 | /* | |
663 | * A memory region could potentially cover multiple VMAs, and any holes | |
664 | * between them, so iterate over all of them to find out if we should | |
665 | * unmap any of them. | |
666 | * | |
667 | * +--------------------------------------------+ | |
668 | * +---------------+----------------+ +----------------+ | |
669 | * | : VMA 1 | VMA 2 | | VMA 3 : | | |
670 | * +---------------+----------------+ +----------------+ | |
671 | * | memory region | | |
672 | * +--------------------------------------------+ | |
673 | */ | |
674 | do { | |
675 | struct vm_area_struct *vma = find_vma(current->mm, hva); | |
676 | hva_t vm_start, vm_end; | |
677 | ||
678 | if (!vma || vma->vm_start >= reg_end) | |
679 | break; | |
680 | ||
681 | /* | |
682 | * Take the intersection of this VMA with the memory region | |
683 | */ | |
684 | vm_start = max(hva, vma->vm_start); | |
685 | vm_end = min(reg_end, vma->vm_end); | |
686 | ||
687 | if (!(vma->vm_flags & VM_PFNMAP)) { | |
688 | gpa_t gpa = addr + (vm_start - memslot->userspace_addr); | |
689 | unmap_stage2_range(kvm, gpa, vm_end - vm_start); | |
690 | } | |
691 | hva = vm_end; | |
692 | } while (hva < reg_end); | |
693 | } | |
694 | ||
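/*
 * Illustrative, standalone sketch (plain user-space C, 4K pages assumed,
 * all addresses made up) of the VMA/memslot intersection performed above:
 * a VMA covering only part of the memory region produces a sub-range
 * unmap at the corresponding guest physical addresses.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }
static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
	uint64_t uaddr    = 0x7f0000000000ULL;	   /* memslot->userspace_addr */
	uint64_t base_ipa = 0x80000000ULL;	   /* base_gfn << PAGE_SHIFT  */
	uint64_t reg_end  = uaddr + (64ULL << 12); /* 64-page memory region   */

	uint64_t vma_start = uaddr + (8ULL << 12); /* VMA covers pages 8..31  */
	uint64_t vma_end   = uaddr + (32ULL << 12);

	uint64_t vm_start = max_u64(uaddr, vma_start);
	uint64_t vm_end   = min_u64(reg_end, vma_end);
	uint64_t gpa      = base_ipa + (vm_start - uaddr);

	/* prints: unmap_stage2_range(kvm, 0x80008000, 0x18000) */
	printf("unmap_stage2_range(kvm, 0x%llx, 0x%llx)\n",
	       (unsigned long long)gpa,
	       (unsigned long long)(vm_end - vm_start));
	return 0;
}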
695 | /** | |
696 | * stage2_unmap_vm - Unmap Stage-2 RAM mappings | |
697 | * @kvm: The struct kvm pointer | |
698 | * | |
699 | * Go through the memory regions and unmap any regular RAM | |
700 | * backing memory already mapped to the VM. | |
701 | */ | |
702 | void stage2_unmap_vm(struct kvm *kvm) | |
703 | { | |
704 | struct kvm_memslots *slots; | |
705 | struct kvm_memory_slot *memslot; | |
706 | int idx; | |
707 | ||
708 | idx = srcu_read_lock(&kvm->srcu); | |
709 | spin_lock(&kvm->mmu_lock); | |
710 | ||
711 | slots = kvm_memslots(kvm); | |
712 | kvm_for_each_memslot(memslot, slots) | |
713 | stage2_unmap_memslot(kvm, memslot); | |
714 | ||
715 | spin_unlock(&kvm->mmu_lock); | |
716 | srcu_read_unlock(&kvm->srcu, idx); | |
717 | } | |
718 | ||
d5d8184d CD |
719 | /** |
720 | * kvm_free_stage2_pgd - free all stage-2 tables | |
721 | * @kvm: The KVM struct pointer for the VM. | |
722 | * | |
723 | * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all | |
724 | * underlying level-2 and level-3 tables before freeing the actual level-1 table | |
725 | * and setting the struct pointer to NULL. | |
726 | * | |
727 | * Note we don't need locking here as this is only called when the VM is | |
728 | * destroyed, which can only be done once. | |
729 | */ | |
730 | void kvm_free_stage2_pgd(struct kvm *kvm) | |
731 | { | |
732 | if (kvm->arch.pgd == NULL) | |
733 | return; | |
734 | ||
735 | unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); | |
38f791a4 CD |
736 | kvm_free_hwpgd(kvm); |
737 | if (KVM_PREALLOC_LEVEL > 0) | |
738 | kfree(kvm->arch.pgd); | |
739 | else | |
740 | free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER); | |
d5d8184d CD |
741 | kvm->arch.pgd = NULL; |
742 | } | |
743 | ||
38f791a4 | 744 | static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, |
ad361f09 | 745 | phys_addr_t addr) |
d5d8184d CD |
746 | { |
747 | pgd_t *pgd; | |
748 | pud_t *pud; | |
d5d8184d | 749 | |
d5d8184d | 750 | pgd = kvm->arch.pgd + pgd_index(addr); |
38f791a4 CD |
751 | if (WARN_ON(pgd_none(*pgd))) { |
752 | if (!cache) | |
753 | return NULL; | |
754 | pud = mmu_memory_cache_alloc(cache); | |
755 | pgd_populate(NULL, pgd, pud); | |
756 | get_page(virt_to_page(pgd)); | |
757 | } | |
758 | ||
759 | return pud_offset(pgd, addr); | |
760 | } | |
761 | ||
762 | static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, | |
763 | phys_addr_t addr) | |
764 | { | |
765 | pud_t *pud; | |
766 | pmd_t *pmd; | |
767 | ||
768 | pud = stage2_get_pud(kvm, cache, addr); | |
d5d8184d CD |
769 | if (pud_none(*pud)) { |
770 | if (!cache) | |
ad361f09 | 771 | return NULL; |
d5d8184d CD |
772 | pmd = mmu_memory_cache_alloc(cache); |
773 | pud_populate(NULL, pud, pmd); | |
d5d8184d | 774 | get_page(virt_to_page(pud)); |
c62ee2b2 MZ |
775 | } |
776 | ||
ad361f09 CD |
777 | return pmd_offset(pud, addr); |
778 | } | |
779 | ||
780 | static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache | |
781 | *cache, phys_addr_t addr, const pmd_t *new_pmd) | |
782 | { | |
783 | pmd_t *pmd, old_pmd; | |
784 | ||
785 | pmd = stage2_get_pmd(kvm, cache, addr); | |
786 | VM_BUG_ON(!pmd); | |
d5d8184d | 787 | |
ad361f09 CD |
788 | /* |
789 | * Mapping in huge pages should only happen through a fault. If a | |
790 | * page is merged into a transparent huge page, the individual | |
791 | * subpages of that huge page should be unmapped through MMU | |
792 | * notifiers before we get here. | |
793 | * | |
794 | * Merging of CompoundPages is not supported; they should be split | |
795 | * first, unmapped, merged, and mapped back in on demand. | |
796 | */ | |
797 | VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd)); | |
798 | ||
799 | old_pmd = *pmd; | |
800 | kvm_set_pmd(pmd, *new_pmd); | |
801 | if (pmd_present(old_pmd)) | |
802 | kvm_tlb_flush_vmid_ipa(kvm, addr); | |
803 | else | |
804 | get_page(virt_to_page(pmd)); | |
805 | return 0; | |
806 | } | |
807 | ||
808 | static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, | |
15a49a44 MS |
809 | phys_addr_t addr, const pte_t *new_pte, |
810 | unsigned long flags) | |
ad361f09 CD |
811 | { |
812 | pmd_t *pmd; | |
813 | pte_t *pte, old_pte; | |
15a49a44 MS |
814 | bool iomap = flags & KVM_S2PTE_FLAG_IS_IOMAP; |
815 | bool logging_active = flags & KVM_S2_FLAG_LOGGING_ACTIVE; | |
816 | ||
817 | VM_BUG_ON(logging_active && !cache); | |
ad361f09 | 818 | |
38f791a4 | 819 | /* Create stage-2 page table mapping - Levels 0 and 1 */ |
ad361f09 CD |
820 | pmd = stage2_get_pmd(kvm, cache, addr); |
821 | if (!pmd) { | |
822 | /* | |
823 | * Ignore calls from kvm_set_spte_hva for unallocated | |
824 | * address ranges. | |
825 | */ | |
826 | return 0; | |
827 | } | |
828 | ||
15a49a44 MS |
829 | /* |
830 | * While dirty page logging is active, dissolve the huge PMD, then continue on | |
831 | * to allocate a page table. | |
832 | */ | |
833 | if (logging_active) | |
834 | stage2_dissolve_pmd(kvm, addr, pmd); | |
835 | ||
ad361f09 | 836 | /* Create stage-2 page mappings - Level 2 */ |
d5d8184d CD |
837 | if (pmd_none(*pmd)) { |
838 | if (!cache) | |
839 | return 0; /* ignore calls from kvm_set_spte_hva */ | |
840 | pte = mmu_memory_cache_alloc(cache); | |
c62ee2b2 | 841 | kvm_clean_pte(pte); |
d5d8184d | 842 | pmd_populate_kernel(NULL, pmd, pte); |
d5d8184d | 843 | get_page(virt_to_page(pmd)); |
c62ee2b2 MZ |
844 | } |
845 | ||
846 | pte = pte_offset_kernel(pmd, addr); | |
d5d8184d CD |
847 | |
848 | if (iomap && pte_present(*pte)) | |
849 | return -EFAULT; | |
850 | ||
851 | /* Create 2nd stage page table mapping - Level 3 */ | |
852 | old_pte = *pte; | |
853 | kvm_set_pte(pte, *new_pte); | |
854 | if (pte_present(old_pte)) | |
48762767 | 855 | kvm_tlb_flush_vmid_ipa(kvm, addr); |
d5d8184d CD |
856 | else |
857 | get_page(virt_to_page(pte)); | |
858 | ||
859 | return 0; | |
860 | } | |
861 | ||
862 | /** | |
863 | * kvm_phys_addr_ioremap - map a device range to guest IPA | |
864 | * | |
865 | * @kvm: The KVM pointer | |
866 | * @guest_ipa: The IPA at which to insert the mapping | |
867 | * @pa: The physical address of the device | |
868 | * @size: The size of the mapping | |
869 | */ | |
870 | int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, | |
c40f2f8f | 871 | phys_addr_t pa, unsigned long size, bool writable) |
d5d8184d CD |
872 | { |
873 | phys_addr_t addr, end; | |
874 | int ret = 0; | |
875 | unsigned long pfn; | |
876 | struct kvm_mmu_memory_cache cache = { 0, }; | |
877 | ||
878 | end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK; | |
879 | pfn = __phys_to_pfn(pa); | |
880 | ||
881 | for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) { | |
c62ee2b2 | 882 | pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE); |
d5d8184d | 883 | |
c40f2f8f AB |
884 | if (writable) |
885 | kvm_set_s2pte_writable(&pte); | |
886 | ||
38f791a4 CD |
887 | ret = mmu_topup_memory_cache(&cache, KVM_MMU_CACHE_MIN_PAGES, |
888 | KVM_NR_MEM_OBJS); | |
d5d8184d CD |
889 | if (ret) |
890 | goto out; | |
891 | spin_lock(&kvm->mmu_lock); | |
15a49a44 MS |
892 | ret = stage2_set_pte(kvm, &cache, addr, &pte, |
893 | KVM_S2PTE_FLAG_IS_IOMAP); | |
d5d8184d CD |
894 | spin_unlock(&kvm->mmu_lock); |
895 | if (ret) | |
896 | goto out; | |
897 | ||
898 | pfn++; | |
899 | } | |
900 | ||
901 | out: | |
902 | mmu_free_memory_cache(&cache); | |
903 | return ret; | |
904 | } | |
905 | ||
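/*
 * Illustrative, standalone sketch (plain user-space C, 4K pages assumed,
 * addresses made up) of the page rounding used by kvm_phys_addr_ioremap()
 * above: a device window of 0x1100 bytes at IPA 0x09000000 ends up mapped
 * as two full pages, one stage2_set_pte() call per page.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t guest_ipa = 0x09000000, size = 0x1100;
	uint64_t page_size = 4096, page_mask = ~(page_size - 1);
	uint64_t end = (guest_ipa + size + page_size - 1) & page_mask;
	unsigned int pages = 0;

	for (uint64_t addr = guest_ipa; addr < end; addr += page_size)
		pages++;			/* one PTE per iteration */

	/* prints: mapped 2 pages, end=0x9002000 */
	printf("mapped %u pages, end=0x%llx\n", pages,
	       (unsigned long long)end);
	return 0;
}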
9b5fdb97 CD |
906 | static bool transparent_hugepage_adjust(pfn_t *pfnp, phys_addr_t *ipap) |
907 | { | |
908 | pfn_t pfn = *pfnp; | |
909 | gfn_t gfn = *ipap >> PAGE_SHIFT; | |
910 | ||
911 | if (PageTransCompound(pfn_to_page(pfn))) { | |
912 | unsigned long mask; | |
913 | /* | |
914 | * The address we faulted on is backed by a transparent huge | |
915 | * page. However, because we map the compound huge page and | |
916 | * not the individual tail page, we need to transfer the | |
917 | * refcount to the head page. We have to be careful that the | |
918 | * THP doesn't start to split while we are adjusting the | |
919 | * refcounts. | |
920 | * | |
921 | * We are sure this doesn't happen, because mmu_notifier_retry | |
922 | * was successful and we are holding the mmu_lock, so if this | |
923 | * THP is trying to split, it will be blocked in the mmu | |
924 | * notifier before touching any of the pages, specifically | |
925 | * before being able to call __split_huge_page_refcount(). | |
926 | * | |
927 | * We can therefore safely transfer the refcount from PG_tail | |
928 | * to PG_head and switch the pfn from a tail page to the head | |
929 | * page accordingly. | |
930 | */ | |
931 | mask = PTRS_PER_PMD - 1; | |
932 | VM_BUG_ON((gfn & mask) != (pfn & mask)); | |
933 | if (pfn & mask) { | |
934 | *ipap &= PMD_MASK; | |
935 | kvm_release_pfn_clean(pfn); | |
936 | pfn &= ~mask; | |
937 | kvm_get_pfn(pfn); | |
938 | *pfnp = pfn; | |
939 | } | |
940 | ||
941 | return true; | |
942 | } | |
943 | ||
944 | return false; | |
945 | } | |
946 | ||
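/*
 * Illustrative, standalone sketch (plain user-space C, assuming 4K pages
 * and a 2MB PMD, i.e. PTRS_PER_PMD == 512; pfn/IPA values made up) of the
 * rounding performed by transparent_hugepage_adjust() above: once gfn and
 * pfn are known to share the same offset within the huge page, rounding
 * both down selects the head page and the block-aligned IPA.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t ptrs_per_pmd = 512;
	const uint64_t mask = ptrs_per_pmd - 1;
	const uint64_t pmd_size = ptrs_per_pmd * 4096;

	uint64_t pfn  = 0x40123;		/* tail page of a THP    */
	uint64_t ipap = 0x08123000;		/* faulting IPA          */
	uint64_t gfn  = ipap >> 12;

	if ((gfn & mask) != (pfn & mask))	/* the VM_BUG_ON() check */
		return 1;

	ipap &= ~(pmd_size - 1);		/* ipap &= PMD_MASK      */
	pfn  &= ~mask;				/* head pfn              */

	/* prints: head pfn 0x40000 maps block IPA 0x8000000 */
	printf("head pfn 0x%llx maps block IPA 0x%llx\n",
	       (unsigned long long)pfn, (unsigned long long)ipap);
	return 0;
}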
a7d079ce AB |
947 | static bool kvm_is_write_fault(struct kvm_vcpu *vcpu) |
948 | { | |
949 | if (kvm_vcpu_trap_is_iabt(vcpu)) | |
950 | return false; | |
951 | ||
952 | return kvm_vcpu_dabt_iswrite(vcpu); | |
953 | } | |
954 | ||
bb55e9b1 AB |
955 | static bool kvm_is_device_pfn(unsigned long pfn) |
956 | { | |
957 | return !pfn_valid(pfn); | |
958 | } | |
959 | ||
c6473555 MS |
960 | /** |
961 | * stage2_wp_ptes - write protect PMD range | |
962 | * @pmd: pointer to pmd entry | |
963 | * @addr: range start address | |
964 | * @end: range end address | |
965 | */ | |
966 | static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end) | |
967 | { | |
968 | pte_t *pte; | |
969 | ||
970 | pte = pte_offset_kernel(pmd, addr); | |
971 | do { | |
972 | if (!pte_none(*pte)) { | |
973 | if (!kvm_s2pte_readonly(pte)) | |
974 | kvm_set_s2pte_readonly(pte); | |
975 | } | |
976 | } while (pte++, addr += PAGE_SIZE, addr != end); | |
977 | } | |
978 | ||
979 | /** | |
980 | * stage2_wp_pmds - write protect PUD range | |
981 | * @pud: pointer to pud entry | |
982 | * @addr: range start address | |
983 | * @end: range end address | |
984 | */ | |
985 | static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end) | |
986 | { | |
987 | pmd_t *pmd; | |
988 | phys_addr_t next; | |
989 | ||
990 | pmd = pmd_offset(pud, addr); | |
991 | ||
992 | do { | |
993 | next = kvm_pmd_addr_end(addr, end); | |
994 | if (!pmd_none(*pmd)) { | |
995 | if (kvm_pmd_huge(*pmd)) { | |
996 | if (!kvm_s2pmd_readonly(pmd)) | |
997 | kvm_set_s2pmd_readonly(pmd); | |
998 | } else { | |
999 | stage2_wp_ptes(pmd, addr, next); | |
1000 | } | |
1001 | } | |
1002 | } while (pmd++, addr = next, addr != end); | |
1003 | } | |
1004 | ||
1005 | /** | |
1006 | * stage2_wp_puds - write protect PGD range | |
1007 | * @pgd: pointer to pgd entry | |
1008 | * @addr: range start address | |
1009 | * @end: range end address | |
1010 | * | |
1011 | * Process PUD entries; huge PUDs are not supported, so hitting one triggers a BUG. | |
1012 | */ | |
1013 | static void stage2_wp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end) | |
1014 | { | |
1015 | pud_t *pud; | |
1016 | phys_addr_t next; | |
1017 | ||
1018 | pud = pud_offset(pgd, addr); | |
1019 | do { | |
1020 | next = kvm_pud_addr_end(addr, end); | |
1021 | if (!pud_none(*pud)) { | |
1022 | /* TODO:PUD not supported, revisit later if supported */ | |
1023 | BUG_ON(kvm_pud_huge(*pud)); | |
1024 | stage2_wp_pmds(pud, addr, next); | |
1025 | } | |
1026 | } while (pud++, addr = next, addr != end); | |
1027 | } | |
1028 | ||
1029 | /** | |
1030 | * stage2_wp_range() - write protect stage2 memory region range | |
1031 | * @kvm: The KVM pointer | |
1032 | * @addr: Start address of range | |
1033 | * @end: End address of range | |
1034 | */ | |
1035 | static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) | |
1036 | { | |
1037 | pgd_t *pgd; | |
1038 | phys_addr_t next; | |
1039 | ||
1040 | pgd = kvm->arch.pgd + pgd_index(addr); | |
1041 | do { | |
1042 | /* | |
1043 | * Release kvm_mmu_lock periodically if the memory region is | |
1044 | * large. Otherwise, we may see kernel panics with | |
227ea818 CD |
1045 | * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR, |
1046 | * CONFIG_LOCKDEP. Additionally, holding the lock too long | |
c6473555 MS |
1047 | * will also starve other vCPUs. |
1048 | */ | |
1049 | if (need_resched() || spin_needbreak(&kvm->mmu_lock)) | |
1050 | cond_resched_lock(&kvm->mmu_lock); | |
1051 | ||
1052 | next = kvm_pgd_addr_end(addr, end); | |
1053 | if (pgd_present(*pgd)) | |
1054 | stage2_wp_puds(pgd, addr, next); | |
1055 | } while (pgd++, addr = next, addr != end); | |
1056 | } | |
1057 | ||
1058 | /** | |
1059 | * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot | |
1060 | * @kvm: The KVM pointer | |
1061 | * @slot: The memory slot to write protect | |
1062 | * | |
1063 | * Called to start logging dirty pages after the memory region | |
1064 | * KVM_MEM_LOG_DIRTY_PAGES operation is requested. After this function returns, | |
1065 | * all present PMDs and PTEs in the memory region are write protected. | |
1066 | * Afterwards, the dirty page log can be read. | |
1067 | * | |
1068 | * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired, | |
1069 | * serializing operations for VM memory regions. | |
1070 | */ | |
1071 | void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot) | |
1072 | { | |
1073 | struct kvm_memory_slot *memslot = id_to_memslot(kvm->memslots, slot); | |
1074 | phys_addr_t start = memslot->base_gfn << PAGE_SHIFT; | |
1075 | phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT; | |
1076 | ||
1077 | spin_lock(&kvm->mmu_lock); | |
1078 | stage2_wp_range(kvm, start, end); | |
1079 | spin_unlock(&kvm->mmu_lock); | |
1080 | kvm_flush_remote_tlbs(kvm); | |
1081 | } | |
53c810c3 MS |
1082 | |
1083 | /** | |
1084 | * kvm_arch_mmu_write_protect_pt_masked() - write protect dirty pages | |
1085 | * @kvm: The KVM pointer | |
1086 | * @slot: The memory slot associated with mask | |
1087 | * @gfn_offset: The gfn offset in memory slot | |
1088 | * @mask: The mask of dirty pages at offset 'gfn_offset' in this memory | |
1089 | * slot to be write protected | |
1090 | * | |
1091 | * Walks the bits set in mask and write protects the associated PTEs. Caller must | |
1092 | * acquire kvm_mmu_lock. | |
1093 | */ | |
1094 | void kvm_arch_mmu_write_protect_pt_masked(struct kvm *kvm, | |
1095 | struct kvm_memory_slot *slot, | |
1096 | gfn_t gfn_offset, unsigned long mask) | |
1097 | { | |
1098 | phys_addr_t base_gfn = slot->base_gfn + gfn_offset; | |
1099 | phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT; | |
1100 | phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT; | |
1101 | ||
1102 | stage2_wp_range(kvm, start, end); | |
1103 | } | |
c6473555 | 1104 | |
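/*
 * Illustrative, standalone sketch (plain user-space C; __ffs()/__fls() are
 * modelled with GCC builtins, PAGE_SHIFT assumed to be 12, values made up)
 * of the start/end arithmetic in kvm_arch_mmu_write_protect_pt_masked()
 * above.  Note that the whole span between the lowest and the highest set
 * bit is passed to stage2_wp_range(), including gfns whose bit is clear.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t base_gfn = 0x1000;		/* slot->base_gfn + gfn_offset */
	unsigned long mask = 0x36;		/* dirty bits 1, 2, 4 and 5    */

	unsigned int ffs_bit = __builtin_ctzl(mask);	/* __ffs(mask) == 1 */
	unsigned int fls_bit = 8 * sizeof(mask) - 1 -
			       __builtin_clzl(mask);	/* __fls(mask) == 5 */

	uint64_t start = (base_gfn + ffs_bit) << 12;
	uint64_t end   = (base_gfn + fls_bit + 1) << 12;

	/* prints: wp range: 0x1001000 - 0x1006000 */
	printf("wp range: 0x%llx - 0x%llx\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}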
94f8e641 | 1105 | static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, |
98047888 | 1106 | struct kvm_memory_slot *memslot, unsigned long hva, |
94f8e641 CD |
1107 | unsigned long fault_status) |
1108 | { | |
94f8e641 | 1109 | int ret; |
9b5fdb97 | 1110 | bool write_fault, writable, hugetlb = false, force_pte = false; |
94f8e641 | 1111 | unsigned long mmu_seq; |
ad361f09 | 1112 | gfn_t gfn = fault_ipa >> PAGE_SHIFT; |
ad361f09 | 1113 | struct kvm *kvm = vcpu->kvm; |
94f8e641 | 1114 | struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; |
ad361f09 CD |
1115 | struct vm_area_struct *vma; |
1116 | pfn_t pfn; | |
b8865767 | 1117 | pgprot_t mem_type = PAGE_S2; |
840f4bfb | 1118 | bool fault_ipa_uncached; |
15a49a44 MS |
1119 | bool logging_active = memslot_is_logging(memslot); |
1120 | unsigned long flags = 0; | |
94f8e641 | 1121 | |
a7d079ce | 1122 | write_fault = kvm_is_write_fault(vcpu); |
94f8e641 CD |
1123 | if (fault_status == FSC_PERM && !write_fault) { |
1124 | kvm_err("Unexpected L2 read permission error\n"); | |
1125 | return -EFAULT; | |
1126 | } | |
1127 | ||
ad361f09 CD |
1128 | /* Let's check if we will get back a huge page backed by hugetlbfs */ |
1129 | down_read(¤t->mm->mmap_sem); | |
1130 | vma = find_vma_intersection(current->mm, hva, hva + 1); | |
37b54408 AB |
1131 | if (unlikely(!vma)) { |
1132 | kvm_err("Failed to find VMA for hva 0x%lx\n", hva); | |
1133 | up_read(¤t->mm->mmap_sem); | |
1134 | return -EFAULT; | |
1135 | } | |
1136 | ||
15a49a44 | 1137 | if (is_vm_hugetlb_page(vma) && !logging_active) { |
ad361f09 CD |
1138 | hugetlb = true; |
1139 | gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT; | |
9b5fdb97 CD |
1140 | } else { |
1141 | /* | |
136d737f MZ |
1142 | * Pages belonging to memslots that don't have the same |
1143 | * alignment for userspace and IPA cannot be mapped using | |
1144 | * block descriptors even if the pages belong to a THP for | |
1145 | * the process, because the stage-2 block descriptor will | |
1146 | * cover more than a single THP and we lose atomicity for | |
1147 | * unmapping, updates, and splits of the THP or other pages | |
1148 | * in the stage-2 block range. | |
9b5fdb97 | 1149 | */ |
136d737f MZ |
1150 | if ((memslot->userspace_addr & ~PMD_MASK) != |
1151 | ((memslot->base_gfn << PAGE_SHIFT) & ~PMD_MASK)) | |
9b5fdb97 | 1152 | force_pte = true; |
ad361f09 CD |
1153 | } |
1154 | up_read(¤t->mm->mmap_sem); | |
1155 | ||
94f8e641 | 1156 | /* We need minimum second+third level pages */ |
38f791a4 CD |
1157 | ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES, |
1158 | KVM_NR_MEM_OBJS); | |
94f8e641 CD |
1159 | if (ret) |
1160 | return ret; | |
1161 | ||
1162 | mmu_seq = vcpu->kvm->mmu_notifier_seq; | |
1163 | /* | |
1164 | * Ensure the read of mmu_notifier_seq happens before we call | |
1165 | * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk that | |
1166 | * the page we just got a reference to gets unmapped before we have a | |
1167 | * chance to grab the mmu_lock, which ensures that if the page gets | |
1168 | * unmapped afterwards, the call to kvm_unmap_hva will take it away | |
1169 | * from us again properly. This smp_rmb() interacts with the smp_wmb() | |
1170 | * in kvm_mmu_notifier_invalidate_<page|range_end>. | |
1171 | */ | |
1172 | smp_rmb(); | |
1173 | ||
ad361f09 | 1174 | pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable); |
94f8e641 CD |
1175 | if (is_error_pfn(pfn)) |
1176 | return -EFAULT; | |
1177 | ||
15a49a44 | 1178 | if (kvm_is_device_pfn(pfn)) { |
b8865767 | 1179 | mem_type = PAGE_S2_DEVICE; |
15a49a44 MS |
1180 | flags |= KVM_S2PTE_FLAG_IS_IOMAP; |
1181 | } else if (logging_active) { | |
1182 | /* | |
1183 | * Faults on pages in a memslot with logging enabled | |
1184 | * should not be mapped with huge pages (it introduces churn | |
1185 | * and performance degradation), so force a pte mapping. | |
1186 | */ | |
1187 | force_pte = true; | |
1188 | flags |= KVM_S2_FLAG_LOGGING_ACTIVE; | |
1189 | ||
1190 | /* | |
1191 | * Only actually map the page as writable if this was a write | |
1192 | * fault. | |
1193 | */ | |
1194 | if (!write_fault) | |
1195 | writable = false; | |
1196 | } | |
b8865767 | 1197 | |
ad361f09 CD |
1198 | spin_lock(&kvm->mmu_lock); |
1199 | if (mmu_notifier_retry(kvm, mmu_seq)) | |
94f8e641 | 1200 | goto out_unlock; |
15a49a44 | 1201 | |
9b5fdb97 CD |
1202 | if (!hugetlb && !force_pte) |
1203 | hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa); | |
ad361f09 | 1204 | |
849260c7 | 1205 | fault_ipa_uncached = memslot->flags & KVM_MEMSLOT_INCOHERENT; |
840f4bfb | 1206 | |
ad361f09 | 1207 | if (hugetlb) { |
b8865767 | 1208 | pmd_t new_pmd = pfn_pmd(pfn, mem_type); |
ad361f09 CD |
1209 | new_pmd = pmd_mkhuge(new_pmd); |
1210 | if (writable) { | |
1211 | kvm_set_s2pmd_writable(&new_pmd); | |
1212 | kvm_set_pfn_dirty(pfn); | |
1213 | } | |
840f4bfb LE |
1214 | coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE, |
1215 | fault_ipa_uncached); | |
ad361f09 CD |
1216 | ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd); |
1217 | } else { | |
b8865767 | 1218 | pte_t new_pte = pfn_pte(pfn, mem_type); |
15a49a44 | 1219 | |
ad361f09 CD |
1220 | if (writable) { |
1221 | kvm_set_s2pte_writable(&new_pte); | |
1222 | kvm_set_pfn_dirty(pfn); | |
15a49a44 | 1223 | mark_page_dirty(kvm, gfn); |
ad361f09 | 1224 | } |
840f4bfb LE |
1225 | coherent_cache_guest_page(vcpu, hva, PAGE_SIZE, |
1226 | fault_ipa_uncached); | |
15a49a44 | 1227 | ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags); |
94f8e641 | 1228 | } |
ad361f09 | 1229 | |
94f8e641 | 1230 | out_unlock: |
ad361f09 | 1231 | spin_unlock(&kvm->mmu_lock); |
94f8e641 | 1232 | kvm_release_pfn_clean(pfn); |
ad361f09 | 1233 | return ret; |
94f8e641 CD |
1234 | } |
1235 | ||
1236 | /** | |
1237 | * kvm_handle_guest_abort - handles all 2nd stage aborts | |
1238 | * @vcpu: the VCPU pointer | |
1239 | * @run: the kvm_run structure | |
1240 | * | |
1241 | * Any abort that gets to the host is almost guaranteed to be caused by a | |
1242 | * missing second stage translation table entry, which means either that the | |
1243 | * guest simply needs more memory and we must allocate an appropriate page, or | |
1244 | * that the guest tried to access I/O memory, which is emulated by user | |
1245 | * space. The distinction is based on the IPA causing the fault and whether this | |
1246 | * memory region has been registered as standard RAM by user space. | |
1247 | */ | |
342cd0ab CD |
1248 | int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) |
1249 | { | |
94f8e641 CD |
1250 | unsigned long fault_status; |
1251 | phys_addr_t fault_ipa; | |
1252 | struct kvm_memory_slot *memslot; | |
98047888 CD |
1253 | unsigned long hva; |
1254 | bool is_iabt, write_fault, writable; | |
94f8e641 CD |
1255 | gfn_t gfn; |
1256 | int ret, idx; | |
1257 | ||
52d1dba9 | 1258 | is_iabt = kvm_vcpu_trap_is_iabt(vcpu); |
7393b599 | 1259 | fault_ipa = kvm_vcpu_get_fault_ipa(vcpu); |
94f8e641 | 1260 | |
7393b599 MZ |
1261 | trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu), |
1262 | kvm_vcpu_get_hfar(vcpu), fault_ipa); | |
94f8e641 CD |
1263 | |
1264 | /* Check the stage-2 fault is trans. fault or write fault */ | |
0496daa5 | 1265 | fault_status = kvm_vcpu_trap_get_fault_type(vcpu); |
94f8e641 | 1266 | if (fault_status != FSC_FAULT && fault_status != FSC_PERM) { |
0496daa5 CD |
1267 | kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n", |
1268 | kvm_vcpu_trap_get_class(vcpu), | |
1269 | (unsigned long)kvm_vcpu_trap_get_fault(vcpu), | |
1270 | (unsigned long)kvm_vcpu_get_hsr(vcpu)); | |
94f8e641 CD |
1271 | return -EFAULT; |
1272 | } | |
1273 | ||
1274 | idx = srcu_read_lock(&vcpu->kvm->srcu); | |
1275 | ||
1276 | gfn = fault_ipa >> PAGE_SHIFT; | |
98047888 CD |
1277 | memslot = gfn_to_memslot(vcpu->kvm, gfn); |
1278 | hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable); | |
a7d079ce | 1279 | write_fault = kvm_is_write_fault(vcpu); |
98047888 | 1280 | if (kvm_is_error_hva(hva) || (write_fault && !writable)) { |
94f8e641 CD |
1281 | if (is_iabt) { |
1282 | /* Prefetch Abort on I/O address */ | |
7393b599 | 1283 | kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu)); |
94f8e641 CD |
1284 | ret = 1; |
1285 | goto out_unlock; | |
1286 | } | |
1287 | ||
cfe3950c MZ |
1288 | /* |
1289 | * The IPA is reported as [MAX:12], so we need to | |
1290 | * complement it with the bottom 12 bits from the | |
1291 | * faulting VA. This is always 12 bits, irrespective | |
1292 | * of the page size. | |
1293 | */ | |
1294 | fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1); | |
45e96ea6 | 1295 | ret = io_mem_abort(vcpu, run, fault_ipa); |
94f8e641 CD |
1296 | goto out_unlock; |
1297 | } | |
1298 | ||
c3058d5d CD |
1299 | /* Userspace should not be able to register out-of-bounds IPAs */ |
1300 | VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE); | |
1301 | ||
98047888 | 1302 | ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status); |
94f8e641 CD |
1303 | if (ret == 0) |
1304 | ret = 1; | |
1305 | out_unlock: | |
1306 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | |
1307 | return ret; | |
342cd0ab CD |
1308 | } |
1309 | ||
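/*
 * Illustrative, standalone sketch (plain user-space C, values made up) of
 * the IPA completion step in kvm_handle_guest_abort() above: the reported
 * IPA is page granular ([MAX:12]), so its bottom 12 bits are taken from
 * the faulting virtual address before io_mem_abort() is called.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t fault_ipa = 0x08123000;  /* kvm_vcpu_get_fault_ipa(): [MAX:12] */
	uint64_t hfar      = 0xc0321456;  /* kvm_vcpu_get_hfar(): faulting VA   */

	fault_ipa |= hfar & ((1 << 12) - 1);

	/* prints: completed IPA: 0x8123456 */
	printf("completed IPA: 0x%llx\n", (unsigned long long)fault_ipa);
	return 0;
}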
d5d8184d CD |
1310 | static void handle_hva_to_gpa(struct kvm *kvm, |
1311 | unsigned long start, | |
1312 | unsigned long end, | |
1313 | void (*handler)(struct kvm *kvm, | |
1314 | gpa_t gpa, void *data), | |
1315 | void *data) | |
1316 | { | |
1317 | struct kvm_memslots *slots; | |
1318 | struct kvm_memory_slot *memslot; | |
1319 | ||
1320 | slots = kvm_memslots(kvm); | |
1321 | ||
1322 | /* we only care about the pages that the guest sees */ | |
1323 | kvm_for_each_memslot(memslot, slots) { | |
1324 | unsigned long hva_start, hva_end; | |
1325 | gfn_t gfn, gfn_end; | |
1326 | ||
1327 | hva_start = max(start, memslot->userspace_addr); | |
1328 | hva_end = min(end, memslot->userspace_addr + | |
1329 | (memslot->npages << PAGE_SHIFT)); | |
1330 | if (hva_start >= hva_end) | |
1331 | continue; | |
1332 | ||
1333 | /* | |
1334 | * {gfn(page) | page intersects with [hva_start, hva_end)} = | |
1335 | * {gfn_start, gfn_start+1, ..., gfn_end-1}. | |
1336 | */ | |
1337 | gfn = hva_to_gfn_memslot(hva_start, memslot); | |
1338 | gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); | |
1339 | ||
1340 | for (; gfn < gfn_end; ++gfn) { | |
1341 | gpa_t gpa = gfn << PAGE_SHIFT; | |
1342 | handler(kvm, gpa, data); | |
1343 | } | |
1344 | } | |
1345 | } | |
1346 | ||
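/*
 * Illustrative, standalone sketch (plain user-space C, 4K pages assumed,
 * addresses made up) of how handle_hva_to_gpa() above turns a partially
 * overlapping hva range into the gfn set it hands to the handler.  The
 * helper mirrors the usual hva_to_gfn_memslot() definition,
 * base_gfn + ((hva - userspace_addr) >> PAGE_SHIFT), as an assumption.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t ex_hva_to_gfn(uint64_t hva, uint64_t uaddr, uint64_t base_gfn)
{
	return base_gfn + ((hva - uaddr) >> 12);
}

int main(void)
{
	uint64_t uaddr = 0x7f0000000000ULL, base_gfn = 0x10000, npages = 16;
	uint64_t start = 0x7f0000001800ULL, end = 0x7f0000004800ULL;

	uint64_t hva_start = start > uaddr ? start : uaddr;
	uint64_t slot_end  = uaddr + (npages << 12);
	uint64_t hva_end   = end < slot_end ? end : slot_end;

	uint64_t gfn     = ex_hva_to_gfn(hva_start, uaddr, base_gfn);
	uint64_t gfn_end = ex_hva_to_gfn(hva_end + 4096 - 1, uaddr, base_gfn);

	/* pages 1..4 of the slot intersect [start, end), so the handler
	 * runs for gfn 0x10001 .. 0x10004 (gfn_end is exclusive). */
	for (; gfn < gfn_end; ++gfn)
		printf("handler(gfn=0x%llx)\n", (unsigned long long)gfn);
	return 0;
}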
1347 | static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data) | |
1348 | { | |
1349 | unmap_stage2_range(kvm, gpa, PAGE_SIZE); | |
d5d8184d CD |
1350 | } |
1351 | ||
1352 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) | |
1353 | { | |
1354 | unsigned long end = hva + PAGE_SIZE; | |
1355 | ||
1356 | if (!kvm->arch.pgd) | |
1357 | return 0; | |
1358 | ||
1359 | trace_kvm_unmap_hva(hva); | |
1360 | handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL); | |
1361 | return 0; | |
1362 | } | |
1363 | ||
1364 | int kvm_unmap_hva_range(struct kvm *kvm, | |
1365 | unsigned long start, unsigned long end) | |
1366 | { | |
1367 | if (!kvm->arch.pgd) | |
1368 | return 0; | |
1369 | ||
1370 | trace_kvm_unmap_hva_range(start, end); | |
1371 | handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); | |
1372 | return 0; | |
1373 | } | |
1374 | ||
1375 | static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data) | |
1376 | { | |
1377 | pte_t *pte = (pte_t *)data; | |
1378 | ||
15a49a44 MS |
1379 | /* |
1380 | * We can always call stage2_set_pte with the KVM_S2_FLAG_LOGGING_ACTIVE | |
1381 | * flag clear because MMU notifiers will have unmapped a huge PMD before | |
1382 | * calling ->change_pte() (which in turn calls kvm_set_spte_hva()) and | |
1383 | * therefore stage2_set_pte() never needs to clear out a huge PMD | |
1384 | * through this calling path. | |
1385 | */ | |
1386 | stage2_set_pte(kvm, NULL, gpa, pte, 0); | |
d5d8184d CD |
1387 | } |
1388 | ||
1389 | ||
1390 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) | |
1391 | { | |
1392 | unsigned long end = hva + PAGE_SIZE; | |
1393 | pte_t stage2_pte; | |
1394 | ||
1395 | if (!kvm->arch.pgd) | |
1396 | return; | |
1397 | ||
1398 | trace_kvm_set_spte_hva(hva); | |
1399 | stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2); | |
1400 | handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte); | |
1401 | } | |
1402 | ||
1403 | void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu) | |
1404 | { | |
1405 | mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); | |
1406 | } | |
1407 | ||
342cd0ab CD |
1408 | phys_addr_t kvm_mmu_get_httbr(void) |
1409 | { | |
342cd0ab CD |
1410 | return virt_to_phys(hyp_pgd); |
1411 | } | |
1412 | ||
5a677ce0 MZ |
1413 | phys_addr_t kvm_mmu_get_boot_httbr(void) |
1414 | { | |
1415 | return virt_to_phys(boot_hyp_pgd); | |
1416 | } | |
1417 | ||
1418 | phys_addr_t kvm_get_idmap_vector(void) | |
1419 | { | |
1420 | return hyp_idmap_vector; | |
1421 | } | |
1422 | ||
342cd0ab CD |
1423 | int kvm_mmu_init(void) |
1424 | { | |
2fb41059 MZ |
1425 | int err; |
1426 | ||
4fda342c SS |
1427 | hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start); |
1428 | hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end); | |
1429 | hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init); | |
5a677ce0 MZ |
1430 | |
1431 | if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) { | |
1432 | /* | |
1433 | * Our init code is crossing a page boundary. Allocate | |
1434 | * a bounce page, copy the code over and use that. | |
1435 | */ | |
1436 | size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start; | |
1437 | phys_addr_t phys_base; | |
1438 | ||
5d4e08c4 | 1439 | init_bounce_page = (void *)__get_free_page(GFP_KERNEL); |
5a677ce0 MZ |
1440 | if (!init_bounce_page) { |
1441 | kvm_err("Couldn't allocate HYP init bounce page\n"); | |
1442 | err = -ENOMEM; | |
1443 | goto out; | |
1444 | } | |
1445 | ||
1446 | memcpy(init_bounce_page, __hyp_idmap_text_start, len); | |
1447 | /* | |
1448 | * Warning: the code we just copied to the bounce page | |
1449 | * must be flushed to the point of coherency. | |
1450 | * Otherwise, the data may be sitting in L2, and HYP | |
1451 | * mode won't be able to observe it as it runs with | |
1452 | * caches off at that point. | |
1453 | */ | |
1454 | kvm_flush_dcache_to_poc(init_bounce_page, len); | |
1455 | ||
4fda342c | 1456 | phys_base = kvm_virt_to_phys(init_bounce_page); |
5a677ce0 MZ |
1457 | hyp_idmap_vector += phys_base - hyp_idmap_start; |
1458 | hyp_idmap_start = phys_base; | |
1459 | hyp_idmap_end = phys_base + len; | |
1460 | ||
1461 | kvm_info("Using HYP init bounce page @%lx\n", | |
1462 | (unsigned long)phys_base); | |
1463 | } | |
1464 | ||
38f791a4 CD |
1465 | hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order); |
1466 | boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order); | |
5d4e08c4 | 1467 | |
5a677ce0 | 1468 | if (!hyp_pgd || !boot_hyp_pgd) { |
d5d8184d | 1469 | kvm_err("Hyp mode PGD not allocated\n"); |
2fb41059 MZ |
1470 | err = -ENOMEM; |
1471 | goto out; | |
1472 | } | |
1473 | ||
1474 | /* Create the idmap in the boot page tables */ | |
1475 | err = __create_hyp_mappings(boot_hyp_pgd, | |
1476 | hyp_idmap_start, hyp_idmap_end, | |
1477 | __phys_to_pfn(hyp_idmap_start), | |
1478 | PAGE_HYP); | |
1479 | ||
1480 | if (err) { | |
1481 | kvm_err("Failed to idmap %lx-%lx\n", | |
1482 | hyp_idmap_start, hyp_idmap_end); | |
1483 | goto out; | |
d5d8184d CD |
1484 | } |
1485 | ||
5a677ce0 MZ |
1486 | /* Map the very same page at the trampoline VA */ |
1487 | err = __create_hyp_mappings(boot_hyp_pgd, | |
1488 | TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE, | |
1489 | __phys_to_pfn(hyp_idmap_start), | |
1490 | PAGE_HYP); | |
1491 | if (err) { | |
1492 | kvm_err("Failed to map trampoline @%lx into boot HYP pgd\n", | |
1493 | TRAMPOLINE_VA); | |
1494 | goto out; | |
1495 | } | |
1496 | ||
1497 | /* Map the same page again into the runtime page tables */ | |
1498 | err = __create_hyp_mappings(hyp_pgd, | |
1499 | TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE, | |
1500 | __phys_to_pfn(hyp_idmap_start), | |
1501 | PAGE_HYP); | |
1502 | if (err) { | |
1503 | kvm_err("Failed to map trampoline @%lx into runtime HYP pgd\n", | |
1504 | TRAMPOLINE_VA); | |
1505 | goto out; | |
1506 | } | |
1507 | ||
d5d8184d | 1508 | return 0; |
2fb41059 | 1509 | out: |
4f728276 | 1510 | free_hyp_pgds(); |
2fb41059 | 1511 | return err; |
342cd0ab | 1512 | } |
df6ce24f EA |
1513 | |
1514 | void kvm_arch_commit_memory_region(struct kvm *kvm, | |
1515 | struct kvm_userspace_memory_region *mem, | |
1516 | const struct kvm_memory_slot *old, | |
1517 | enum kvm_mr_change change) | |
1518 | { | |
c6473555 MS |
1519 | /* |
1520 | * At this point memslot has been committed and there is an | |
1521 | * allocated dirty_bitmap[], dirty pages will be tracked while the | |
1522 | * memory slot is write protected. | |
1523 | */ | |
1524 | if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) | |
1525 | kvm_mmu_wp_memory_region(kvm, mem->slot); | |
df6ce24f EA |
1526 | } |
1527 | ||
1528 | int kvm_arch_prepare_memory_region(struct kvm *kvm, | |
1529 | struct kvm_memory_slot *memslot, | |
1530 | struct kvm_userspace_memory_region *mem, | |
1531 | enum kvm_mr_change change) | |
1532 | { | |
8eef9123 AB |
1533 | hva_t hva = mem->userspace_addr; |
1534 | hva_t reg_end = hva + mem->memory_size; | |
1535 | bool writable = !(mem->flags & KVM_MEM_READONLY); | |
1536 | int ret = 0; | |
1537 | ||
15a49a44 MS |
1538 | if (change != KVM_MR_CREATE && change != KVM_MR_MOVE && |
1539 | change != KVM_MR_FLAGS_ONLY) | |
8eef9123 AB |
1540 | return 0; |
1541 | ||
c3058d5d CD |
1542 | /* |
1543 | * Prevent userspace from creating a memory region outside of the IPA | |
1544 | * space addressable by the KVM guest. | |
1545 | */ | |
1546 | if (memslot->base_gfn + memslot->npages >= | |
1547 | (KVM_PHYS_SIZE >> PAGE_SHIFT)) | |
1548 | return -EFAULT; | |
1549 | ||
8eef9123 AB |
1550 | /* |
1551 | * A memory region could potentially cover multiple VMAs, and any holes | |
1552 | * between them, so iterate over all of them to find out if we can map | |
1553 | * any of them right now. | |
1554 | * | |
1555 | * +--------------------------------------------+ | |
1556 | * +---------------+----------------+ +----------------+ | |
1557 | * | : VMA 1 | VMA 2 | | VMA 3 : | | |
1558 | * +---------------+----------------+ +----------------+ | |
1559 | * | memory region | | |
1560 | * +--------------------------------------------+ | |
1561 | */ | |
1562 | do { | |
1563 | struct vm_area_struct *vma = find_vma(current->mm, hva); | |
1564 | hva_t vm_start, vm_end; | |
1565 | ||
1566 | if (!vma || vma->vm_start >= reg_end) | |
1567 | break; | |
1568 | ||
1569 | /* | |
1570 | * Mapping a read-only VMA is only allowed if the | |
1571 | * memory region is configured as read-only. | |
1572 | */ | |
1573 | if (writable && !(vma->vm_flags & VM_WRITE)) { | |
1574 | ret = -EPERM; | |
1575 | break; | |
1576 | } | |
1577 | ||
1578 | /* | |
1579 | * Take the intersection of this VMA with the memory region | |
1580 | */ | |
1581 | vm_start = max(hva, vma->vm_start); | |
1582 | vm_end = min(reg_end, vma->vm_end); | |
1583 | ||
1584 | if (vma->vm_flags & VM_PFNMAP) { | |
1585 | gpa_t gpa = mem->guest_phys_addr + | |
1586 | (vm_start - mem->userspace_addr); | |
1587 | phys_addr_t pa = (vma->vm_pgoff << PAGE_SHIFT) + | |
1588 | vm_start - vma->vm_start; | |
1589 | ||
15a49a44 MS |
1590 | /* IO region dirty page logging not allowed */ |
1591 | if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) | |
1592 | return -EINVAL; | |
1593 | ||
8eef9123 AB |
1594 | ret = kvm_phys_addr_ioremap(kvm, gpa, pa, |
1595 | vm_end - vm_start, | |
1596 | writable); | |
1597 | if (ret) | |
1598 | break; | |
1599 | } | |
1600 | hva = vm_end; | |
1601 | } while (hva < reg_end); | |
1602 | ||
15a49a44 MS |
1603 | if (change == KVM_MR_FLAGS_ONLY) |
1604 | return ret; | |
1605 | ||
849260c7 AB |
1606 | spin_lock(&kvm->mmu_lock); |
1607 | if (ret) | |
8eef9123 | 1608 | unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size); |
849260c7 AB |
1609 | else |
1610 | stage2_flush_memslot(kvm, memslot); | |
1611 | spin_unlock(&kvm->mmu_lock); | |
8eef9123 | 1612 | return ret; |
df6ce24f EA |
1613 | } |
1614 | ||
1615 | void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, | |
1616 | struct kvm_memory_slot *dont) | |
1617 | { | |
1618 | } | |
1619 | ||
1620 | int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, | |
1621 | unsigned long npages) | |
1622 | { | |
849260c7 AB |
1623 | /* |
1624 | * Readonly memslots are not incoherent with the caches by definition, | |
1625 | * but in practice, they are used mostly to emulate ROMs or NOR flashes | |
1626 | * that the guest may consider devices and hence map as uncached. | |
1627 | * To prevent incoherency issues in these cases, tag all readonly | |
1628 | * regions as incoherent. | |
1629 | */ | |
1630 | if (slot->flags & KVM_MEM_READONLY) | |
1631 | slot->flags |= KVM_MEMSLOT_INCOHERENT; | |
df6ce24f EA |
1632 | return 0; |
1633 | } | |
1634 | ||
1635 | void kvm_arch_memslots_updated(struct kvm *kvm) | |
1636 | { | |
1637 | } | |
1638 | ||
1639 | void kvm_arch_flush_shadow_all(struct kvm *kvm) | |
1640 | { | |
1641 | } | |
1642 | ||
1643 | void kvm_arch_flush_shadow_memslot(struct kvm *kvm, | |
1644 | struct kvm_memory_slot *slot) | |
1645 | { | |
8eef9123 AB |
1646 | gpa_t gpa = slot->base_gfn << PAGE_SHIFT; |
1647 | phys_addr_t size = slot->npages << PAGE_SHIFT; | |
1648 | ||
1649 | spin_lock(&kvm->mmu_lock); | |
1650 | unmap_stage2_range(kvm, gpa, size); | |
1651 | spin_unlock(&kvm->mmu_lock); | |
df6ce24f | 1652 | } |