arm64: KVM: Implement 48 VA support for KVM EL2 and Stage-2
arch/arm/kvm/mmu.c
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <linux/hugetlb.h>
#include <trace/events/kvm.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>

#include "trace.h"

extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];

static pgd_t *boot_hyp_pgd;
static pgd_t *hyp_pgd;
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

static void *init_bounce_page;
static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;

#define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))

#define kvm_pmd_huge(_x)        (pmd_huge(_x) || pmd_trans_huge(_x))

static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
        /*
         * This function also gets called when dealing with HYP page
         * tables. As HYP doesn't have an associated struct kvm (and
         * the HYP page tables are fairly static), we don't do
         * anything there.
         */
        if (kvm)
                kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}

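/*
 * Small cache of pre-allocated pages used to populate missing stage-2 (and
 * Hyp) page table levels.  Callers top up the cache before taking the
 * mmu_lock so that table pages never have to be allocated while the lock is
 * held.
 */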
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
                                  int min, int max)
{
        void *page;

        BUG_ON(max > KVM_NR_MEM_OBJS);
        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < max) {
                page = (void *)__get_free_page(PGALLOC_GFP);
                if (!page)
                        return -ENOMEM;
                cache->objects[cache->nobjs++] = page;
        }
        return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
        while (mc->nobjs)
                free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
        void *p;

        BUG_ON(!mc || !mc->nobjs);
        p = mc->objects[--mc->nobjs];
        return p;
}

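/*
 * The clear_*_entry() helpers below remove a table entry at the given level:
 * they clear the entry, flush the TLB for that IPA, free the lower-level
 * table the entry pointed to, and drop the reference that table held on the
 * page containing the entry.
 */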
static void clear_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
{
        pud_t *pud_table __maybe_unused = pud_offset(pgd, 0);
        pgd_clear(pgd);
        kvm_tlb_flush_vmid_ipa(kvm, addr);
        pud_free(NULL, pud_table);
        put_page(virt_to_page(pgd));
}

static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
{
        pmd_t *pmd_table = pmd_offset(pud, 0);
        VM_BUG_ON(pud_huge(*pud));
        pud_clear(pud);
        kvm_tlb_flush_vmid_ipa(kvm, addr);
        pmd_free(NULL, pmd_table);
        put_page(virt_to_page(pud));
}

static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
{
        pte_t *pte_table = pte_offset_kernel(pmd, 0);
        VM_BUG_ON(kvm_pmd_huge(*pmd));
        pmd_clear(pmd);
        kvm_tlb_flush_vmid_ipa(kvm, addr);
        pte_free_kernel(NULL, pte_table);
        put_page(virt_to_page(pmd));
}

static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
                       phys_addr_t addr, phys_addr_t end)
{
        phys_addr_t start_addr = addr;
        pte_t *pte, *start_pte;

        start_pte = pte = pte_offset_kernel(pmd, addr);
        do {
                if (!pte_none(*pte)) {
                        kvm_set_pte(pte, __pte(0));
                        put_page(virt_to_page(pte));
                        kvm_tlb_flush_vmid_ipa(kvm, addr);
                }
        } while (pte++, addr += PAGE_SIZE, addr != end);

        if (kvm_pte_table_empty(kvm, start_pte))
                clear_pmd_entry(kvm, pmd, start_addr);
}

static void unmap_pmds(struct kvm *kvm, pud_t *pud,
                       phys_addr_t addr, phys_addr_t end)
{
        phys_addr_t next, start_addr = addr;
        pmd_t *pmd, *start_pmd;

        start_pmd = pmd = pmd_offset(pud, addr);
        do {
                next = kvm_pmd_addr_end(addr, end);
                if (!pmd_none(*pmd)) {
                        if (kvm_pmd_huge(*pmd)) {
                                pmd_clear(pmd);
                                kvm_tlb_flush_vmid_ipa(kvm, addr);
                                put_page(virt_to_page(pmd));
                        } else {
                                unmap_ptes(kvm, pmd, addr, next);
                        }
                }
        } while (pmd++, addr = next, addr != end);

        if (kvm_pmd_table_empty(kvm, start_pmd))
                clear_pud_entry(kvm, pud, start_addr);
}

static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
                       phys_addr_t addr, phys_addr_t end)
{
        phys_addr_t next, start_addr = addr;
        pud_t *pud, *start_pud;

        start_pud = pud = pud_offset(pgd, addr);
        do {
                next = kvm_pud_addr_end(addr, end);
                if (!pud_none(*pud)) {
                        if (pud_huge(*pud)) {
                                pud_clear(pud);
                                kvm_tlb_flush_vmid_ipa(kvm, addr);
                                put_page(virt_to_page(pud));
                        } else {
                                unmap_pmds(kvm, pud, addr, next);
                        }
                }
        } while (pud++, addr = next, addr != end);

        if (kvm_pud_table_empty(kvm, start_pud))
                clear_pgd_entry(kvm, pgd, start_addr);
}

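/*
 * Unmap a range of guest IPA (or Hyp VA when kvm == NULL) starting from the
 * pgd pointed to by pgdp, descending through the unmap_puds/pmds/ptes
 * helpers above.  Table pages that become empty on the way back up are freed
 * and their parent entries cleared.
 */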
static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
                        phys_addr_t start, u64 size)
{
        pgd_t *pgd;
        phys_addr_t addr = start, end = start + size;
        phys_addr_t next;

        pgd = pgdp + pgd_index(addr);
        do {
                next = kvm_pgd_addr_end(addr, end);
                unmap_puds(kvm, pgd, addr, next);
        } while (pgd++, addr = next, addr != end);
}

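/*
 * The stage2_flush_* walkers below flush the data cache to the point of
 * coherency for every page already mapped at stage 2, by looking up the
 * corresponding userspace address of each mapping.  They are driven by
 * stage2_flush_vm() further down.
 */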
static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
                              phys_addr_t addr, phys_addr_t end)
{
        pte_t *pte;

        pte = pte_offset_kernel(pmd, addr);
        do {
                if (!pte_none(*pte)) {
                        hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
                        kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE);
                }
        } while (pte++, addr += PAGE_SIZE, addr != end);
}

static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
                              phys_addr_t addr, phys_addr_t end)
{
        pmd_t *pmd;
        phys_addr_t next;

        pmd = pmd_offset(pud, addr);
        do {
                next = kvm_pmd_addr_end(addr, end);
                if (!pmd_none(*pmd)) {
                        if (kvm_pmd_huge(*pmd)) {
                                hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
                                kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE);
                        } else {
                                stage2_flush_ptes(kvm, pmd, addr, next);
                        }
                }
        } while (pmd++, addr = next, addr != end);
}

static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
                              phys_addr_t addr, phys_addr_t end)
{
        pud_t *pud;
        phys_addr_t next;

        pud = pud_offset(pgd, addr);
        do {
                next = kvm_pud_addr_end(addr, end);
                if (!pud_none(*pud)) {
                        if (pud_huge(*pud)) {
                                hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
                                kvm_flush_dcache_to_poc((void*)hva, PUD_SIZE);
                        } else {
                                stage2_flush_pmds(kvm, pud, addr, next);
                        }
                }
        } while (pud++, addr = next, addr != end);
}

static void stage2_flush_memslot(struct kvm *kvm,
                                 struct kvm_memory_slot *memslot)
{
        phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
        phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
        phys_addr_t next;
        pgd_t *pgd;

        pgd = kvm->arch.pgd + pgd_index(addr);
        do {
                next = kvm_pgd_addr_end(addr, end);
                stage2_flush_puds(kvm, pgd, addr, next);
        } while (pgd++, addr = next, addr != end);
}

/**
 * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
 * @kvm: The struct kvm pointer
 *
 * Go through the stage 2 page tables and invalidate any cache lines
 * backing memory already mapped to the VM.
 */
void stage2_flush_vm(struct kvm *kvm)
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
        int idx;

        idx = srcu_read_lock(&kvm->srcu);
        spin_lock(&kvm->mmu_lock);

        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, slots)
                stage2_flush_memslot(kvm, memslot);

        spin_unlock(&kvm->mmu_lock);
        srcu_read_unlock(&kvm->srcu, idx);
}

/**
 * free_boot_hyp_pgd - free HYP boot page tables
 *
 * Free the HYP boot page tables. The bounce page is also freed.
 */
void free_boot_hyp_pgd(void)
{
        mutex_lock(&kvm_hyp_pgd_mutex);

        if (boot_hyp_pgd) {
                unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
                unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
                free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
                boot_hyp_pgd = NULL;
        }

        if (hyp_pgd)
                unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);

        free_page((unsigned long)init_bounce_page);
        init_bounce_page = NULL;

        mutex_unlock(&kvm_hyp_pgd_mutex);
}

/**
 * free_hyp_pgds - free Hyp-mode page tables
 *
 * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
 * therefore contains either mappings in the kernel memory area (above
 * PAGE_OFFSET), or device mappings in the vmalloc range (from
 * VMALLOC_START to VMALLOC_END).
 *
 * boot_hyp_pgd should only map two pages for the init code.
 */
void free_hyp_pgds(void)
{
        unsigned long addr;

        free_boot_hyp_pgd();

        mutex_lock(&kvm_hyp_pgd_mutex);

        if (hyp_pgd) {
                for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
                        unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
                for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
                        unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);

                free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
                hyp_pgd = NULL;
        }

        mutex_unlock(&kvm_hyp_pgd_mutex);
}

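/*
 * The create_hyp_*_mappings() helpers below each walk (and, where needed,
 * allocate) one level of the Hyp page tables, installing mappings for a
 * contiguous run of pfns with the requested protection.  Newly written
 * entries are cleaned to the point of coherency so that the table walker at
 * EL2 can observe them.
 */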
static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
                                    unsigned long end, unsigned long pfn,
                                    pgprot_t prot)
{
        pte_t *pte;
        unsigned long addr;

        addr = start;
        do {
                pte = pte_offset_kernel(pmd, addr);
                kvm_set_pte(pte, pfn_pte(pfn, prot));
                get_page(virt_to_page(pte));
                kvm_flush_dcache_to_poc(pte, sizeof(*pte));
                pfn++;
        } while (addr += PAGE_SIZE, addr != end);
}

static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
                                   unsigned long end, unsigned long pfn,
                                   pgprot_t prot)
{
        pmd_t *pmd;
        pte_t *pte;
        unsigned long addr, next;

        addr = start;
        do {
                pmd = pmd_offset(pud, addr);

                BUG_ON(pmd_sect(*pmd));

                if (pmd_none(*pmd)) {
                        pte = pte_alloc_one_kernel(NULL, addr);
                        if (!pte) {
                                kvm_err("Cannot allocate Hyp pte\n");
                                return -ENOMEM;
                        }
                        pmd_populate_kernel(NULL, pmd, pte);
                        get_page(virt_to_page(pmd));
                        kvm_flush_dcache_to_poc(pmd, sizeof(*pmd));
                }

                next = pmd_addr_end(addr, end);

                create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
                pfn += (next - addr) >> PAGE_SHIFT;
        } while (addr = next, addr != end);

        return 0;
}

static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
                                   unsigned long end, unsigned long pfn,
                                   pgprot_t prot)
{
        pud_t *pud;
        pmd_t *pmd;
        unsigned long addr, next;
        int ret;

        addr = start;
        do {
                pud = pud_offset(pgd, addr);

                if (pud_none_or_clear_bad(pud)) {
                        pmd = pmd_alloc_one(NULL, addr);
                        if (!pmd) {
                                kvm_err("Cannot allocate Hyp pmd\n");
                                return -ENOMEM;
                        }
                        pud_populate(NULL, pud, pmd);
                        get_page(virt_to_page(pud));
                        kvm_flush_dcache_to_poc(pud, sizeof(*pud));
                }

                next = pud_addr_end(addr, end);
                ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
                if (ret)
                        return ret;
                pfn += (next - addr) >> PAGE_SHIFT;
        } while (addr = next, addr != end);

        return 0;
}

static int __create_hyp_mappings(pgd_t *pgdp,
                                 unsigned long start, unsigned long end,
                                 unsigned long pfn, pgprot_t prot)
{
        pgd_t *pgd;
        pud_t *pud;
        unsigned long addr, next;
        int err = 0;

        mutex_lock(&kvm_hyp_pgd_mutex);
        addr = start & PAGE_MASK;
        end = PAGE_ALIGN(end);
        do {
                pgd = pgdp + pgd_index(addr);

                if (pgd_none(*pgd)) {
                        pud = pud_alloc_one(NULL, addr);
                        if (!pud) {
                                kvm_err("Cannot allocate Hyp pud\n");
                                err = -ENOMEM;
                                goto out;
                        }
                        pgd_populate(NULL, pgd, pud);
                        get_page(virt_to_page(pgd));
                        kvm_flush_dcache_to_poc(pgd, sizeof(*pgd));
                }

                next = pgd_addr_end(addr, end);
                err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
                if (err)
                        goto out;
                pfn += (next - addr) >> PAGE_SHIFT;
        } while (addr = next, addr != end);
out:
        mutex_unlock(&kvm_hyp_pgd_mutex);
        return err;
}

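/*
 * Resolve a kernel virtual address (either in the linear map or in the
 * vmalloc area) to the physical address backing it, so that it can be mapped
 * into Hyp.
 */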
static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
{
        if (!is_vmalloc_addr(kaddr)) {
                BUG_ON(!virt_addr_valid(kaddr));
                return __pa(kaddr);
        } else {
                return page_to_phys(vmalloc_to_page(kaddr)) +
                       offset_in_page(kaddr);
        }
}

/**
 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
 * @from:       The virtual kernel start address of the range
 * @to:         The virtual kernel end address of the range (exclusive)
 *
 * The same virtual address as the kernel virtual address is also used
 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
 * physical pages.
 */
int create_hyp_mappings(void *from, void *to)
{
        phys_addr_t phys_addr;
        unsigned long virt_addr;
        unsigned long start = KERN_TO_HYP((unsigned long)from);
        unsigned long end = KERN_TO_HYP((unsigned long)to);

        start = start & PAGE_MASK;
        end = PAGE_ALIGN(end);

        for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
                int err;

                phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
                err = __create_hyp_mappings(hyp_pgd, virt_addr,
                                            virt_addr + PAGE_SIZE,
                                            __phys_to_pfn(phys_addr),
                                            PAGE_HYP);
                if (err)
                        return err;
        }

        return 0;
}

/**
 * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
 * @from:       The kernel start VA of the range
 * @to:         The kernel end VA of the range (exclusive)
 * @phys_addr:  The physical start address which gets mapped
 *
 * The resulting HYP VA is the same as the kernel VA, modulo
 * HYP_PAGE_OFFSET.
 */
int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
{
        unsigned long start = KERN_TO_HYP((unsigned long)from);
        unsigned long end = KERN_TO_HYP((unsigned long)to);

        /* Check for a valid kernel IO mapping */
        if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
                return -EINVAL;

        return __create_hyp_mappings(hyp_pgd, start, end,
                                     __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
}

/**
 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
 * @kvm:        The KVM struct pointer for the VM.
 *
 * Allocates the 1st level table only of size defined by S2_PGD_ORDER (can
 * support either full 40-bit input addresses or limited to 32-bit input
 * addresses). Clears the allocated pages.
 *
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
int kvm_alloc_stage2_pgd(struct kvm *kvm)
{
        int ret;
        pgd_t *pgd;

        if (kvm->arch.pgd != NULL) {
                kvm_err("kvm_arch already initialized?\n");
                return -EINVAL;
        }

        if (KVM_PREALLOC_LEVEL > 0) {
                /*
                 * Allocate fake pgd for the page table manipulation macros to
                 * work.  This is not used by the hardware and we have no
                 * alignment requirement for this allocation.
                 */
                pgd = (pgd_t *)kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t),
                                       GFP_KERNEL | __GFP_ZERO);
        } else {
                /*
                 * Allocate actual first-level Stage-2 page table used by the
                 * hardware for Stage-2 page table walks.
                 */
                pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, S2_PGD_ORDER);
        }

        if (!pgd)
                return -ENOMEM;

        ret = kvm_prealloc_hwpgd(kvm, pgd);
        if (ret)
                goto out_err;

        kvm_clean_pgd(pgd);
        kvm->arch.pgd = pgd;
        return 0;
out_err:
        if (KVM_PREALLOC_LEVEL > 0)
                kfree(pgd);
        else
                free_pages((unsigned long)pgd, S2_PGD_ORDER);
        return ret;
}

/**
 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
 * @kvm:   The VM pointer
 * @start: The intermediate physical base address of the range to unmap
 * @size:  The size of the area to unmap
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * with things behind our backs.
 */
static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
{
        unmap_range(kvm, kvm->arch.pgd, start, size);
}

/**
 * kvm_free_stage2_pgd - free all stage-2 tables
 * @kvm:        The KVM struct pointer for the VM.
 *
 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
 * underlying level-2 and level-3 tables before freeing the actual level-1 table
 * and setting the struct pointer to NULL.
 *
 * Note we don't need locking here as this is only called when the VM is
 * destroyed, which can only be done once.
 */
void kvm_free_stage2_pgd(struct kvm *kvm)
{
        if (kvm->arch.pgd == NULL)
                return;

        unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
        kvm_free_hwpgd(kvm);
        if (KVM_PREALLOC_LEVEL > 0)
                kfree(kvm->arch.pgd);
        else
                free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
        kvm->arch.pgd = NULL;
}

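/*
 * stage2_get_pud()/stage2_get_pmd() walk down to the requested level of the
 * stage-2 tables for addr, allocating missing intermediate tables from the
 * pre-filled memory cache.  With a NULL cache they only look up existing
 * entries and return NULL if a level is missing.
 */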
static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
                             phys_addr_t addr)
{
        pgd_t *pgd;
        pud_t *pud;

        pgd = kvm->arch.pgd + pgd_index(addr);
        if (WARN_ON(pgd_none(*pgd))) {
                if (!cache)
                        return NULL;
                pud = mmu_memory_cache_alloc(cache);
                pgd_populate(NULL, pgd, pud);
                get_page(virt_to_page(pgd));
        }

        return pud_offset(pgd, addr);
}

static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
                             phys_addr_t addr)
{
        pud_t *pud;
        pmd_t *pmd;

        pud = stage2_get_pud(kvm, cache, addr);
        if (pud_none(*pud)) {
                if (!cache)
                        return NULL;
                pmd = mmu_memory_cache_alloc(cache);
                pud_populate(NULL, pud, pmd);
                get_page(virt_to_page(pud));
        }

        return pmd_offset(pud, addr);
}

static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
                               *cache, phys_addr_t addr, const pmd_t *new_pmd)
{
        pmd_t *pmd, old_pmd;

        pmd = stage2_get_pmd(kvm, cache, addr);
        VM_BUG_ON(!pmd);

        /*
         * Mapping in huge pages should only happen through a fault. If a
         * page is merged into a transparent huge page, the individual
         * subpages of that huge page should be unmapped through MMU
         * notifiers before we get here.
         *
         * Merging of CompoundPages is not supported; they should instead be
         * split first, unmapped, merged, and mapped back in on demand.
         */
        VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));

        old_pmd = *pmd;
        kvm_set_pmd(pmd, *new_pmd);
        if (pmd_present(old_pmd))
                kvm_tlb_flush_vmid_ipa(kvm, addr);
        else
                get_page(virt_to_page(pmd));
        return 0;
}

static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
                          phys_addr_t addr, const pte_t *new_pte, bool iomap)
{
        pmd_t *pmd;
        pte_t *pte, old_pte;

        /* Create stage-2 page table mapping - Levels 0 and 1 */
        pmd = stage2_get_pmd(kvm, cache, addr);
        if (!pmd) {
                /*
                 * Ignore calls from kvm_set_spte_hva for unallocated
                 * address ranges.
                 */
                return 0;
        }

        /* Create stage-2 page mappings - Level 2 */
        if (pmd_none(*pmd)) {
                if (!cache)
                        return 0; /* ignore calls from kvm_set_spte_hva */
                pte = mmu_memory_cache_alloc(cache);
                kvm_clean_pte(pte);
                pmd_populate_kernel(NULL, pmd, pte);
                get_page(virt_to_page(pmd));
        }

        pte = pte_offset_kernel(pmd, addr);

        if (iomap && pte_present(*pte))
                return -EFAULT;

        /* Create 2nd stage page table mapping - Level 3 */
        old_pte = *pte;
        kvm_set_pte(pte, *new_pte);
        if (pte_present(old_pte))
                kvm_tlb_flush_vmid_ipa(kvm, addr);
        else
                get_page(virt_to_page(pte));

        return 0;
}

/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 *
 * @kvm:        The KVM pointer
 * @guest_ipa:  The IPA at which to insert the mapping
 * @pa:         The physical address of the device
 * @size:       The size of the mapping
 */
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
                          phys_addr_t pa, unsigned long size, bool writable)
{
        phys_addr_t addr, end;
        int ret = 0;
        unsigned long pfn;
        struct kvm_mmu_memory_cache cache = { 0, };

        end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
        pfn = __phys_to_pfn(pa);

        for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
                pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);

                if (writable)
                        kvm_set_s2pte_writable(&pte);

                ret = mmu_topup_memory_cache(&cache, KVM_MMU_CACHE_MIN_PAGES,
                                             KVM_NR_MEM_OBJS);
                if (ret)
                        goto out;
                spin_lock(&kvm->mmu_lock);
                ret = stage2_set_pte(kvm, &cache, addr, &pte, true);
                spin_unlock(&kvm->mmu_lock);
                if (ret)
                        goto out;

                pfn++;
        }

out:
        mmu_free_memory_cache(&cache);
        return ret;
}

static bool transparent_hugepage_adjust(pfn_t *pfnp, phys_addr_t *ipap)
{
        pfn_t pfn = *pfnp;
        gfn_t gfn = *ipap >> PAGE_SHIFT;

        if (PageTransCompound(pfn_to_page(pfn))) {
                unsigned long mask;
                /*
                 * The address we faulted on is backed by a transparent huge
                 * page.  However, because we map the compound huge page and
                 * not the individual tail page, we need to transfer the
                 * refcount to the head page.  We have to be careful that the
                 * THP doesn't start to split while we are adjusting the
                 * refcounts.
                 *
                 * We are sure this doesn't happen, because mmu_notifier_retry
                 * was successful and we are holding the mmu_lock, so if this
                 * THP is trying to split, it will be blocked in the mmu
                 * notifier before touching any of the pages, specifically
                 * before being able to call __split_huge_page_refcount().
                 *
                 * We can therefore safely transfer the refcount from PG_tail
                 * to PG_head and switch the pfn from a tail page to the head
                 * page accordingly.
                 */
                mask = PTRS_PER_PMD - 1;
                VM_BUG_ON((gfn & mask) != (pfn & mask));
                if (pfn & mask) {
                        *ipap &= PMD_MASK;
                        kvm_release_pfn_clean(pfn);
                        pfn &= ~mask;
                        kvm_get_pfn(pfn);
                        *pfnp = pfn;
                }

                return true;
        }

        return false;
}

static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
        if (kvm_vcpu_trap_is_iabt(vcpu))
                return false;

        return kvm_vcpu_dabt_iswrite(vcpu);
}

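/*
 * Handle a stage-2 fault on memory backed by a memslot: resolve the faulting
 * gfn to a host pfn, decide whether the mapping can use a PMD-sized block
 * (hugetlbfs, or a suitably aligned THP) or must fall back to a single PTE,
 * and install the new stage-2 mapping under the mmu_lock, rechecking the MMU
 * notifier sequence count to avoid racing with host-side unmaps.
 */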
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                          struct kvm_memory_slot *memslot, unsigned long hva,
                          unsigned long fault_status)
{
        int ret;
        bool write_fault, writable, hugetlb = false, force_pte = false;
        unsigned long mmu_seq;
        gfn_t gfn = fault_ipa >> PAGE_SHIFT;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
        struct vm_area_struct *vma;
        pfn_t pfn;
        pgprot_t mem_type = PAGE_S2;

        write_fault = kvm_is_write_fault(vcpu);
        if (fault_status == FSC_PERM && !write_fault) {
                kvm_err("Unexpected L2 read permission error\n");
                return -EFAULT;
        }

        /* Let's check if we will get back a huge page backed by hugetlbfs */
        down_read(&current->mm->mmap_sem);
        vma = find_vma_intersection(current->mm, hva, hva + 1);
        if (unlikely(!vma)) {
                kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
                up_read(&current->mm->mmap_sem);
                return -EFAULT;
        }

        if (is_vm_hugetlb_page(vma)) {
                hugetlb = true;
                gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
        } else {
                /*
                 * Pages belonging to memslots that don't have the same
                 * alignment for userspace and IPA cannot be mapped using
                 * block descriptors even if the pages belong to a THP for
                 * the process, because the stage-2 block descriptor will
                 * cover more than a single THP and we lose atomicity for
                 * unmapping, updates, and splits of the THP or other pages
                 * in the stage-2 block range.
                 */
                if ((memslot->userspace_addr & ~PMD_MASK) !=
                    ((memslot->base_gfn << PAGE_SHIFT) & ~PMD_MASK))
                        force_pte = true;
        }
        up_read(&current->mm->mmap_sem);

        /* We need minimum second+third level pages */
        ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
                                     KVM_NR_MEM_OBJS);
        if (ret)
                return ret;

        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        /*
         * Ensure the read of mmu_notifier_seq happens before we call
         * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
         * the page we just got a reference to gets unmapped before we have a
         * chance to grab the mmu_lock, which ensures that if the page gets
         * unmapped afterwards, the call to kvm_unmap_hva will take it away
         * from us again properly. This smp_rmb() interacts with the smp_wmb()
         * in kvm_mmu_notifier_invalidate_<page|range_end>.
         */
        smp_rmb();

        pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
        if (is_error_pfn(pfn))
                return -EFAULT;

        if (kvm_is_mmio_pfn(pfn))
                mem_type = PAGE_S2_DEVICE;

        spin_lock(&kvm->mmu_lock);
        if (mmu_notifier_retry(kvm, mmu_seq))
                goto out_unlock;
        if (!hugetlb && !force_pte)
                hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);

        if (hugetlb) {
                pmd_t new_pmd = pfn_pmd(pfn, mem_type);
                new_pmd = pmd_mkhuge(new_pmd);
                if (writable) {
                        kvm_set_s2pmd_writable(&new_pmd);
                        kvm_set_pfn_dirty(pfn);
                }
                coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE);
                ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
        } else {
                pte_t new_pte = pfn_pte(pfn, mem_type);
                if (writable) {
                        kvm_set_s2pte_writable(&new_pte);
                        kvm_set_pfn_dirty(pfn);
                }
                coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);
                ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
                                     mem_type == PAGE_S2_DEVICE);
        }

out_unlock:
        spin_unlock(&kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
        return ret;
}

/**
 * kvm_handle_guest_abort - handles all 2nd stage aborts
 * @vcpu:       the VCPU pointer
 * @run:        the kvm_run structure
 *
 * Any abort that gets to the host is almost guaranteed to be caused by a
 * missing second stage translation table entry, which can mean that either the
 * guest simply needs more memory and we must allocate an appropriate page or it
 * can mean that the guest tried to access I/O memory, which is emulated by user
 * space. The distinction is based on the IPA causing the fault and whether this
 * memory region has been registered as standard RAM by user space.
 */
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        unsigned long fault_status;
        phys_addr_t fault_ipa;
        struct kvm_memory_slot *memslot;
        unsigned long hva;
        bool is_iabt, write_fault, writable;
        gfn_t gfn;
        int ret, idx;

        is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
        fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);

        trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
                              kvm_vcpu_get_hfar(vcpu), fault_ipa);

        /* Check the stage-2 fault is trans. fault or write fault */
        fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
        if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
                kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
                        kvm_vcpu_trap_get_class(vcpu),
                        (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
                        (unsigned long)kvm_vcpu_get_hsr(vcpu));
                return -EFAULT;
        }

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        gfn = fault_ipa >> PAGE_SHIFT;
        memslot = gfn_to_memslot(vcpu->kvm, gfn);
        hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
        write_fault = kvm_is_write_fault(vcpu);
        if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
                if (is_iabt) {
                        /* Prefetch Abort on I/O address */
                        kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
                        ret = 1;
                        goto out_unlock;
                }

                /*
                 * The IPA is reported as [MAX:12], so we need to
                 * complement it with the bottom 12 bits from the
                 * faulting VA. This is always 12 bits, irrespective
                 * of the page size.
                 */
                fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
                ret = io_mem_abort(vcpu, run, fault_ipa);
                goto out_unlock;
        }

        ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
        if (ret == 0)
                ret = 1;
out_unlock:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
        return ret;
}

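/*
 * Iterate over every memslot that intersects the host VA range [start, end)
 * and invoke the handler on each guest physical page in the intersection.
 * This is the common driver for the MMU notifier callbacks below.
 */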
static void handle_hva_to_gpa(struct kvm *kvm,
                              unsigned long start,
                              unsigned long end,
                              void (*handler)(struct kvm *kvm,
                                              gpa_t gpa, void *data),
                              void *data)
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        slots = kvm_memslots(kvm);

        /* we only care about the pages that the guest sees */
        kvm_for_each_memslot(memslot, slots) {
                unsigned long hva_start, hva_end;
                gfn_t gfn, gfn_end;

                hva_start = max(start, memslot->userspace_addr);
                hva_end = min(end, memslot->userspace_addr +
                                   (memslot->npages << PAGE_SHIFT));
                if (hva_start >= hva_end)
                        continue;

                /*
                 * {gfn(page) | page intersects with [hva_start, hva_end)} =
                 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
                 */
                gfn = hva_to_gfn_memslot(hva_start, memslot);
                gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

                for (; gfn < gfn_end; ++gfn) {
                        gpa_t gpa = gfn << PAGE_SHIFT;
                        handler(kvm, gpa, data);
                }
        }
}

static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
        unmap_stage2_range(kvm, gpa, PAGE_SIZE);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
        unsigned long end = hva + PAGE_SIZE;

        if (!kvm->arch.pgd)
                return 0;

        trace_kvm_unmap_hva(hva);
        handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
        return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm,
                        unsigned long start, unsigned long end)
{
        if (!kvm->arch.pgd)
                return 0;

        trace_kvm_unmap_hva_range(start, end);
        handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
        return 0;
}

static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
        pte_t *pte = (pte_t *)data;

        stage2_set_pte(kvm, NULL, gpa, pte, false);
}


void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        unsigned long end = hva + PAGE_SIZE;
        pte_t stage2_pte;

        if (!kvm->arch.pgd)
                return;

        trace_kvm_set_spte_hva(hva);
        stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
        handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
}

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
        mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

phys_addr_t kvm_mmu_get_httbr(void)
{
        return virt_to_phys(hyp_pgd);
}

phys_addr_t kvm_mmu_get_boot_httbr(void)
{
        return virt_to_phys(boot_hyp_pgd);
}

phys_addr_t kvm_get_idmap_vector(void)
{
        return hyp_idmap_vector;
}

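/*
 * One-time Hyp MMU setup: record the physical extent of the idmap'd init
 * code (switching to a bounce page if it straddles a page boundary),
 * allocate the boot and runtime Hyp PGDs, and map the init code both at its
 * identity address and at the trampoline VA.
 */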
int kvm_mmu_init(void)
{
        int err;

        hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start);
        hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
        hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);

        if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) {
                /*
                 * Our init code is crossing a page boundary. Allocate
                 * a bounce page, copy the code over and use that.
                 */
                size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
                phys_addr_t phys_base;

                init_bounce_page = (void *)__get_free_page(GFP_KERNEL);
                if (!init_bounce_page) {
                        kvm_err("Couldn't allocate HYP init bounce page\n");
                        err = -ENOMEM;
                        goto out;
                }

                memcpy(init_bounce_page, __hyp_idmap_text_start, len);
                /*
                 * Warning: the code we just copied to the bounce page
                 * must be flushed to the point of coherency.
                 * Otherwise, the data may be sitting in L2, and HYP
                 * mode won't be able to observe it as it runs with
                 * caches off at that point.
                 */
                kvm_flush_dcache_to_poc(init_bounce_page, len);

                phys_base = kvm_virt_to_phys(init_bounce_page);
                hyp_idmap_vector += phys_base - hyp_idmap_start;
                hyp_idmap_start = phys_base;
                hyp_idmap_end = phys_base + len;

                kvm_info("Using HYP init bounce page @%lx\n",
                         (unsigned long)phys_base);
        }

        hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
        boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);

        if (!hyp_pgd || !boot_hyp_pgd) {
                kvm_err("Hyp mode PGD not allocated\n");
                err = -ENOMEM;
                goto out;
        }

        /* Create the idmap in the boot page tables */
        err = __create_hyp_mappings(boot_hyp_pgd,
                                    hyp_idmap_start, hyp_idmap_end,
                                    __phys_to_pfn(hyp_idmap_start),
                                    PAGE_HYP);

        if (err) {
                kvm_err("Failed to idmap %lx-%lx\n",
                        hyp_idmap_start, hyp_idmap_end);
                goto out;
        }

        /* Map the very same page at the trampoline VA */
        err = __create_hyp_mappings(boot_hyp_pgd,
                                    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
                                    __phys_to_pfn(hyp_idmap_start),
                                    PAGE_HYP);
        if (err) {
                kvm_err("Failed to map trampoline @%lx into boot HYP pgd\n",
                        TRAMPOLINE_VA);
                goto out;
        }

        /* Map the same page again into the runtime page tables */
        err = __create_hyp_mappings(hyp_pgd,
                                    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
                                    __phys_to_pfn(hyp_idmap_start),
                                    PAGE_HYP);
        if (err) {
                kvm_err("Failed to map trampoline @%lx into runtime HYP pgd\n",
                        TRAMPOLINE_VA);
                goto out;
        }

        return 0;
out:
        free_hyp_pgds();
        return err;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   const struct kvm_memory_slot *old,
                                   enum kvm_mr_change change)
{
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_userspace_memory_region *mem,
                                   enum kvm_mr_change change)
{
        hva_t hva = mem->userspace_addr;
        hva_t reg_end = hva + mem->memory_size;
        bool writable = !(mem->flags & KVM_MEM_READONLY);
        int ret = 0;

        if (change != KVM_MR_CREATE && change != KVM_MR_MOVE)
                return 0;

        /*
         * A memory region could potentially cover multiple VMAs, and any holes
         * between them, so iterate over all of them to find out if we can map
         * any of them right now.
         *
         *     +--------------------------------------------+
         * +---------------+----------------+   +----------------+
         * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
         * +---------------+----------------+   +----------------+
         *     |               memory region                |
         *     +--------------------------------------------+
         */
        do {
                struct vm_area_struct *vma = find_vma(current->mm, hva);
                hva_t vm_start, vm_end;

                if (!vma || vma->vm_start >= reg_end)
                        break;

                /*
                 * Mapping a read-only VMA is only allowed if the
                 * memory region is configured as read-only.
                 */
                if (writable && !(vma->vm_flags & VM_WRITE)) {
                        ret = -EPERM;
                        break;
                }

                /*
                 * Take the intersection of this VMA with the memory region
                 */
                vm_start = max(hva, vma->vm_start);
                vm_end = min(reg_end, vma->vm_end);

                if (vma->vm_flags & VM_PFNMAP) {
                        gpa_t gpa = mem->guest_phys_addr +
                                    (vm_start - mem->userspace_addr);
                        phys_addr_t pa = (vma->vm_pgoff << PAGE_SHIFT) +
                                         vm_start - vma->vm_start;

                        ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
                                                    vm_end - vm_start,
                                                    writable);
                        if (ret)
                                break;
                }
                hva = vm_end;
        } while (hva < reg_end);

        if (ret) {
                spin_lock(&kvm->mmu_lock);
                unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
                spin_unlock(&kvm->mmu_lock);
        }
        return ret;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                            unsigned long npages)
{
        return 0;
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
        gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
        phys_addr_t size = slot->npages << PAGE_SHIFT;

        spin_lock(&kvm->mmu_lock);
        unmap_stage2_range(kvm, gpa, size);
        spin_unlock(&kvm->mmu_lock);
}