/* arch/arm/kvm/arm.c - "KVM: ARM: Memory virtualization setup" */
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include <asm/unified.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/mman.h>
#include <asm/cputype.h>
#include <asm/tlbflush.h>
#include <asm/virt.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

#ifdef REQUIRES_VIRT
__asm__(".arch_extension virt");
#endif

static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
static struct vfp_hard_struct __percpu *kvm_host_vfp_state;
static unsigned long hyp_default_vectors;

/*
 * Hyp mode is initialized for all CPUs at init time (see init_hyp_mode()
 * below), so the generic per-CPU hardware enable/disable hooks have
 * nothing to do on this architecture.
 */
int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

/**
 * kvm_arch_init_vm - initializes a VM data structure
 * @kvm:	pointer to the KVM struct
 * @type:	VM type identifier (must be zero on this architecture)
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int ret = 0;

	if (type)
		return -EINVAL;

	ret = kvm_alloc_stage2_pgd(kvm);
	if (ret)
		goto out_fail_alloc;

	/*
	 * Map the struct kvm into Hyp mode so the world-switch code
	 * can access the VM state.
	 */
	ret = create_hyp_mappings(kvm, kvm + 1);
	if (ret)
		goto out_free_stage2_pgd;

	/* Mark the initial VMID generation invalid */
	kvm->arch.vmid_gen = 0;

	return ret;
out_free_stage2_pgd:
	kvm_free_stage2_pgd(kvm);
out_fail_alloc:
	return ret;
}

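/*
 * Illustration only, not part of the original file: a minimal sketch of
 * the userspace path that reaches kvm_arch_init_vm(), using the standard
 * /dev/kvm interface. The last ioctl argument becomes the "type"
 * parameter checked above, so it must be 0 here.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	int create_vm(void)
 *	{
 *		int kvm_fd = open("/dev/kvm", O_RDWR);
 *
 *		if (kvm_fd < 0)
 *			return -1;
 *		// Returns a new VM fd; ends up in kvm_arch_init_vm()
 *		return ioctl(kvm_fd, KVM_CREATE_VM, 0UL);
 *	}
 */
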
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return 0;
}

/**
 * kvm_arch_destroy_vm - destroy the VM data structure
 * @kvm:	pointer to the KVM struct
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	int i;

	kvm_free_stage2_pgd(kvm);

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_arch_vcpu_free(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_ONE_REG:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_NR_VCPUS:
		r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

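/*
 * Illustration only, not part of the original file: userspace probes
 * these capabilities with the KVM_CHECK_EXTENSION ioctl on the /dev/kvm
 * fd; the return value is the "r" computed above (0 means unsupported).
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	int max_vcpus(void)
 *	{
 *		int kvm_fd = open("/dev/kvm", O_RDWR);
 *
 *		if (kvm_fd < 0)
 *			return -1;
 *		// Reports KVM_MAX_VCPUS via the switch statement above
 *		return ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);
 *	}
 */
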
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

int kvm_arch_set_memory_region(struct kvm *kvm,
			       struct kvm_userspace_memory_region *mem,
			       struct kvm_memory_slot old,
			       int user_alloc)
{
	return 0;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	int err;
	struct kvm_vcpu *vcpu;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	/* Map the VCPU state into Hyp mode as well */
	err = create_hyp_mappings(vcpu, vcpu + 1);
	if (err)
		goto vcpu_uninit;

	return vcpu;
vcpu_uninit:
	kvm_vcpu_uninit(vcpu);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}

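/*
 * Illustration only, not part of the original file: a sketch of how a
 * VCPU is created and initialized from userspace. KVM_CREATE_VCPU lands
 * in kvm_arch_vcpu_create() above; the KVM_ARM_VCPU_INIT ioctl handled
 * further down then selects the target CPU model.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	int create_vcpu(int vm_fd)
 *	{
 *		struct kvm_vcpu_init init;
 *		int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0UL);
 *
 *		if (vcpu_fd < 0)
 *			return -1;
 *		memset(&init, 0, sizeof(init));
 *		init.target = KVM_ARM_TARGET_CORTEX_A15;
 *		if (ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init) < 0)
 *			return -1;
 *		return vcpu_fd;
 *	}
 */
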
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	kvm_mmu_free_memory_caches(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return 0;
}

int __attribute_const__ kvm_target_cpu(void)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_number = read_cpuid_part_number();

	if (implementor != ARM_CPU_IMP_ARM)
		return -EINVAL;

	switch (part_number) {
	case ARM_CPU_PART_CORTEX_A15:
		return KVM_ARM_TARGET_CORTEX_A15;
	default:
		return -EINVAL;
	}
}

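/*
 * Illustration only, not part of the original file: the helpers used by
 * kvm_target_cpu() decode the MIDR (Main ID Register), where bits
 * [31:24] hold the implementer code and bits [15:4] the primary part
 * number. A hand-rolled equivalent would look roughly like:
 *
 *	u32 midr = read_cpuid_id();
 *	u32 implementor = (midr >> 24) & 0xff;	// 0x41 ('A') for ARM Ltd.
 *	u32 part_number = midr & 0xfff0;	// 0xc0f0 for Cortex-A15
 */
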
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_ARM_VCPU_INIT: {
		struct kvm_vcpu_init init;

		if (copy_from_user(&init, argp, sizeof(init)))
			return -EFAULT;

		return kvm_vcpu_set_target(vcpu, &init);
	}
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		if (copy_from_user(&reg, argp, sizeof(reg)))
			return -EFAULT;
		if (ioctl == KVM_SET_ONE_REG)
			return kvm_arm_set_reg(vcpu, &reg);
		else
			return kvm_arm_get_reg(vcpu, &reg);
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned n;

		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			return -EFAULT;
		n = reg_list.n;
		reg_list.n = kvm_arm_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			return -EFAULT;
		if (n < reg_list.n)
			return -E2BIG;
		return kvm_arm_copy_reg_indices(vcpu, user_list->reg);
	}
	default:
		return -EINVAL;
	}
}

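/*
 * Illustration only, not part of the original file: KVM_GET_REG_LIST is
 * a two-call protocol. Calling with n == 0 fails with E2BIG but writes
 * back the required count (the copy_to_user() above happens before the
 * size check), after which the caller sizes the buffer and retries:
 *
 *	#include <errno.h>
 *	#include <stdlib.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	struct kvm_reg_list *get_reg_list(int vcpu_fd)
 *	{
 *		struct kvm_reg_list probe = { .n = 0 };
 *		struct kvm_reg_list *list;
 *
 *		if (ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe) == 0 ||
 *		    errno != E2BIG)
 *			return NULL;
 *		list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
 *		if (!list)
 *			return NULL;
 *		list->n = probe.n;	// now large enough
 *		if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) {
 *			free(list);
 *			return NULL;
 *		}
 *		return list;
 *	}
 */
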
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -EINVAL;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

static void cpu_init_hyp_mode(void *vector)
{
	unsigned long long pgd_ptr;
	unsigned long pgd_low, pgd_high;
	unsigned long hyp_stack_ptr;
	unsigned long stack_page;
	unsigned long vector_ptr;

	/* Switch from the HYP stub to our own HYP init vector */
	__hyp_set_vectors((unsigned long)vector);

	pgd_ptr = (unsigned long long)kvm_mmu_get_httbr();
	pgd_low = (pgd_ptr & ((1ULL << 32) - 1));
	pgd_high = (pgd_ptr >> 32ULL);
	stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
	hyp_stack_ptr = stack_page + PAGE_SIZE;
	vector_ptr = (unsigned long)__kvm_hyp_vector;

	/*
	 * Call the initialization code and switch to the full-blown HYP
	 * code. The init code doesn't need to preserve these registers:
	 * r0-r3 and r12 are caller-saved according to the AAPCS.
	 * Note that we slightly misuse the prototype by casting pgd_low
	 * to a void *.
	 */
	kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr);
}

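/*
 * Illustration only, not part of the original file: kvm_call_hyp() is
 * expected to issue an HVC instruction, so its arguments travel in
 * r0-r3 per the AAPCS. The 64-bit HTTBR value therefore has to be split
 * into two 32-bit halves before the call, which is what the arithmetic
 * above amounts to:
 *
 *	pgd_low  = (unsigned long)(pgd_ptr & 0xffffffffULL);	// -> r0
 *	pgd_high = (unsigned long)(pgd_ptr >> 32);		// -> r1
 */
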
/**
 * init_hyp_mode - initializes Hyp mode on all online CPUs
 */
static int init_hyp_mode(void)
{
	phys_addr_t init_phys_addr;
	int cpu;
	int err = 0;

	/*
	 * Allocate Hyp PGD and setup Hyp identity mapping
	 */
	err = kvm_mmu_init();
	if (err)
		goto out_err;

	/*
	 * It is probably enough to obtain the default on one
	 * CPU. It's unlikely to be different on the others.
	 */
	hyp_default_vectors = __hyp_get_vectors();

	/*
	 * Allocate stack pages for Hypervisor-mode
	 */
	for_each_possible_cpu(cpu) {
		unsigned long stack_page;

		stack_page = __get_free_page(GFP_KERNEL);
		if (!stack_page) {
			err = -ENOMEM;
			goto out_free_stack_pages;
		}

		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
	}

	/*
	 * Execute the init code on each CPU.
	 *
	 * Note: The stack is not mapped yet, so don't do anything else than
	 * initializing the hypervisor mode on each CPU using a local stack
	 * space for temporary storage.
	 */
	init_phys_addr = virt_to_phys(__kvm_hyp_init);
	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu, cpu_init_hyp_mode,
					 (void *)(long)init_phys_addr, 1);
	}

	/*
	 * Unmap the identity mapping
	 */
	kvm_clear_hyp_idmap();

	/*
	 * Map the Hyp-code called directly from the host
	 */
	err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
	if (err) {
		kvm_err("Cannot map world-switch code\n");
		goto out_free_mappings;
	}

	/*
	 * Map the Hyp stack pages
	 */
	for_each_possible_cpu(cpu) {
		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
		err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE);

		if (err) {
			kvm_err("Cannot map hyp stack\n");
			goto out_free_mappings;
		}
	}

	/*
	 * Map the host VFP structures
	 */
	kvm_host_vfp_state = alloc_percpu(struct vfp_hard_struct);
	if (!kvm_host_vfp_state) {
		err = -ENOMEM;
		kvm_err("Cannot allocate host VFP state\n");
		goto out_free_mappings;
	}

	for_each_possible_cpu(cpu) {
		struct vfp_hard_struct *vfp;

		vfp = per_cpu_ptr(kvm_host_vfp_state, cpu);
		err = create_hyp_mappings(vfp, vfp + 1);

		if (err) {
			kvm_err("Cannot map host VFP state: %d\n", err);
			goto out_free_vfp;
		}
	}

	kvm_info("Hyp mode initialized successfully\n");
	return 0;
out_free_vfp:
	free_percpu(kvm_host_vfp_state);
out_free_mappings:
	free_hyp_pmds();
out_free_stack_pages:
	for_each_possible_cpu(cpu)
		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
out_err:
	kvm_err("error initializing Hyp mode: %d\n", err);
	return err;
}

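/*
 * Illustration only, not part of the original file: after init_hyp_mode()
 * succeeds, the Hyp-mode address space is expected to contain roughly:
 *
 *	- the world-switch code between __kvm_hyp_code_start and
 *	  __kvm_hyp_code_end,
 *	- one stack page per possible CPU, and
 *	- the per-CPU host VFP save areas,
 *
 * while the identity mapping used only to bootstrap the Hyp MMU has
 * already been torn down again by kvm_clear_hyp_idmap().
 */
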
/**
 * kvm_arch_init - initialize Hyp mode and memory mappings on all CPUs
 * @opaque:	unused by this architecture
 */
int kvm_arch_init(void *opaque)
{
	int err;

	if (!is_hyp_mode_available()) {
		kvm_err("HYP mode not available\n");
		return -ENODEV;
	}

	if (kvm_target_cpu() < 0) {
		kvm_err("Target CPU not supported!\n");
		return -ENODEV;
	}

	err = init_hyp_mode();
	if (err)
		goto out_err;

	return 0;
out_err:
	return err;
}

/* NOP: Compiling as a module not supported */
void kvm_arch_exit(void)
{
}

static int arm_init(void)
{
	int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	return rc;
}

module_init(arm_init);
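
/*
 * Illustration only, not part of the original file: since this code can
 * only be built in (see kvm_arch_exit() above), the module_init() above
 * degenerates to an ordinary initcall, roughly equivalent to:
 *
 *	device_initcall(arm_init);
 */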