/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/kvm.h>
#include <trace/events/kvm.h>
#include <kvm/arm_pmu.h>

#define CREATE_TRACE_POINTS
#include "trace.h"
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/mman.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/virt.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_psci.h>
#include <asm/sections.h>
__asm__(".arch_extension virt");
static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
static kvm_cpu_context_t __percpu *kvm_host_cpu_state;
static unsigned long hyp_default_vectors;

/* Per-CPU variable containing the currently running vcpu. */
static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);

/* The VMID used in the VTTBR */
static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
static u32 kvm_next_vmid;
static unsigned int kvm_vmid_bits __read_mostly;
static DEFINE_SPINLOCK(kvm_vmid_lock);

static bool vgic_present;
static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
{
	BUG_ON(preemptible());
	__this_cpu_write(kvm_arm_running_vcpu, vcpu);
}
/**
 * kvm_arm_get_running_vcpu - get the vcpu running on the current CPU.
 * Must be called from non-preemptible context.
 */
struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
{
	BUG_ON(preemptible());
	return __this_cpu_read(kvm_arm_running_vcpu);
}
/**
 * kvm_arm_get_running_vcpus - get the per-CPU array of currently running vcpus.
 */
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
{
	return &kvm_arm_running_vcpu;
}
int kvm_arch_hardware_enable(void)
{
	return 0;
}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}
int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}
/**
 * kvm_arch_init_vm - initializes a VM data structure
 * @kvm:	pointer to the KVM struct
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int ret;

	ret = kvm_alloc_stage2_pgd(kvm);
	if (ret)
		goto out_fail_alloc;

	ret = create_hyp_mappings(kvm, kvm + 1);
	if (ret)
		goto out_free_stage2_pgd;

	kvm_vgic_early_init(kvm);

	/* Mark the initial VMID generation invalid */
	kvm->arch.vmid_gen = 0;

	/* The maximum number of VCPUs is limited by the host's GIC model */
	kvm->arch.max_vcpus = vgic_present ?
				kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;

	return ret;

out_free_stage2_pgd:
	kvm_free_stage2_pgd(kvm);
out_fail_alloc:
	return ret;
}
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
/**
 * kvm_arch_destroy_vm - destroy the VM data structure
 * @kvm:	pointer to the KVM struct
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	int i;

	kvm_free_stage2_pgd(kvm);

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_arch_vcpu_free(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}

	kvm_vgic_destroy(kvm);
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
		r = vgic_present;
		break;
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ARM_PSCI:
	case KVM_CAP_ARM_PSCI_0_2:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_MP_STATE:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_ARM_SET_DEVICE_ADDR:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	default:
		r = kvm_arch_dev_ioctl_check_extension(ext);
		break;
	}
	return r;
}
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	int err;
	struct kvm_vcpu *vcpu;

	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) {
		err = -EBUSY;
		goto out;
	}

	if (id >= kvm->arch.max_vcpus) {
		err = -EINVAL;
		goto out;
	}

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	err = create_hyp_mappings(vcpu, vcpu + 1);
	if (err)
		goto vcpu_uninit;

	return vcpu;

vcpu_uninit:
	kvm_vcpu_uninit(vcpu);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	kvm_vgic_vcpu_early_init(vcpu);
}
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	kvm_mmu_free_memory_caches(vcpu);
	kvm_timer_vcpu_terminate(vcpu);
	kvm_vgic_vcpu_destroy(vcpu);
	kvm_pmu_vcpu_destroy(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_timer_should_fire(vcpu);
}
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
	kvm_timer_schedule(vcpu);
}

void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
	kvm_timer_unschedule(vcpu);
}
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	/* Force users to call KVM_ARM_VCPU_INIT */
	vcpu->arch.target = -1;
	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);

	/* Set up the timer */
	kvm_timer_vcpu_init(vcpu);

	kvm_arm_reset_debug_ptr(vcpu);

	return 0;
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->cpu = cpu;
	vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);

	kvm_arm_set_running_vcpu(vcpu);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	/*
	 * The arch-generic KVM code expects the cpu field of a vcpu to be -1
	 * if the vcpu is no longer assigned to a cpu. This is used for the
	 * optimized make_all_cpus_request path.
	 */
	vcpu->cpu = -1;

	kvm_arm_set_running_vcpu(NULL);
	kvm_timer_vcpu_put(vcpu);
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	if (vcpu->arch.power_off)
		mp_state->mp_state = KVM_MP_STATE_STOPPED;
	else
		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;

	return 0;
}
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.power_off = false;
		break;
	case KVM_MP_STATE_STOPPED:
		vcpu->arch.power_off = true;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/**
 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
 * @v:		The VCPU pointer
 *
 * If the guest CPU is not waiting for interrupts or an interrupt line is
 * asserted, the CPU is by definition runnable.
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return ((!!v->arch.irq_lines || kvm_vgic_vcpu_pending_irq(v))
		&& !v->arch.power_off && !v->arch.pause);
}
/* Just ensure a guest exit from a particular CPU */
static void exit_vm_noop(void *info)
{
}

void force_vm_exit(const cpumask_t *mask)
{
	smp_call_function_many(mask, exit_vm_noop, NULL, true);
}
/**
 * need_new_vmid_gen - check that the VMID is still valid
 * @kvm: The VM's VMID to check
 *
 * return true if there is a new generation of VMIDs being used
 *
 * The hardware supports only 256 values with the value zero reserved for the
 * host, so we check if an assigned value belongs to a previous generation,
 * which requires us to assign a new value. If we're the first to use a VMID
 * for the new generation, we must flush necessary caches and TLBs on all
 * CPUs.
 */
static bool need_new_vmid_gen(struct kvm *kvm)
{
	return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
}
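/*
 * Worked example of the generation check above (made-up numbers): with
 * kvm_vmid_gen at 3 and a VM whose VMID was assigned during generation 2,
 * kvm->arch.vmid_gen (2) != kvm_vmid_gen (3), so need_new_vmid_gen()
 * returns true and update_vttbr() below hands out a fresh VMID. A VM that
 * already ran in generation 3 compares equal and skips the slow path.
 */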
/**
 * update_vttbr - Update the VTTBR with a valid VMID before the guest runs
 * @kvm:	The guest that we are about to run
 *
 * Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the
 * VM has a valid VMID, otherwise assigns a new one and flushes the
 * corresponding TLBs.
 */
static void update_vttbr(struct kvm *kvm)
{
	phys_addr_t pgd_phys;
	u64 vmid;

	if (!need_new_vmid_gen(kvm))
		return;

	spin_lock(&kvm_vmid_lock);

	/*
	 * We need to re-check the vmid_gen here to ensure that if another vcpu
	 * already allocated a valid vmid for this vm, then this vcpu should
	 * use the same vmid.
	 */
	if (!need_new_vmid_gen(kvm)) {
		spin_unlock(&kvm_vmid_lock);
		return;
	}

	/* First user of a new VMID generation? */
	if (unlikely(kvm_next_vmid == 0)) {
		atomic64_inc(&kvm_vmid_gen);
		kvm_next_vmid = 1;

		/*
		 * On SMP we know no other CPUs can use this CPU's or each
		 * other's VMID after force_vm_exit returns since the
		 * kvm_vmid_lock blocks them from reentry to the guest.
		 */
		force_vm_exit(cpu_all_mask);
		/*
		 * Now broadcast TLB + ICACHE invalidation over the inner
		 * shareable domain to make sure all data structures are
		 * clean.
		 */
		kvm_call_hyp(__kvm_flush_vm_context);
	}

	kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
	kvm->arch.vmid = kvm_next_vmid;
	kvm_next_vmid++;
	kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;

	/* update vttbr to be used with the new vmid */
	pgd_phys = virt_to_phys(kvm_get_hwpgd(kvm));
	BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
	vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) &
	       VTTBR_VMID_MASK(kvm_vmid_bits);
	kvm->arch.vttbr = pgd_phys | vmid;

	spin_unlock(&kvm_vmid_lock);
}
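/*
 * Sketch of the VTTBR packing performed above, with assumed example values
 * (8-bit VMIDs and VTTBR_VMID_SHIFT == 48):
 *
 *	pgd_phys = 0x8f250000			stage-2 pgd base
 *	vmid     = (u64)42 << 48	      = 0x002a000000000000
 *	vttbr    = pgd_phys | vmid	      = 0x002a00008f250000
 *
 * i.e. the VMID occupies bits [55:48] and the pgd base the low bits
 * covered by VTTBR_BADDR_MASK.
 */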
static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	int ret;

	if (likely(vcpu->arch.has_run_once))
		return 0;

	vcpu->arch.has_run_once = true;

	/*
	 * Map the VGIC hardware resources before running a vcpu the first
	 * time on this VM.
	 */
	if (unlikely(irqchip_in_kernel(kvm) && !vgic_ready(kvm))) {
		ret = kvm_vgic_map_resources(kvm);
		if (ret)
			return ret;
	}

	/*
	 * Enable the arch timers only if we have an in-kernel VGIC
	 * and it has been properly initialized, since we cannot handle
	 * interrupts from the virtual timer with a userspace gic.
	 */
	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
		kvm_timer_enable(kvm);

	return 0;
}
bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return vgic_initialized(kvm);
}
static void kvm_arm_halt_guest(struct kvm *kvm) __maybe_unused;
static void kvm_arm_resume_guest(struct kvm *kvm) __maybe_unused;
static void kvm_arm_halt_guest(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.pause = true;
	force_vm_exit(cpu_all_mask);
}
static void kvm_arm_resume_guest(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);

		vcpu->arch.pause = false;
		swake_up(wq);
	}
}
static void vcpu_sleep(struct kvm_vcpu *vcpu)
{
	struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);

	swait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
				       (!vcpu->arch.pause)));
}
static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.target >= 0;
}
/**
 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
 * @vcpu:	The VCPU pointer
 * @run:	The kvm_run structure pointer used for userspace state exchange
 *
 * This function is called through the KVM_RUN ioctl from user space. It
 * will execute VM code in a loop until the time slice for the process is
 * used up or some emulation is needed from user space, in which case the
 * function returns with value 0 and with the kvm_run structure filled in
 * with the required data for the requested emulation.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret;
	sigset_t sigsaved;

	if (unlikely(!kvm_vcpu_initialized(vcpu)))
		return -ENOEXEC;

	ret = kvm_vcpu_first_run_init(vcpu);
	if (ret)
		return ret;

	if (run->exit_reason == KVM_EXIT_MMIO) {
		ret = kvm_handle_mmio_return(vcpu, vcpu->run);
		if (ret)
			return ret;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	while (ret > 0) {
		/*
		 * Check conditions before entering the guest
		 */
		cond_resched();

		update_vttbr(vcpu->kvm);

		if (vcpu->arch.power_off || vcpu->arch.pause)
			vcpu_sleep(vcpu);

		/*
		 * Preparing the interrupts to be injected also
		 * involves poking the GIC, which must be done in a
		 * non-preemptible context.
		 */
		preempt_disable();
		kvm_pmu_flush_hwstate(vcpu);
		kvm_timer_flush_hwstate(vcpu);
		kvm_vgic_flush_hwstate(vcpu);

		local_irq_disable();

		/*
		 * Re-check atomic conditions
		 */
		if (signal_pending(current)) {
			ret = -EINTR;
			run->exit_reason = KVM_EXIT_INTR;
		}

		if (ret <= 0 || need_new_vmid_gen(vcpu->kvm) ||
		    vcpu->arch.power_off || vcpu->arch.pause) {
			local_irq_enable();
			kvm_pmu_sync_hwstate(vcpu);
			kvm_timer_sync_hwstate(vcpu);
			kvm_vgic_sync_hwstate(vcpu);
			preempt_enable();
			continue;
		}

		kvm_arm_setup_debug(vcpu);

		/**************************************************************
		 * Enter the guest
		 */
		trace_kvm_entry(*vcpu_pc(vcpu));
		__kvm_guest_enter();
		vcpu->mode = IN_GUEST_MODE;

		ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		/*
		 * Back from guest
		 *************************************************************/

		kvm_arm_clear_debug(vcpu);

		/*
		 * We may have taken a host interrupt in HYP mode (ie
		 * while executing the guest). This interrupt is still
		 * pending, as we haven't serviced it yet!
		 *
		 * We're now back in SVC mode, with interrupts
		 * disabled. Enabling the interrupts now will have
		 * the effect of taking the interrupt again, in SVC
		 * mode this time.
		 */
		local_irq_enable();

		/*
		 * We do local_irq_enable() before calling kvm_guest_exit() so
		 * that if a timer interrupt hits while running the guest we
		 * account that tick as being spent in the guest. We enable
		 * preemption after calling kvm_guest_exit() so that if we get
		 * preempted we make sure ticks after that are not counted as
		 * guest time.
		 */
		kvm_guest_exit();
		trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));

		/*
		 * We must sync the PMU and timer state before the vgic state so
		 * that the vgic can properly sample the updated state of the
		 * interrupt line.
		 */
		kvm_pmu_sync_hwstate(vcpu);
		kvm_timer_sync_hwstate(vcpu);

		kvm_vgic_sync_hwstate(vcpu);

		preempt_enable();

		ret = handle_exit(vcpu, run, ret);
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
	return ret;
}
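/*
 * Illustrative (non-kernel) sketch of the userspace side of the contract
 * documented above; the fd names and helpers are assumptions, not part of
 * this file:
 *
 *	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	for (;;) {
 *		ioctl(vcpu_fd, KVM_RUN, 0);
 *		switch (run->exit_reason) {
 *		case KVM_EXIT_MMIO:	emulate_mmio(run); break;
 *		case KVM_EXIT_INTR:	break;		// signal, just retry
 *		default:		handle_or_quit(run);
 *		}
 *	}
 *
 * where mmap_size comes from ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0).
 */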
static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
{
	int bit_index;
	bool set;
	unsigned long *ptr;

	if (number == KVM_ARM_IRQ_CPU_IRQ)
		bit_index = __ffs(HCR_VI);
	else /* KVM_ARM_IRQ_CPU_FIQ */
		bit_index = __ffs(HCR_VF);

	ptr = (unsigned long *)&vcpu->arch.irq_lines;
	if (level)
		set = test_and_set_bit(bit_index, ptr);
	else
		set = test_and_clear_bit(bit_index, ptr);

	/*
	 * If we didn't change anything, no need to wake up or kick other CPUs
	 */
	if (set == level)
		return 0;

	/*
	 * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
	 * trigger a world-switch round on the running physical CPU to set the
	 * virtual IRQ/FIQ fields in the HCR appropriately.
	 */
	kvm_vcpu_kick(vcpu);

	return 0;
}
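/*
 * Worked example for the bit handling above, assuming the usual ARMv7
 * encodings HCR_VI == (1 << 7) and HCR_VF == (1 << 6): raising the IRQ
 * line computes bit_index = __ffs(HCR_VI) = 7 and sets bit 7 of
 * vcpu->arch.irq_lines; the next world switch copies that bit into the
 * HCR, so the guest observes a pending virtual IRQ.
 */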
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			  bool line_status)
{
	u32 irq = irq_level->irq;
	unsigned int irq_type, vcpu_idx, irq_num;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	struct kvm_vcpu *vcpu = NULL;
	bool level = irq_level->level;

	irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
	vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
	irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;

	trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);

	switch (irq_type) {
	case KVM_ARM_IRQ_TYPE_CPU:
		if (irqchip_in_kernel(kvm))
			return -ENXIO;

		if (vcpu_idx >= nrcpus)
			return -EINVAL;

		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
		if (!vcpu)
			return -EINVAL;

		if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
			return -EINVAL;

		return vcpu_interrupt_line(vcpu, irq_num, level);
	case KVM_ARM_IRQ_TYPE_PPI:
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;

		if (vcpu_idx >= nrcpus)
			return -EINVAL;

		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
		if (!vcpu)
			return -EINVAL;

		if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
			return -EINVAL;

		return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level);
	case KVM_ARM_IRQ_TYPE_SPI:
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;

		if (irq_num < VGIC_NR_PRIVATE_IRQS)
			return -EINVAL;

		return kvm_vgic_inject_irq(kvm, 0, irq_num, level);
	}

	return -EINVAL;
}
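/*
 * Example of the irq field layout decoded above, assuming the uapi
 * encoding (type in bits [31:24], vcpu index in [23:16], irq number in
 * [15:0]): irq = 0x01000020 decodes to irq_type = KVM_ARM_IRQ_TYPE_SPI,
 * vcpu_idx = 0 and irq_num = 32, i.e. the first shared peripheral
 * interrupt on the in-kernel VGIC.
 */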
static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
			       const struct kvm_vcpu_init *init)
{
	unsigned int i;
	int phys_target = kvm_target_cpu();

	if (init->target != phys_target)
		return -EINVAL;

	/*
	 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
	 * use the same target.
	 */
	if (vcpu->arch.target != -1 && vcpu->arch.target != init->target)
		return -EINVAL;

	/* -ENOENT for unknown features, -EINVAL for invalid combinations. */
	for (i = 0; i < sizeof(init->features) * 8; i++) {
		bool set = (init->features[i / 32] & (1 << (i % 32)));

		if (set && i >= KVM_VCPU_MAX_FEATURES)
			return -ENOENT;

		/*
		 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
		 * use the same feature set.
		 */
		if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES &&
		    test_bit(i, vcpu->arch.features) != set)
			return -EINVAL;

		if (set)
			set_bit(i, vcpu->arch.features);
	}

	vcpu->arch.target = phys_target;

	/* Now we know what it is, we can reset it. */
	return kvm_reset_vcpu(vcpu);
}
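/*
 * Worked example of the feature-bit indexing in the loop above:
 * init->features[] is an array of 32-bit words, so feature bit i = 35
 * lives in word 35 / 32 = 1 at position 35 % 32 = 3 and is tested with
 * init->features[1] & (1 << 3). The loop walks all
 * sizeof(init->features) * 8 such bit positions.
 */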
static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
					 struct kvm_vcpu_init *init)
{
	int ret;

	ret = kvm_vcpu_set_target(vcpu, init);
	if (ret)
		return ret;

	/*
	 * Ensure a rebooted VM will fault in RAM pages and detect if the
	 * guest MMU is turned off and flush the caches as needed.
	 */
	if (vcpu->arch.has_run_once)
		stage2_unmap_vm(vcpu->kvm);

	vcpu_reset_hcr(vcpu);

	/*
	 * Handle the "start in power-off" case.
	 */
	if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
		vcpu->arch.power_off = true;
	else
		vcpu->arch.power_off = false;

	return 0;
}
static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr);
		break;
	}

	return ret;
}
static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr);
		break;
	}

	return ret;
}
static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
				 struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	default:
		ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr);
		break;
	}

	return ret;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;

	switch (ioctl) {
	case KVM_ARM_VCPU_INIT: {
		struct kvm_vcpu_init init;

		if (copy_from_user(&init, argp, sizeof(init)))
			return -EFAULT;

		return kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
	}
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		if (unlikely(!kvm_vcpu_initialized(vcpu)))
			return -ENOEXEC;

		if (copy_from_user(&reg, argp, sizeof(reg)))
			return -EFAULT;
		if (ioctl == KVM_SET_ONE_REG)
			return kvm_arm_set_reg(vcpu, &reg);
		else
			return kvm_arm_get_reg(vcpu, &reg);
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned n;

		if (unlikely(!kvm_vcpu_initialized(vcpu)))
			return -ENOEXEC;

		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			return -EFAULT;
		n = reg_list.n;
		reg_list.n = kvm_arm_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			return -EFAULT;
		if (n < reg_list.n)
			return -E2BIG;
		return kvm_arm_copy_reg_indices(vcpu, user_list->reg);
	}
	case KVM_SET_DEVICE_ATTR: {
		if (copy_from_user(&attr, argp, sizeof(attr)))
			return -EFAULT;
		return kvm_arm_vcpu_set_attr(vcpu, &attr);
	}
	case KVM_GET_DEVICE_ATTR: {
		if (copy_from_user(&attr, argp, sizeof(attr)))
			return -EFAULT;
		return kvm_arm_vcpu_get_attr(vcpu, &attr);
	}
	case KVM_HAS_DEVICE_ATTR: {
		if (copy_from_user(&attr, argp, sizeof(attr)))
			return -EFAULT;
		return kvm_arm_vcpu_has_attr(vcpu, &attr);
	}
	default:
		return -EINVAL;
	}
}
/**
 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
 * @kvm:	kvm instance
 * @log:	slot id and address to which we copy the log
 *
 * Steps 1-4 below provide a general overview of dirty page logging. See
 * kvm_get_dirty_log_protect() function description for additional details.
 *
 * We call kvm_get_dirty_log_protect() to handle steps 1-3; upon return we
 * always flush the TLB (step 4), even if the previous step failed and the
 * dirty bitmap may be corrupt. Regardless of the previous outcome, the KVM
 * logging API does not preclude a subsequent dirty log read by user space.
 * Flushing the TLB ensures writes will be marked dirty for the next log read.
 *
 *   1. Take a snapshot of the bit and clear it if needed.
 *   2. Write protect the corresponding page.
 *   3. Copy the snapshot to the userspace.
 *   4. Flush TLBs if needed.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	bool is_dirty = false;
	int r;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);

	if (is_dirty)
		kvm_flush_remote_tlbs(kvm);

	mutex_unlock(&kvm->slots_lock);
	return r;
}
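/*
 * Illustrative userspace sequence for the steps documented above (fd
 * names and slot id are assumptions): the VMM enables logging on a
 * memslot via KVM_SET_USER_MEMORY_REGION with KVM_MEM_LOG_DIRTY_PAGES,
 * then periodically fills struct kvm_dirty_log { .slot = 0,
 * .dirty_bitmap = buf } and calls ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 * each set bit in buf names a guest page written since the previous call.
 */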
static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
					struct kvm_arm_device_addr *dev_addr)
{
	unsigned long dev_id, type;

	dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >>
		KVM_ARM_DEVICE_ID_SHIFT;
	type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >>
		KVM_ARM_DEVICE_TYPE_SHIFT;

	switch (dev_id) {
	case KVM_ARM_DEVICE_VGIC_V2:
		if (!vgic_present)
			return -ENXIO;
		return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
	default:
		return -ENODEV;
	}
}
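/*
 * Example of the id decoding above, assuming the uapi layout (device id
 * in bits [31:16], device-local address type in bits [15:0]):
 * dev_addr->id = 0x00000001 yields dev_id = KVM_ARM_DEVICE_VGIC_V2 (0)
 * and type = KVM_VGIC_V2_ADDR_TYPE_CPU (1), i.e. userspace is setting
 * the base address of the VGIC CPU interface.
 */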
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_CREATE_IRQCHIP: {
		if (!vgic_present)
			return -ENXIO;
		return kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
	}
	case KVM_ARM_SET_DEVICE_ADDR: {
		struct kvm_arm_device_addr dev_addr;

		if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
			return -EFAULT;
		return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
	}
	case KVM_ARM_PREFERRED_TARGET: {
		int err;
		struct kvm_vcpu_init init;

		err = kvm_vcpu_preferred_target(&init);
		if (err)
			return err;

		if (copy_to_user(argp, &init, sizeof(init)))
			return -EFAULT;

		return 0;
	}
	default:
		return -EINVAL;
	}
}
static void cpu_init_stage2(void *dummy)
{
	__cpu_init_stage2();
}
static void cpu_init_hyp_mode(void *dummy)
{
	phys_addr_t boot_pgd_ptr;
	phys_addr_t pgd_ptr;
	unsigned long hyp_stack_ptr;
	unsigned long stack_page;
	unsigned long vector_ptr;

	/* Switch from the HYP stub to our own HYP init vector */
	__hyp_set_vectors(kvm_get_idmap_vector());

	boot_pgd_ptr = kvm_mmu_get_boot_httbr();
	pgd_ptr = kvm_mmu_get_httbr();
	stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
	hyp_stack_ptr = stack_page + PAGE_SIZE;
	vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);

	__cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr);
	__cpu_init_stage2();

	kvm_arm_init_debug();
}
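/*
 * Note on the stack arithmetic above: the HYP stack grows downwards, so
 * the initial stack pointer is set one byte past the end of the single
 * allocated page. E.g. with 4K pages, a stack page at 0x80004000 gives
 * hyp_stack_ptr = 0x80004000 + PAGE_SIZE = 0x80005000.
 */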
static void cpu_hyp_reinit(void)
{
	if (is_kernel_in_hyp_mode()) {
		/*
		 * cpu_init_stage2() is safe to call even if the PM
		 * event was cancelled before the CPU was reset.
		 */
		cpu_init_stage2(NULL);
	} else {
		if (__hyp_get_vectors() == hyp_default_vectors)
			cpu_init_hyp_mode(NULL);
	}
}
static int hyp_init_cpu_notify(struct notifier_block *self,
			       unsigned long action, void *cpu)
{
	switch (action) {
	case CPU_STARTING:
	case CPU_STARTING_FROZEN:
		cpu_hyp_reinit();
	}

	return NOTIFY_OK;
}

static struct notifier_block hyp_init_cpu_nb = {
	.notifier_call = hyp_init_cpu_notify,
};
#ifdef CONFIG_CPU_PM
static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
				    unsigned long cmd,
				    void *v)
{
	if (cmd == CPU_PM_EXIT) {
		cpu_hyp_reinit();
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

static struct notifier_block hyp_init_cpu_pm_nb = {
	.notifier_call = hyp_init_cpu_pm_notifier,
};

static void __init hyp_cpu_pm_init(void)
{
	cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
}

static void __init hyp_cpu_pm_exit(void)
{
	cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
}
#else
static inline void hyp_cpu_pm_init(void)
{
}

static inline void hyp_cpu_pm_exit(void)
{
}
#endif
static void teardown_common_resources(void)
{
	free_percpu(kvm_host_cpu_state);
}

static int init_common_resources(void)
{
	kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
	if (!kvm_host_cpu_state) {
		kvm_err("Cannot allocate host CPU state\n");
		return -ENOMEM;
	}

	return 0;
}
static int init_subsystems(void)
{
	int err;

	/*
	 * Register CPU Hotplug notifier
	 */
	err = register_cpu_notifier(&hyp_init_cpu_nb);
	if (err) {
		kvm_err("Cannot register KVM init CPU notifier (%d)\n", err);
		return err;
	}

	/*
	 * Register CPU lower-power notifier
	 */
	hyp_cpu_pm_init();

	/*
	 * Init HYP view of VGIC
	 */
	err = kvm_vgic_hyp_init();
	switch (err) {
	case 0:
		vgic_present = true;
		break;
	case -ENODEV:
	case -ENXIO:
		vgic_present = false;
		break;
	default:
		return err;
	}

	/*
	 * Init HYP architected timer support
	 */
	err = kvm_timer_hyp_init();
	if (err)
		return err;

	kvm_perf_init();
	kvm_coproc_table_init();

	return 0;
}
static void teardown_hyp_mode(void)
{
	int cpu;

	if (is_kernel_in_hyp_mode())
		return;

	free_hyp_pgds();
	for_each_possible_cpu(cpu)
		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
	unregister_cpu_notifier(&hyp_init_cpu_nb);
	hyp_cpu_pm_exit();
}
static int init_vhe_mode(void)
{
	/*
	 * Execute the init code on each CPU.
	 */
	on_each_cpu(cpu_init_stage2, NULL, 1);

	/* set size of VMID supported by CPU */
	kvm_vmid_bits = kvm_get_vmid_bits();
	kvm_info("%d-bit VMID\n", kvm_vmid_bits);

	kvm_info("VHE mode initialized successfully\n");
	return 0;
}
/**
 * Inits Hyp-mode on all online CPUs
 */
static int init_hyp_mode(void)
{
	int cpu;
	int err = 0;

	/*
	 * Allocate Hyp PGD and setup Hyp identity mapping
	 */
	err = kvm_mmu_init();
	if (err)
		goto out_err;

	/*
	 * It is probably enough to obtain the default on one
	 * CPU. It's unlikely to be different on the others.
	 */
	hyp_default_vectors = __hyp_get_vectors();

	/*
	 * Allocate stack pages for Hypervisor-mode
	 */
	for_each_possible_cpu(cpu) {
		unsigned long stack_page;

		stack_page = __get_free_page(GFP_KERNEL);
		if (!stack_page) {
			err = -ENOMEM;
			goto out_err;
		}

		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
	}

	/*
	 * Map the Hyp-code called directly from the host
	 */
	err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
				  kvm_ksym_ref(__hyp_text_end));
	if (err) {
		kvm_err("Cannot map world-switch code\n");
		goto out_err;
	}

	err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
				  kvm_ksym_ref(__end_rodata));
	if (err) {
		kvm_err("Cannot map rodata section\n");
		goto out_err;
	}

	/*
	 * Map the Hyp stack pages
	 */
	for_each_possible_cpu(cpu) {
		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
		err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE);

		if (err) {
			kvm_err("Cannot map hyp stack\n");
			goto out_err;
		}
	}

	for_each_possible_cpu(cpu) {
		kvm_cpu_context_t *cpu_ctxt;

		cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu);
		err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1);

		if (err) {
			kvm_err("Cannot map host CPU state: %d\n", err);
			goto out_err;
		}
	}

	/*
	 * Execute the init code on each CPU.
	 */
	on_each_cpu(cpu_init_hyp_mode, NULL, 1);

#ifndef CONFIG_HOTPLUG_CPU
	free_boot_hyp_pgd();
#endif

	/* set size of VMID supported by CPU */
	kvm_vmid_bits = kvm_get_vmid_bits();
	kvm_info("%d-bit VMID\n", kvm_vmid_bits);

	kvm_info("Hyp mode initialized successfully\n");

	return 0;

out_err:
	teardown_hyp_mode();
	kvm_err("error initializing Hyp mode: %d\n", err);
	return err;
}
static void check_kvm_target_cpu(void *ret)
{
	*(int *)ret = kvm_target_cpu();
}
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
{
	int i;
	struct kvm_vcpu *vcpu;

	mpidr &= MPIDR_HWID_BITMASK;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
			return vcpu;
	}
	return NULL;
}
/**
 * Initialize Hyp-mode and memory mappings on all CPUs.
 */
int kvm_arch_init(void *opaque)
{
	int err;
	int ret, cpu;

	if (!is_hyp_mode_available()) {
		kvm_err("HYP mode not available\n");
		return -ENODEV;
	}

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
		if (ret < 0) {
			kvm_err("Error, CPU %d not supported!\n", cpu);
			return -ENODEV;
		}
	}

	err = init_common_resources();
	if (err)
		return err;

	if (is_kernel_in_hyp_mode())
		err = init_vhe_mode();
	else
		err = init_hyp_mode();
	if (err)
		goto out_err;

	err = init_subsystems();
	if (err)
		goto out_hyp;

	return 0;

out_hyp:
	teardown_hyp_mode();
out_err:
	teardown_common_resources();
	return err;
}
/* NOP: Compiling as a module is not supported */
void kvm_arch_exit(void)
{
	kvm_perf_teardown();
}

static int arm_init(void)
{
	int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	return rc;
}

module_init(arm_init);