virt/kvm/arm/hyp/vgic-v2-sr.c

/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/kvm_host.h>

#include <asm/kvm_hyp.h>

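/*
 * Save the maintenance interrupt state: GICH_MISR and GICH_EISR are
 * only read back when a maintenance interrupt can actually be pending,
 * i.e. when underflow signalling is enabled (GICH_HCR_UIE) or when a
 * live, non-hardware LR has requested an EOI maintenance interrupt
 * (GICH_LR_EOI set without GICH_LR_HW). Otherwise the saved state is
 * simply zeroed, sparing the MMIO reads.
 */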
static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu,
					    void __iomem *base)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	int nr_lr = (kern_hyp_va(&kvm_vgic_global_state))->nr_lr;
	u32 eisr0, eisr1;
	int i;
	bool expect_mi;

	expect_mi = !!(cpu_if->vgic_hcr & GICH_HCR_UIE);

	for (i = 0; i < nr_lr; i++) {
		if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
			continue;

		expect_mi |= (!(cpu_if->vgic_lr[i] & GICH_LR_HW) &&
			      (cpu_if->vgic_lr[i] & GICH_LR_EOI));
	}

	if (expect_mi) {
		cpu_if->vgic_misr = readl_relaxed(base + GICH_MISR);

		if (cpu_if->vgic_misr & GICH_MISR_EOI) {
			eisr0 = readl_relaxed(base + GICH_EISR0);
			if (unlikely(nr_lr > 32))
				eisr1 = readl_relaxed(base + GICH_EISR1);
			else
				eisr1 = 0;
		} else {
			eisr0 = eisr1 = 0;
		}
	} else {
		cpu_if->vgic_misr = 0;
		eisr0 = eisr1 = 0;
	}

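	/*
	 * Merge the two 32-bit halves into the u64 the rest of the vgic
	 * code consumes, taking endianness into account.
	 */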
#ifdef CONFIG_CPU_BIG_ENDIAN
	cpu_if->vgic_eisr = ((u64)eisr0 << 32) | eisr1;
#else
	cpu_if->vgic_eisr = ((u64)eisr1 << 32) | eisr0;
#endif
}

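/*
 * Save the empty-LR status. GICH_ELRSR1 is only read when more than 32
 * list registers are implemented; the two halves are then merged in the
 * same endianness-dependent way as the EISR above.
 */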
static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	int nr_lr = (kern_hyp_va(&kvm_vgic_global_state))->nr_lr;
	u32 elrsr0, elrsr1;

	elrsr0 = readl_relaxed(base + GICH_ELRSR0);
	if (unlikely(nr_lr > 32))
		elrsr1 = readl_relaxed(base + GICH_ELRSR1);
	else
		elrsr1 = 0;

#ifdef CONFIG_CPU_BIG_ENDIAN
	cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
#else
	cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
#endif
}

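/*
 * Save the live LRs. An LR flagged as empty in the cached ELRSR has no
 * state worth reading back, so only its state bits are cleared; any
 * other live LR is read from the hardware. Each LR is then zeroed so
 * that no stale state is left behind.
 */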
static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	int nr_lr = (kern_hyp_va(&kvm_vgic_global_state))->nr_lr;
	int i;

	for (i = 0; i < nr_lr; i++) {
		if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
			continue;

		if (cpu_if->vgic_elrsr & (1UL << i))
			cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
		else
			cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));

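		/* The LR content is now saved: clear it in the hardware */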
		writel_relaxed(0, base + GICH_LR0 + (i * 4));
	}
}

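/*
 * Save the state of the virtual CPU interface. VMCR is always saved;
 * APR, the maintenance interrupt state and the LRs are only read back
 * when at least one LR is live, after which the interface is disabled
 * by clearing GICH_HCR.
 */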
/* vcpu is already in the HYP VA space */
void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	void __iomem *base = kern_hyp_va(vgic->vctrl_base);

	if (!base)
		return;

	cpu_if->vgic_vmcr = readl_relaxed(base + GICH_VMCR);

	if (vcpu->arch.vgic_cpu.live_lrs) {
		cpu_if->vgic_apr = readl_relaxed(base + GICH_APR);

		save_maint_int_state(vcpu, base);
		save_elrsr(vcpu, base);
		save_lrs(vcpu, base);

		writel_relaxed(0, base + GICH_HCR);

		vcpu->arch.vgic_cpu.live_lrs = 0;
	} else {
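		/* No live LRs: synthesize an "all empty" state instead */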
		cpu_if->vgic_eisr = 0;
		cpu_if->vgic_elrsr = ~0UL;
		cpu_if->vgic_misr = 0;
		cpu_if->vgic_apr = 0;
	}
}

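/*
 * Restore the state saved by __vgic_v2_save_state: GICH_HCR, GICH_APR
 * and the LRs are only written when at least one LR still carries
 * state; VMCR is restored unconditionally.
 */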
/* vcpu is already in the HYP VA space */
void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	void __iomem *base = kern_hyp_va(vgic->vctrl_base);
	int nr_lr = (kern_hyp_va(&kvm_vgic_global_state))->nr_lr;
	int i;
	u64 live_lrs = 0;

	if (!base)
		return;

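	/* Find out which LRs actually carry state and need reprogramming */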
	for (i = 0; i < nr_lr; i++)
		if (cpu_if->vgic_lr[i] & GICH_LR_STATE)
			live_lrs |= 1UL << i;

	if (live_lrs) {
		writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
		writel_relaxed(cpu_if->vgic_apr, base + GICH_APR);
		for (i = 0; i < nr_lr; i++) {
			if (!(live_lrs & (1UL << i)))
				continue;

			writel_relaxed(cpu_if->vgic_lr[i],
				       base + GICH_LR0 + (i * 4));
		}
	}

	writel_relaxed(cpu_if->vgic_vmcr, base + GICH_VMCR);
	vcpu->arch.vgic_cpu.live_lrs = live_lrs;
}