/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_asm.h>

#include "vgic.h"

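/*
 * Handle a maintenance interrupt: for each list register flagged in
 * EISR, derive the INTID of the EOIed interrupt and run the ack
 * notifiers for that SPI, then clear the stale EISR state and any
 * underflow request.
 */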
void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;

	if (cpuif->vgic_misr & ICH_MISR_EOI) {
		unsigned long eisr_bmap = cpuif->vgic_eisr;
		int lr;

		for_each_set_bit(lr, &eisr_bmap, kvm_vgic_global_state.nr_lr) {
			u32 intid;
			u64 val = cpuif->vgic_lr[lr];

			if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
				intid = val & ICH_LR_VIRTUAL_ID_MASK;
			else
				intid = val & GICH_LR_VIRTUALID;

			WARN_ON(cpuif->vgic_lr[lr] & ICH_LR_STATE);

			kvm_notify_acked_irq(vcpu->kvm, 0,
					     intid - VGIC_NR_PRIVATE_IRQS);
		}

		/*
		 * In the next iterations of the vcpu loop, if we sync
		 * the vgic state after flushing it, but before
		 * entering the guest (this happens for pending
		 * signals and vmid rollovers), then make sure we
		 * don't pick up any old maintenance interrupts here.
		 */
		cpuif->vgic_eisr = 0;
	}

	cpuif->vgic_hcr &= ~ICH_HCR_UIE;
}

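/*
 * Set the Underflow Interrupt Enable bit, so that the hardware raises
 * a maintenance interrupt once the list registers run (almost) empty
 * and we get a chance to queue further pending interrupts.
 */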
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;

	cpuif->vgic_hcr |= ICH_HCR_UIE;
}

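/*
 * Fold the list register state left behind by the guest back into the
 * software model: the active bit, the latched pending bit for edge
 * interrupts (including the source CPU of a GICv2 SGI), and the
 * regenerated pending state for level interrupts.
 */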
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	int lr;

	for (lr = 0; lr < vcpu->arch.vgic_cpu.used_lrs; lr++) {
		u64 val = cpuif->vgic_lr[lr];
		u32 intid;
		struct vgic_irq *irq;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
			intid = val & ICH_LR_VIRTUAL_ID_MASK;
		else
			intid = val & GICH_LR_VIRTUALID;
		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
		if (!irq)	/* An LPI could have been unmapped. */
			continue;

		spin_lock(&irq->irq_lock);

		/* Always preserve the active bit */
		irq->active = !!(val & ICH_LR_ACTIVE_BIT);

		/* Edge is the only case where we preserve the pending bit */
		if (irq->config == VGIC_CONFIG_EDGE &&
		    (val & ICH_LR_PENDING_BIT)) {
			irq->pending = true;

			if (vgic_irq_is_sgi(intid) &&
			    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
				u32 cpuid = val & GICH_LR_PHYSID_CPUID;

				cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
				irq->source |= (1 << cpuid);
			}
		}

		/*
		 * Clear soft pending state when level irqs have been acked.
		 * Always regenerate the pending state.
		 */
		if (irq->config == VGIC_CONFIG_LEVEL) {
			if (!(val & ICH_LR_PENDING_BIT))
				irq->soft_pending = false;

			irq->pending = irq->line_level || irq->soft_pending;
		}

		spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

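/*
 * Translate the software state of a struct vgic_irq into its list
 * register image: pending and active bits, the source CPU for a GICv2
 * SGI, the physical INTID for hardware-mapped interrupts, group and
 * priority.
 */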
/* Requires the irq to be locked already */
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u64 val = irq->intid;

	if (irq->pending) {
		val |= ICH_LR_PENDING_BIT;

		if (irq->config == VGIC_CONFIG_EDGE)
			irq->pending = false;

		if (vgic_irq_is_sgi(irq->intid) &&
		    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
			u32 src = ffs(irq->source);

			BUG_ON(!src);
			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
			irq->source &= ~(1 << (src - 1));
			if (irq->source)
				irq->pending = true;
		}
	}

	if (irq->active)
		val |= ICH_LR_ACTIVE_BIT;

	if (irq->hw) {
		val |= ICH_LR_HW;
		val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
	} else {
		if (irq->config == VGIC_CONFIG_LEVEL)
			val |= ICH_LR_EOI;
	}

	/*
	 * We currently only support Group1 interrupts, which is a
	 * known defect. This needs to be addressed at some point.
	 */
	if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
		val |= ICH_LR_GROUP;

	val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;

	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
}

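/* Reset the given list register to its empty (invalid) state. */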
void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0;
}

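/* Pack the individual VMCR fields into the shadow ICH_VMCR_EL2 value. */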
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	u32 vmcr;

	vmcr  = (vmcrp->ctlr << ICH_VMCR_CTLR_SHIFT) & ICH_VMCR_CTLR_MASK;
	vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
	vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
	vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;

	vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr = vmcr;
}

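/* Unpack the shadow ICH_VMCR_EL2 value back into its individual fields. */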
void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	u32 vmcr = vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr;

	vmcrp->ctlr = (vmcr & ICH_VMCR_CTLR_MASK) >> ICH_VMCR_CTLR_SHIFT;
	vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	vmcrp->bpr  = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
	vmcrp->pmr  = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
}

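/*
 * Reset value presented to the guest in GICR_PENDBASER: inner
 * read-allocate/write-back cacheable, outer cacheability the same as
 * inner, inner shareable.
 */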
#define INITIAL_PENDBASER_VALUE \
	(GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb) | \
	 GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, SameAsInner) | \
	 GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable))

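/*
 * Bring the GICv3 CPU interface of this vcpu to its reset state and
 * switch it on.
 */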
void vgic_v3_enable(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vgic_v3->vgic_vmcr = 0;
	vgic_v3->vgic_elrsr = ~0;

	/*
	 * If we are emulating a GICv3, we do it in a non-GICv2-compatible
	 * way, so we force SRE to 1 to demonstrate this to the guest.
	 * This goes with the spec allowing the value to be RAO/WI.
	 */
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		vgic_v3->vgic_sre = ICC_SRE_EL1_SRE;
		vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
	} else {
		vgic_v3->vgic_sre = 0;
	}

	/* Get the show on the road... */
	vgic_v3->vgic_hcr = ICH_HCR_EN;
}

/* check for overlapping regions and for regions crossing the end of memory */
static bool vgic_v3_check_base(struct kvm *kvm)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	gpa_t redist_size = KVM_VGIC_V3_REDIST_SIZE;

	redist_size *= atomic_read(&kvm->online_vcpus);

	if (d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE < d->vgic_dist_base)
		return false;
	if (d->vgic_redist_base + redist_size < d->vgic_redist_base)
		return false;

	if (d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE <= d->vgic_redist_base)
		return true;
	if (d->vgic_redist_base + redist_size <= d->vgic_dist_base)
		return true;

	return false;
}

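/*
 * Register the distributor and redistributor MMIO devices once
 * userland has set their base addresses and initialized the VGIC;
 * tear the VGIC down again if any step fails.
 */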
int vgic_v3_map_resources(struct kvm *kvm)
{
	int ret = 0;
	struct vgic_dist *dist = &kvm->arch.vgic;

	if (vgic_ready(kvm))
		goto out;

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(dist->vgic_redist_base)) {
		kvm_err("Need to set vgic distributor addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	if (!vgic_v3_check_base(kvm)) {
		kvm_err("VGIC redist and dist frames overlap\n");
		ret = -EINVAL;
		goto out;
	}

	/*
	 * For a VGICv3 we require the userland to explicitly initialize
	 * the VGIC before we need to use it.
	 */
	if (!vgic_initialized(kvm)) {
		ret = -EBUSY;
		goto out;
	}

	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
	if (ret) {
		kvm_err("Unable to register VGICv3 dist MMIO regions\n");
		goto out;
	}

	ret = vgic_register_redist_iodevs(kvm, dist->vgic_redist_base);
	if (ret) {
		kvm_err("Unable to register VGICv3 redist MMIO regions\n");
		goto out;
	}

	dist->ready = true;

out:
	if (ret)
		kvm_vgic_destroy(kvm);
	return ret;
}

/**
 * vgic_v3_probe - probe for a GICv3 compatible interrupt controller
 * @info:	pointer to the GIC description
 *
 * Returns 0 if a GICv3 has been found, returns an error code otherwise
 */
int vgic_v3_probe(const struct gic_kvm_info *info)
{
	u32 ich_vtr_el2 = kvm_call_hyp(__vgic_v3_get_ich_vtr_el2);
	int ret;

	/*
	 * The ListRegs field is 5 bits, but there is an architectural
	 * maximum of 16 list registers. Just ignore bit 4...
	 */
	kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;
	kvm_vgic_global_state.can_emulate_gicv2 = false;

	if (!info->vcpu.start) {
		kvm_info("GICv3: no GICV resource entry\n");
		kvm_vgic_global_state.vcpu_base = 0;
	} else if (!PAGE_ALIGNED(info->vcpu.start)) {
		pr_warn("GICV physical address 0x%llx not page aligned\n",
			(unsigned long long)info->vcpu.start);
		kvm_vgic_global_state.vcpu_base = 0;
	} else if (!PAGE_ALIGNED(resource_size(&info->vcpu))) {
		pr_warn("GICV size 0x%llx not a multiple of page size 0x%lx\n",
			(unsigned long long)resource_size(&info->vcpu),
			PAGE_SIZE);
		kvm_vgic_global_state.vcpu_base = 0;
	} else {
		kvm_vgic_global_state.vcpu_base = info->vcpu.start;
		kvm_vgic_global_state.can_emulate_gicv2 = true;
		ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
		if (ret) {
			kvm_err("Cannot register GICv2 KVM device.\n");
			return ret;
		}
		kvm_info("vgic-v2@%llx\n", info->vcpu.start);
	}
	ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3);
	if (ret) {
		kvm_err("Cannot register GICv3 KVM device.\n");
		kvm_unregister_device_ops(KVM_DEV_TYPE_ARM_VGIC_V2);
		return ret;
	}

	if (kvm_vgic_global_state.vcpu_base == 0)
		kvm_info("disabling GICv2 emulation\n");

	kvm_vgic_global_state.vctrl_base = NULL;
	kvm_vgic_global_state.type = VGIC_V3;
	kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS;

	return 0;
}