arm/arm64: KVM: Fix BE accesses to GICv2 EISR and ELRSR regs
author: Christoffer Dall <christoffer.dall@linaro.org>
Sun, 28 Sep 2014 14:04:26 +0000 (16:04 +0200)
committer: Christoffer Dall <christoffer.dall@linaro.org>
Thu, 16 Oct 2014 08:57:41 +0000 (10:57 +0200)
The EISR and ELRSR registers are 32-bit registers on GICv2, and we
store these as an array of two such registers on the vgic vcpu struct.
However, we access them as a single 64-bit value or as a bitmap pointer
in the generic vgic code, which breaks BE support.

Instead, store them as u64 values on the vgic structure and do the
word-swapping in the assembly code, which already handles the byte order
for BE systems.

Tested-by: Victor Kamensky <victor.kamensky@linaro.org>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
arch/arm/kvm/interrupts_head.S
arch/arm64/kvm/vgic-v2-switch.S
include/kvm/arm_vgic.h
virt/kvm/arm/vgic-v2.c
virt/kvm/arm/vgic.c

index 98c8c5b9a87f392a0410a1ca51481e93172bad8f..14d488388480ea50a80d24b18bbed9636c8d1c25 100644 (file)
@@ -433,10 +433,17 @@ ARM_BE8(rev       r10, r10        )
        str     r3, [r11, #VGIC_V2_CPU_HCR]
        str     r4, [r11, #VGIC_V2_CPU_VMCR]
        str     r5, [r11, #VGIC_V2_CPU_MISR]
+#ifdef CONFIG_CPU_ENDIAN_BE8
+       str     r6, [r11, #(VGIC_V2_CPU_EISR + 4)]
+       str     r7, [r11, #VGIC_V2_CPU_EISR]
+       str     r8, [r11, #(VGIC_V2_CPU_ELRSR + 4)]
+       str     r9, [r11, #VGIC_V2_CPU_ELRSR]
+#else
        str     r6, [r11, #VGIC_V2_CPU_EISR]
        str     r7, [r11, #(VGIC_V2_CPU_EISR + 4)]
        str     r8, [r11, #VGIC_V2_CPU_ELRSR]
        str     r9, [r11, #(VGIC_V2_CPU_ELRSR + 4)]
+#endif
        str     r10, [r11, #VGIC_V2_CPU_APR]
 
        /* Clear GICH_HCR */
index ae211772f99177e7653050525652806ed4bd0159..f002fe1c37002aff928e834a3c6c93008a3aa7d1 100644 (file)
@@ -67,10 +67,14 @@ CPU_BE(     rev     w11, w11 )
        str     w4, [x3, #VGIC_V2_CPU_HCR]
        str     w5, [x3, #VGIC_V2_CPU_VMCR]
        str     w6, [x3, #VGIC_V2_CPU_MISR]
-       str     w7, [x3, #VGIC_V2_CPU_EISR]
-       str     w8, [x3, #(VGIC_V2_CPU_EISR + 4)]
-       str     w9, [x3, #VGIC_V2_CPU_ELRSR]
-       str     w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)]
+CPU_LE(        str     w7, [x3, #VGIC_V2_CPU_EISR] )
+CPU_LE(        str     w8, [x3, #(VGIC_V2_CPU_EISR + 4)] )
+CPU_LE(        str     w9, [x3, #VGIC_V2_CPU_ELRSR] )
+CPU_LE(        str     w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)] )
+CPU_BE(        str     w7, [x3, #(VGIC_V2_CPU_EISR + 4)] )
+CPU_BE(        str     w8, [x3, #VGIC_V2_CPU_EISR] )
+CPU_BE(        str     w9, [x3, #(VGIC_V2_CPU_ELRSR + 4)] )
+CPU_BE(        str     w10, [x3, #VGIC_V2_CPU_ELRSR] )
        str     w11, [x3, #VGIC_V2_CPU_APR]
 
        /* Clear GICH_HCR */
index ec559d3264cc0b260a54651e32f1638138e5d186..206dcc3b3f7aa60bace5ba9c6d039d8b4b2c6ace 100644 (file)
@@ -219,8 +219,8 @@ struct vgic_v2_cpu_if {
        u32             vgic_hcr;
        u32             vgic_vmcr;
        u32             vgic_misr;      /* Saved only */
-       u32             vgic_eisr[2];   /* Saved only */
-       u32             vgic_elrsr[2];  /* Saved only */
+       u64             vgic_eisr;      /* Saved only */
+       u64             vgic_elrsr;     /* Saved only */
        u32             vgic_apr;
        u32             vgic_lr[VGIC_V2_MAX_LRS];
 };
index 01124ef3690a03e1c9ecdc464b954b19c6354440..2935405ad22f6ce7f1aaa1239653c3c37224a065 100644 (file)
@@ -71,35 +71,17 @@ static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
                                  struct vgic_lr lr_desc)
 {
        if (!(lr_desc.state & LR_STATE_MASK))
-               set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr);
+               vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr);
 }
 
 static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
 {
-       u64 val;
-
-#if BITS_PER_LONG == 64
-       val  = vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr[1];
-       val <<= 32;
-       val |= vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr[0];
-#else
-       val = *(u64 *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr;
-#endif
-       return val;
+       return vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr;
 }
 
 static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu)
 {
-       u64 val;
-
-#if BITS_PER_LONG == 64
-       val  = vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr[1];
-       val <<= 32;
-       val |= vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr[0];
-#else
-       val = *(u64 *)vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
-#endif
-       return val;
+       return vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
 }
 
 static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu)
index 382fb5a88b9c53950a96c811c8de0761ff0582b2..3aaca49de3257eed0bd905ac26112cacd49588dd 100644 (file)
@@ -145,6 +145,20 @@ static void vgic_free_bitmap(struct vgic_bitmap *b)
        b->shared = NULL;
 }
 
+/*
+ * Call this function to convert a u64 value to an unsigned long * bitmask
+ * in a way that works on both 32-bit and 64-bit LE and BE platforms.
+ *
+ * Warning: Calling this function may modify *val.
+ */
+static unsigned long *u64_to_bitmask(u64 *val)
+{
+#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
+       *val = (*val >> 32) | (*val << 32);
+#endif
+       return (unsigned long *)val;
+}
+
 static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
                                int cpuid, u32 offset)
 {
@@ -1442,7 +1456,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
                 * active bit.
                 */
                u64 eisr = vgic_get_eisr(vcpu);
-               unsigned long *eisr_ptr = (unsigned long *)&eisr;
+               unsigned long *eisr_ptr = u64_to_bitmask(&eisr);
                int lr;
 
                for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
@@ -1505,7 +1519,7 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 
        level_pending = vgic_process_maintenance(vcpu);
        elrsr = vgic_get_elrsr(vcpu);
-       elrsr_ptr = (unsigned long *)&elrsr;
+       elrsr_ptr = u64_to_bitmask(&elrsr);
 
        /* Clear mappings for empty LRs */
        for_each_set_bit(lr, elrsr_ptr, vgic->nr_lr) {
This page took 0.02948 seconds and 5 git commands to generate.