author    Christoffer Dall <christoffer.dall@linaro.org>    2014-09-28 10:04:26 -0400
committer Christoffer Dall <christoffer.dall@linaro.org>    2014-10-16 04:57:41 -0400
commit    2df36a5dd6792870bef48f63bfca42055ea5b79c (patch)
tree      ef822d51fb8cd4adb030eab48c7dab131b632461
parent    3d08c629244257473450a8ba17cb8184b91e68f8 (diff)
arm/arm64: KVM: Fix BE accesses to GICv2 EISR and ELRSR regs
The EISR and ELRSR registers are 32-bit registers on GICv2, and we
store these as an array of two such registers on the vgic vcpu struct.
However, we access them as a single 64-bit value or as a bitmap pointer
in the generic vgic code, which breaks BE support.

Instead, store them as u64 values on the vgic structure and do the
word-swapping in the assembly code, which already handles the byte
order for BE systems.

Tested-by: Victor Kamensky <victor.kamensky@linaro.org>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
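To see why the old layout broke, consider a minimal user-space sketch (hypothetical names, not kernel code): two 32-bit words written into an array and read back as a single 64-bit value come back word-swapped on a big-endian machine, because the first array element occupies the most significant half of the overlapping u64.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the old "u32 vgic_eisr[2]" layout. */
union regpair {
        uint32_t words[2];   /* words[0] = EISR0 (bits 0-31), words[1] = EISR1 */
        uint64_t full;
};

int main(void)
{
        union regpair p;

        p.words[0] = 0x1;    /* bit 0 of the low register */
        p.words[1] = 0x0;

        /* Prints 0x1 on little-endian but 0x100000000 on big-endian,
         * because words[0] lands in the high half of 'full' on BE. */
        printf("0x%llx\n", (unsigned long long)p.full);
        return 0;
}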
-rw-r--r--  arch/arm/kvm/interrupts_head.S   |  7
-rw-r--r--  arch/arm64/kvm/vgic-v2-switch.S  | 12
-rw-r--r--  include/kvm/arm_vgic.h           |  4
-rw-r--r--  virt/kvm/arm/vgic-v2.c           | 24
-rw-r--r--  virt/kvm/arm/vgic.c              | 18
5 files changed, 36 insertions(+), 29 deletions(-)
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
index 98c8c5b9a87f..14d488388480 100644
--- a/arch/arm/kvm/interrupts_head.S
+++ b/arch/arm/kvm/interrupts_head.S
@@ -433,10 +433,17 @@ ARM_BE8(rev r10, r10 )
 	str	r3, [r11, #VGIC_V2_CPU_HCR]
 	str	r4, [r11, #VGIC_V2_CPU_VMCR]
 	str	r5, [r11, #VGIC_V2_CPU_MISR]
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	str	r6, [r11, #(VGIC_V2_CPU_EISR + 4)]
+	str	r7, [r11, #VGIC_V2_CPU_EISR]
+	str	r8, [r11, #(VGIC_V2_CPU_ELRSR + 4)]
+	str	r9, [r11, #VGIC_V2_CPU_ELRSR]
+#else
 	str	r6, [r11, #VGIC_V2_CPU_EISR]
 	str	r7, [r11, #(VGIC_V2_CPU_EISR + 4)]
 	str	r8, [r11, #VGIC_V2_CPU_ELRSR]
 	str	r9, [r11, #(VGIC_V2_CPU_ELRSR + 4)]
+#endif
 	str	r10, [r11, #VGIC_V2_CPU_APR]
 
 	/* Clear GICH_HCR */
diff --git a/arch/arm64/kvm/vgic-v2-switch.S b/arch/arm64/kvm/vgic-v2-switch.S
index ae211772f991..f002fe1c3700 100644
--- a/arch/arm64/kvm/vgic-v2-switch.S
+++ b/arch/arm64/kvm/vgic-v2-switch.S
@@ -67,10 +67,14 @@ CPU_BE( rev w11, w11 )
 	str	w4, [x3, #VGIC_V2_CPU_HCR]
 	str	w5, [x3, #VGIC_V2_CPU_VMCR]
 	str	w6, [x3, #VGIC_V2_CPU_MISR]
-	str	w7, [x3, #VGIC_V2_CPU_EISR]
-	str	w8, [x3, #(VGIC_V2_CPU_EISR + 4)]
-	str	w9, [x3, #VGIC_V2_CPU_ELRSR]
-	str	w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)]
+CPU_LE(	str	w7, [x3, #VGIC_V2_CPU_EISR] )
+CPU_LE(	str	w8, [x3, #(VGIC_V2_CPU_EISR + 4)] )
+CPU_LE(	str	w9, [x3, #VGIC_V2_CPU_ELRSR] )
+CPU_LE(	str	w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)] )
+CPU_BE(	str	w7, [x3, #(VGIC_V2_CPU_EISR + 4)] )
+CPU_BE(	str	w8, [x3, #VGIC_V2_CPU_EISR] )
+CPU_BE(	str	w9, [x3, #(VGIC_V2_CPU_ELRSR + 4)] )
+CPU_BE(	str	w10, [x3, #VGIC_V2_CPU_ELRSR] )
 	str	w11, [x3, #VGIC_V2_CPU_APR]
 
 	/* Clear GICH_HCR */
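The CPU_LE/CPU_BE store pairs above (and the CONFIG_CPU_ENDIAN_BE8 block in the 32-bit version) do the word-swapping at save time. A hedged C equivalent of the idea, with store_reg_pair as a hypothetical helper that is not part of the patch: on big-endian, the word carrying bits 0-31 of the u64 field must be written at offset +4.

#include <stdint.h>
#include <string.h>

/* Hypothetical helper mirroring the swapped stores: write two 32-bit
 * hardware register reads so they read back correctly as one u64. */
static void store_reg_pair(uint64_t *field, uint32_t lo32, uint32_t hi32)
{
        uint8_t *p = (uint8_t *)field;
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        memcpy(p + 4, &lo32, sizeof(lo32));  /* bits 0-31 at the high address */
        memcpy(p, &hi32, sizeof(hi32));      /* bits 32-63 at the low address */
#else
        memcpy(p, &lo32, sizeof(lo32));
        memcpy(p + 4, &hi32, sizeof(hi32));
#endif
}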
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index ec559d3264cc..206dcc3b3f7a 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -219,8 +219,8 @@ struct vgic_v2_cpu_if {
 	u32	vgic_hcr;
 	u32	vgic_vmcr;
 	u32	vgic_misr;	/* Saved only */
-	u32	vgic_eisr[2];	/* Saved only */
-	u32	vgic_elrsr[2];	/* Saved only */
+	u64	vgic_eisr;	/* Saved only */
+	u64	vgic_elrsr;	/* Saved only */
 	u32	vgic_apr;
 	u32	vgic_lr[VGIC_V2_MAX_LRS];
 };
diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c
index 01124ef3690a..2935405ad22f 100644
--- a/virt/kvm/arm/vgic-v2.c
+++ b/virt/kvm/arm/vgic-v2.c
@@ -71,35 +71,17 @@ static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
 				 struct vgic_lr lr_desc)
 {
 	if (!(lr_desc.state & LR_STATE_MASK))
-		set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr);
+		vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr);
 }
 
 static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
 {
-	u64 val;
-
-#if BITS_PER_LONG == 64
-	val = vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr[1];
-	val <<= 32;
-	val |= vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr[0];
-#else
-	val = *(u64 *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr;
-#endif
-	return val;
+	return vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr;
 }
 
 static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu)
 {
-	u64 val;
-
-#if BITS_PER_LONG == 64
-	val = vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr[1];
-	val <<= 32;
-	val |= vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr[0];
-#else
-	val = *(u64 *)vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
-#endif
-	return val;
+	return vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
 }
 
 static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu)
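Now that the field is a plain u64, setting a bit is a byte-order-independent 64-bit OR. The old set_bit() through an (unsigned long *) cast would, on a 32-bit big-endian kernel, land bits 0-31 in the word that holds bits 63-32 of the u64. A minimal sketch of the endian-agnostic form (function name hypothetical):

#include <stdint.h>

/* Mark list register 'lr' as empty in a 64-bit ELRSR shadow. The
 * plain 64-bit OR works regardless of BITS_PER_LONG or endianness,
 * unlike bit operations through a cast to (unsigned long *). */
static void mark_lr_empty(uint64_t *elrsr, int lr)
{
        *elrsr |= (uint64_t)1 << lr;
}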
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 382fb5a88b9c..3aaca49de325 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -145,6 +145,20 @@ static void vgic_free_bitmap(struct vgic_bitmap *b)
 	b->shared = NULL;
 }
 
+/*
+ * Call this function to convert a u64 value to an unsigned long * bitmask
+ * in a way that works on both 32-bit and 64-bit LE and BE platforms.
+ *
+ * Warning: Calling this function may modify *val.
+ */
+static unsigned long *u64_to_bitmask(u64 *val)
+{
+#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
+	*val = (*val >> 32) | (*val << 32);
+#endif
+	return (unsigned long *)val;
+}
+
 static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
 				int cpuid, u32 offset)
 {
@@ -1442,7 +1456,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 	 * active bit.
 	 */
 	u64 eisr = vgic_get_eisr(vcpu);
-	unsigned long *eisr_ptr = (unsigned long *)&eisr;
+	unsigned long *eisr_ptr = u64_to_bitmask(&eisr);
 	int lr;
 
 	for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
@@ -1505,7 +1519,7 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 
 	level_pending = vgic_process_maintenance(vcpu);
 	elrsr = vgic_get_elrsr(vcpu);
-	elrsr_ptr = (unsigned long *)&elrsr;
+	elrsr_ptr = u64_to_bitmask(&elrsr);
 
 	/* Clear mappings for empty LRs */
 	for_each_set_bit(lr, elrsr_ptr, vgic->nr_lr) {