author		Pavel Fedin <p.fedin@samsung.com>	2015-10-27 04:37:31 -0400
committer	Christoffer Dall <christoffer.dall@linaro.org>	2015-11-04 09:29:49 -0500
commit		26caea7693cb99833fe4ecc544c842289d6b3f69 (patch)
tree		a34f97555427536057beb1d616e03b4153080152
parent		212c76545dde8370ebde2a170e4f8e1ed8441dc0 (diff)
KVM: arm/arm64: Merge vgic_set_lr() and vgic_sync_lr_elrsr()
Now we see that vgic_set_lr() and vgic_sync_lr_elrsr() are always used
together. Merge them into one function, saving a second vgic_ops
dereference every time.

Signed-off-by: Pavel Fedin <p.fedin@samsung.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
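Caller-side, the change looks as follows (a sketch distilled from the
vgic.c hunks below, not part of the original commit message): every LR
update used to make two indirect calls through vgic_ops, and after this
patch the ELRSR bookkeeping happens inside set_lr itself, so a single
call suffices.

	/* before: two vgic_ops dereferences per LR update */
	vgic_set_lr(vcpu, lr_nr, vlr);
	vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);

	/* after: one dereference; set_lr also maintains ELRSR */
	vgic_set_lr(vcpu, lr_nr, vlr);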
---
 include/kvm/arm_vgic.h |  1
 virt/kvm/arm/vgic-v2.c |  5
 virt/kvm/arm/vgic-v3.c |  5
 virt/kvm/arm/vgic.c    | 14
 4 files changed, 2 insertions(+), 23 deletions(-)
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 3936bf802e1d..f62addc17dcf 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -112,7 +112,6 @@ struct vgic_vmcr {
 struct vgic_ops {
 	struct vgic_lr	(*get_lr)(const struct kvm_vcpu *, int);
 	void	(*set_lr)(struct kvm_vcpu *, int, struct vgic_lr);
-	void	(*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr);
 	u64	(*get_elrsr)(const struct kvm_vcpu *vcpu);
 	u64	(*get_eisr)(const struct kvm_vcpu *vcpu);
 	void	(*clear_eisr)(struct kvm_vcpu *vcpu);
diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c
index c0f5d7fad9ea..ff02f08df74d 100644
--- a/virt/kvm/arm/vgic-v2.c
+++ b/virt/kvm/arm/vgic-v2.c
@@ -79,11 +79,7 @@ static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr,
 		lr_val |= (lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT);
 
 	vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val;
-}
 
-static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
-				  struct vgic_lr lr_desc)
-{
 	if (!(lr_desc.state & LR_STATE_MASK))
 		vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr);
 	else
@@ -167,7 +163,6 @@ static void vgic_v2_enable(struct kvm_vcpu *vcpu)
 static const struct vgic_ops vgic_v2_ops = {
 	.get_lr			= vgic_v2_get_lr,
 	.set_lr			= vgic_v2_set_lr,
-	.sync_lr_elrsr		= vgic_v2_sync_lr_elrsr,
 	.get_elrsr		= vgic_v2_get_elrsr,
 	.get_eisr		= vgic_v2_get_eisr,
 	.clear_eisr		= vgic_v2_clear_eisr,
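For reference, the merged vgic_v2_set_lr() reads roughly as follows
after this patch. This is a reconstruction from the hunk above, not
text from the commit; the body of the else branch falls outside the
diff context and is assumed to clear the ELRSR bit.

	static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr,
				   struct vgic_lr lr_desc)
	{
		/* ... lr_val is built from lr_desc as before (omitted) ... */
		lr_val |= (lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT);

		vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val;

		/* formerly vgic_v2_sync_lr_elrsr(), now inlined here */
		if (!(lr_desc.state & LR_STATE_MASK))
			vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr);
		else
			vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr &= ~(1ULL << lr); /* assumed */
	}

vgic_v3_set_lr() below gets the same treatment, with LR_INDEX(lr) and a
1U mask instead.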
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
index 92003cb61a0a..487d6357b7e7 100644
--- a/virt/kvm/arm/vgic-v3.c
+++ b/virt/kvm/arm/vgic-v3.c
@@ -112,11 +112,7 @@ static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
 	}
 
 	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)] = lr_val;
-}
 
-static void vgic_v3_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
-				  struct vgic_lr lr_desc)
-{
 	if (!(lr_desc.state & LR_STATE_MASK))
 		vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr);
 	else
@@ -212,7 +208,6 @@ static void vgic_v3_enable(struct kvm_vcpu *vcpu)
 static const struct vgic_ops vgic_v3_ops = {
 	.get_lr			= vgic_v3_get_lr,
 	.set_lr			= vgic_v3_set_lr,
-	.sync_lr_elrsr		= vgic_v3_sync_lr_elrsr,
 	.get_elrsr		= vgic_v3_get_elrsr,
 	.get_eisr		= vgic_v3_get_eisr,
 	.clear_eisr		= vgic_v3_clear_eisr,
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 96e45f3da534..fe451d4885ae 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1032,12 +1032,6 @@ static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
 	vgic_ops->set_lr(vcpu, lr, vlr);
 }
 
-static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
-			       struct vgic_lr vlr)
-{
-	vgic_ops->sync_lr_elrsr(vcpu, lr, vlr);
-}
-
 static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
 {
 	return vgic_ops->get_elrsr(vcpu);
@@ -1100,7 +1094,6 @@ static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu)
 
 	vlr.state = 0;
 	vgic_set_lr(vcpu, lr_nr, vlr);
-	vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
 }
 
 /*
@@ -1162,7 +1155,6 @@ static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
 	}
 
 	vgic_set_lr(vcpu, lr_nr, vlr);
-	vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
 }
 
 /*
@@ -1340,8 +1332,6 @@ static int process_queued_irq(struct kvm_vcpu *vcpu,
 	vlr.hwirq = 0;
 	vgic_set_lr(vcpu, lr, vlr);
 
-	vgic_sync_lr_elrsr(vcpu, lr, vlr);
-
 	return pending;
 }
 
@@ -1442,8 +1432,6 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 	bool level_pending;
 
 	level_pending = vgic_process_maintenance(vcpu);
-	elrsr = vgic_get_elrsr(vcpu);
-	elrsr_ptr = u64_to_bitmask(&elrsr);
 
 	/* Deal with HW interrupts, and clear mappings for empty LRs */
 	for (lr = 0; lr < vgic->nr_lr; lr++) {
@@ -1454,6 +1442,8 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 	}
 
 	/* Check if we still have something up our sleeve... */
+	elrsr = vgic_get_elrsr(vcpu);
+	elrsr_ptr = u64_to_bitmask(&elrsr);
 	pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
 	if (level_pending || pending < vgic->nr_lr)
 		set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
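The last two hunks are more than a mechanical cleanup: since
vgic_set_lr() now updates the cached ELRSR as a side effect, and the
HW-interrupt loop can write LRs through it, the ELRSR snapshot must be
taken after that loop rather than before it. Pieced together from those
hunks (a sketch, with the unchanged loop body elided), the tail of
__kvm_vgic_sync_hwstate() now reads:

	/* Deal with HW interrupts, and clear mappings for empty LRs */
	for (lr = 0; lr < vgic->nr_lr; lr++) {
		/* ... may call vgic_set_lr(), which now updates ELRSR ... */
	}

	/* Check if we still have something up our sleeve... */
	elrsr = vgic_get_elrsr(vcpu);
	elrsr_ptr = u64_to_bitmask(&elrsr);
	pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);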