author    Christoffer Dall <christoffer.dall@linaro.org>  2014-06-09 06:55:13 -0400
committer Christoffer Dall <christoffer.dall@linaro.org>  2014-09-18 21:44:30 -0400
commit    dbf20f9d8105cca531614c8bff9a74351e8e67e7 (patch)
tree      1d86e929fa155683d665ba5f8217e54da2845a05 /virt/kvm
parent    227844f53864077ccaefe01d0960fcccc03445ce (diff)
arm/arm64: KVM: Rename irq_active to irq_queued
We have a special bitmap on the distributor struct to keep track of when
level-triggered interrupts are queued on the list registers.  This was
named irq_active, which is confusing, because the active state of an
interrupt as per the GIC spec is a different thing, not specifically
related to edge-triggered/level-triggered configurations, but rather
indicates an interrupt which has been ack'ed but not yet eoi'ed.

Rename the bitmap and the corresponding accessor functions to irq_queued
to clarify what this is actually used for.

Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
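For illustration, here is a minimal, self-contained sketch of the sampling
rule this patch encodes (introduced below as vgic_can_sample_irq()).  The
struct irq_state type and its fields are hypothetical simplifications for
exposition only, not the kernel's actual vgic_dist layout:

#include <stdbool.h>

/*
 * Hypothetical, simplified stand-in for the per-IRQ state this patch
 * touches -- not the kernel's actual vgic types.
 */
struct irq_state {
	bool edge_triggered;	/* configured edge- vs. level-triggered */
	bool queued;		/* level IRQ sitting on a list register */
	bool active;		/* GIC active state: ack'ed, not yet eoi'ed */
};

/*
 * The rule the patch names vgic_can_sample_irq(): an edge interrupt may
 * always be sampled; a level interrupt may only be sampled again once it
 * is no longer queued on a list register.  Note that "queued" is
 * independent of the GIC "active" bit -- conflating the two is exactly
 * the confusion the rename removes.
 */
static bool can_sample_irq(const struct irq_state *s)
{
	return s->edge_triggered || !s->queued;
}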
Diffstat (limited to 'virt/kvm')
-rw-r--r--  virt/kvm/arm/vgic.c | 33
1 file changed, 19 insertions(+), 14 deletions(-)
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 7e86a36f3fc5..ce1a2d17ee81 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -60,12 +60,12 @@
  * the 'line' again. This is achieved as such:
  *
  * - When a level interrupt is moved onto a vcpu, the corresponding
- *   bit in irq_active is set. As long as this bit is set, the line
+ *   bit in irq_queued is set. As long as this bit is set, the line
  *   will be ignored for further interrupts. The interrupt is injected
  *   into the vcpu with the GICH_LR_EOI bit set (generate a
  *   maintenance interrupt on EOI).
  * - When the interrupt is EOIed, the maintenance interrupt fires,
- *   and clears the corresponding bit in irq_active. This allow the
+ *   and clears the corresponding bit in irq_queued. This allows the
  *   interrupt line to be sampled again.
  */
 
@@ -196,25 +196,25 @@ static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
 	return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
 }
 
-static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq)
+static int vgic_irq_is_queued(struct kvm_vcpu *vcpu, int irq)
 {
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 
-	return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq);
+	return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq);
 }
 
-static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq)
+static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq)
 {
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 
-	vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1);
+	vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 1);
 }
 
-static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq)
+static void vgic_irq_clear_queued(struct kvm_vcpu *vcpu, int irq)
 {
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 
-	vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0);
+	vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0);
 }
 
 static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
@@ -256,6 +256,11 @@ static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
 					  vcpu->arch.vgic_cpu.pending_shared);
 }
 
+static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq)
+{
+	return vgic_irq_is_edge(vcpu, irq) || !vgic_irq_is_queued(vcpu, irq);
+}
+
 static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
 {
 	return le32_to_cpu(*((u32 *)mmio->data)) & mask;
@@ -1079,8 +1084,8 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
 
 		if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
 			vgic_retire_lr(lr, vlr.irq, vcpu);
-			if (vgic_irq_is_active(vcpu, vlr.irq))
-				vgic_irq_clear_active(vcpu, vlr.irq);
+			if (vgic_irq_is_queued(vcpu, vlr.irq))
+				vgic_irq_clear_queued(vcpu, vlr.irq);
 		}
 	}
 }
@@ -1170,7 +1175,7 @@ static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq)
 
 static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
 {
-	if (vgic_irq_is_active(vcpu, irq))
+	if (!vgic_can_sample_irq(vcpu, irq))
 		return true; /* level interrupt, already queued */
 
 	if (vgic_queue_irq(vcpu, 0, irq)) {
@@ -1178,7 +1183,7 @@ static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
 		vgic_dist_irq_clear_pending(vcpu, irq);
 		vgic_cpu_irq_clear(vcpu, irq);
 	} else {
-		vgic_irq_set_active(vcpu, irq);
+		vgic_irq_set_queued(vcpu, irq);
 	}
 
 	return true;
@@ -1262,7 +1267,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 	for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
 		struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
 
-		vgic_irq_clear_active(vcpu, vlr.irq);
+		vgic_irq_clear_queued(vcpu, vlr.irq);
 		WARN_ON(vlr.state & LR_STATE_MASK);
 		vlr.state = 0;
 		vgic_set_lr(vcpu, lr, vlr);
@@ -1429,7 +1434,7 @@ static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
 		goto out;
 	}
 
-	if (level_triggered && vgic_irq_is_active(vcpu, irq_num)) {
+	if (!vgic_can_sample_irq(vcpu, irq_num)) {
 		/*
 		 * Level interrupt in progress, will be picked up
 		 * when EOId.
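
To make the life cycle described in the comment block of the first hunk
concrete, here is a toy, runnable model of the level-interrupt flow.
Every name in it is illustrative only; none of it is kernel API:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of one level-triggered interrupt; illustrative names only. */
struct level_irq {
	bool line;	/* state of the device's interrupt line */
	bool queued;	/* irq_queued bit: sitting on a list register */
};

/* Device raises (or re-raises) its line; the line is only sampled while
 * the interrupt is not queued on a list register. */
static bool sample(struct level_irq *irq, bool line)
{
	irq->line = line;
	if (irq->line && !irq->queued) {
		irq->queued = true;	/* inject with GICH_LR_EOI set */
		return true;		/* queued onto a list register */
	}
	return false;			/* ignored while still queued */
}

/* Guest EOIs: the maintenance interrupt clears irq_queued, so a
 * still-asserted line is sampled (and queued) again. */
static bool eoi(struct level_irq *irq)
{
	irq->queued = false;
	return sample(irq, irq->line);
}

int main(void)
{
	struct level_irq irq = { 0 };

	printf("%d\n", sample(&irq, true));	/* 1: queued */
	printf("%d\n", sample(&irq, true));	/* 0: ignored, still queued */
	printf("%d\n", eoi(&irq));		/* 1: line still high, re-queued */
	return 0;
}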