summary | refs | log | tree | commit | diff | stats
path: root/virt
diff options
context:
space:
mode:
author	Christoffer Dall <christoffer.dall@linaro.org>	2015-09-04 15:25:12 -0400
committer	Christoffer Dall <christoffer.dall@linaro.org>	2015-10-22 17:01:44 -0400
commit	8fe2f19e6e6015911bdd4cfcdb23a32e146ba570 (patch)
tree	aba5549ec12b1ef1542df6d32c89e878543acf7f /virt
parent	4b4b4512da2a844b8da2585609b67fae1ce4f4db (diff)
arm/arm64: KVM: Support edge-triggered forwarded interrupts
We mark edge-triggered interrupts with the HW bit set as queued to prevent the VGIC code from injecting LRs with both the Active and Pending bits set at the same time while also setting the HW bit, because the hardware does not support this.

However, this means that we must also clear the queued flag when we sync back a LR where the state on the physical distributor went from active to inactive because the guest deactivated the interrupt. At this point we must also check if the interrupt is pending on the distributor, and tell the VGIC to queue it again if it is.

Since these actions on the sync path are extremely close to those for level-triggered interrupts, rename process_level_irq to process_queued_irq, allowing it to cater for both cases.

Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/arm/vgic.c	40
1 file changed, 22 insertions(+), 18 deletions(-)
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 3c2909c1bda3..84abc6f38c1d 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1313,13 +1313,10 @@ epilog:
 	}
 }
 
-static int process_level_irq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr)
+static int process_queued_irq(struct kvm_vcpu *vcpu,
+			      int lr, struct vgic_lr vlr)
 {
-	int level_pending = 0;
-
-	vlr.state = 0;
-	vlr.hwirq = 0;
-	vgic_set_lr(vcpu, lr, vlr);
+	int pending = 0;
 
 	/*
 	 * If the IRQ was EOIed (called from vgic_process_maintenance) or it
@@ -1335,26 +1332,35 @@ static int process_level_irq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr)
 		vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);
 
 	/*
-	 * Tell the gic to start sampling the line of this interrupt again.
+	 * Tell the gic to start sampling this interrupt again.
 	 */
 	vgic_irq_clear_queued(vcpu, vlr.irq);
 
 	/* Any additional pending interrupt? */
-	if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
-		vgic_cpu_irq_set(vcpu, vlr.irq);
-		level_pending = 1;
+	if (vgic_irq_is_edge(vcpu, vlr.irq)) {
+		BUG_ON(!(vlr.state & LR_HW));
+		pending = vgic_dist_irq_is_pending(vcpu, vlr.irq);
 	} else {
-		vgic_dist_irq_clear_pending(vcpu, vlr.irq);
-		vgic_cpu_irq_clear(vcpu, vlr.irq);
+		if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
+			vgic_cpu_irq_set(vcpu, vlr.irq);
+			pending = 1;
+		} else {
+			vgic_dist_irq_clear_pending(vcpu, vlr.irq);
+			vgic_cpu_irq_clear(vcpu, vlr.irq);
+		}
 	}
 
 	/*
 	 * Despite being EOIed, the LR may not have
 	 * been marked as empty.
 	 */
+	vlr.state = 0;
+	vlr.hwirq = 0;
+	vgic_set_lr(vcpu, lr, vlr);
+
 	vgic_sync_lr_elrsr(vcpu, lr, vlr);
 
-	return level_pending;
+	return pending;
 }
 
 static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
@@ -1391,7 +1397,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 					  vlr.irq - VGIC_NR_PRIVATE_IRQS);
 
 			spin_lock(&dist->lock);
-			level_pending |= process_level_irq(vcpu, lr, vlr);
+			level_pending |= process_queued_irq(vcpu, lr, vlr);
 			spin_unlock(&dist->lock);
 		}
 	}
@@ -1413,7 +1419,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 /*
  * Save the physical active state, and reset it to inactive.
  *
- * Return true if there's a pending level triggered interrupt line to queue.
+ * Return true if there's a pending forwarded interrupt to queue.
  */
 static bool vgic_sync_hwirq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr)
 {
@@ -1438,10 +1444,8 @@ static bool vgic_sync_hwirq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr)
 	if (phys_active)
 		return 0;
 
-	/* Mapped edge-triggered interrupts not yet supported. */
-	WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));
 	spin_lock(&dist->lock);
-	level_pending = process_level_irq(vcpu, lr, vlr);
+	level_pending = process_queued_irq(vcpu, lr, vlr);
 	spin_unlock(&dist->lock);
 	return level_pending;
 }