aboutsummaryrefslogtreecommitdiffstats
path: root/virt/kvm/arm
diff options
context:
space:
mode:
authorChristoffer Dall <christoffer.dall@linaro.org>2014-06-14 15:54:51 -0400
committerChristoffer Dall <christoffer.dall@linaro.org>2014-09-18 21:44:31 -0400
commitfaa1b46c3e9f4d40359aee04ff275eea5f4cae3a (patch)
treeee960c075d6d84402892f8d1025812208b105326 /virt/kvm/arm
parentcced50c9280ef7ca1af48080707a170efa1adfa0 (diff)
arm/arm64: KVM: vgic: Improve handling of GICD_I{CS}PENDRn
Writes to GICD_ISPENDRn and GICD_ICPENDRn are currently not handled correctly for level-triggered interrupts. The spec states that for level-triggered interrupts, writes to GICD_ISPENDRn activate the output of a flip-flop which is in turn or'ed with the actual input interrupt signal. Correspondingly, writes to GICD_ICPENDRn simply deactivate the output of that flip-flop, but do not (of course) affect the external input signal. Reads from GICC_IAR will also deactivate the flip-flop output. This requires us to track the state of the level-input separately from the state in the flip-flop. We therefore introduce two new variables on the distributor struct to track these two states. Astute readers may notice that this is introducing more state than required (because an OR of the two states gives you the pending state), but the remaining vgic code uses the pending bitmap for optimized operations to figure out, at the end of the day, if an interrupt is pending or not on the distributor side. Refactoring the code to consider the two state variables in all the places where we currently access the precomputed pending value did not look pretty. Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Diffstat (limited to 'virt/kvm/arm')
-rw-r--r--virt/kvm/arm/vgic.c119
1 file changed, 108 insertions, 11 deletions
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 2026b6147805..435d8e7ad137 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -67,6 +67,11 @@
67 * - When the interrupt is EOIed, the maintenance interrupt fires, 67 * - When the interrupt is EOIed, the maintenance interrupt fires,
68 * and clears the corresponding bit in irq_queued. This allows the 68 * and clears the corresponding bit in irq_queued. This allows the
69 * interrupt line to be sampled again. 69 * interrupt line to be sampled again.
70 * - Note that level-triggered interrupts can also be set to pending from
71 * writes to GICD_ISPENDRn and lowering the external input line does not
72 * cause the interrupt to become inactive in such a situation.
73 * Conversely, writes to GICD_ICPENDRn do not cause the interrupt to become
74 * inactive as long as the external input line is held high.
70 */ 75 */
71 76
72#define VGIC_ADDR_UNDEF (-1) 77#define VGIC_ADDR_UNDEF (-1)
@@ -217,6 +222,41 @@ static void vgic_irq_clear_queued(struct kvm_vcpu *vcpu, int irq)
217 vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0); 222 vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0);
218} 223}
219 224
225static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq)
226{
227 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
228
229 return vgic_bitmap_get_irq_val(&dist->irq_level, vcpu->vcpu_id, irq);
230}
231
232static void vgic_dist_irq_set_level(struct kvm_vcpu *vcpu, int irq)
233{
234 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
235
236 vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 1);
237}
238
239static void vgic_dist_irq_clear_level(struct kvm_vcpu *vcpu, int irq)
240{
241 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
242
243 vgic_bitmap_set_irq_val(&dist->irq_level, vcpu->vcpu_id, irq, 0);
244}
245
246static int vgic_dist_irq_soft_pend(struct kvm_vcpu *vcpu, int irq)
247{
248 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
249
250 return vgic_bitmap_get_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq);
251}
252
253static void vgic_dist_irq_clear_soft_pend(struct kvm_vcpu *vcpu, int irq)
254{
255 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
256
257 vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0);
258}
259
220static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq) 260static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
221{ 261{
222 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; 262 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
@@ -414,11 +454,26 @@ static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
414 struct kvm_exit_mmio *mmio, 454 struct kvm_exit_mmio *mmio,
415 phys_addr_t offset) 455 phys_addr_t offset)
416{ 456{
417 u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_pending, 457 u32 *reg;
418 vcpu->vcpu_id, offset); 458 u32 level_mask;
459 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
460
461 reg = vgic_bitmap_get_reg(&dist->irq_cfg, vcpu->vcpu_id, offset);
462 level_mask = (~(*reg));
463
464 /* Mark both level and edge triggered irqs as pending */
465 reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu->vcpu_id, offset);
419 vgic_reg_access(mmio, reg, offset, 466 vgic_reg_access(mmio, reg, offset,
420 ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT); 467 ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
468
421 if (mmio->is_write) { 469 if (mmio->is_write) {
470 /* Set the soft-pending flag only for level-triggered irqs */
471 reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
472 vcpu->vcpu_id, offset);
473 vgic_reg_access(mmio, reg, offset,
474 ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
475 *reg &= level_mask;
476
422 vgic_update_state(vcpu->kvm); 477 vgic_update_state(vcpu->kvm);
423 return true; 478 return true;
424 } 479 }
@@ -430,11 +485,27 @@ static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
430 struct kvm_exit_mmio *mmio, 485 struct kvm_exit_mmio *mmio,
431 phys_addr_t offset) 486 phys_addr_t offset)
432{ 487{
433 u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_pending, 488 u32 *level_active;
434 vcpu->vcpu_id, offset); 489 u32 *reg;
490 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
491
492 reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu->vcpu_id, offset);
435 vgic_reg_access(mmio, reg, offset, 493 vgic_reg_access(mmio, reg, offset,
436 ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT); 494 ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
437 if (mmio->is_write) { 495 if (mmio->is_write) {
496 /* Re-set level triggered level-active interrupts */
497 level_active = vgic_bitmap_get_reg(&dist->irq_level,
498 vcpu->vcpu_id, offset);
499 reg = vgic_bitmap_get_reg(&dist->irq_pending,
500 vcpu->vcpu_id, offset);
501 *reg |= *level_active;
502
503 /* Clear soft-pending flags */
504 reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
505 vcpu->vcpu_id, offset);
506 vgic_reg_access(mmio, reg, offset,
507 ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
508
438 vgic_update_state(vcpu->kvm); 509 vgic_update_state(vcpu->kvm);
439 return true; 510 return true;
440 } 511 }
@@ -1268,17 +1339,32 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
1268 1339
1269 for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) { 1340 for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
1270 struct vgic_lr vlr = vgic_get_lr(vcpu, lr); 1341 struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
1342 WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));
1271 1343
1272 vgic_irq_clear_queued(vcpu, vlr.irq); 1344 vgic_irq_clear_queued(vcpu, vlr.irq);
1273 WARN_ON(vlr.state & LR_STATE_MASK); 1345 WARN_ON(vlr.state & LR_STATE_MASK);
1274 vlr.state = 0; 1346 vlr.state = 0;
1275 vgic_set_lr(vcpu, lr, vlr); 1347 vgic_set_lr(vcpu, lr, vlr);
1276 1348
1349 /*
 1350 * If the IRQ was EOIed it was also ACKed and we
1351 * therefore assume we can clear the soft pending
 1352 * state (should it have been set) for this interrupt.
1353 *
1354 * Note: if the IRQ soft pending state was set after
1355 * the IRQ was acked, it actually shouldn't be
1356 * cleared, but we have no way of knowing that unless
1357 * we start trapping ACKs when the soft-pending state
1358 * is set.
1359 */
1360 vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);
1361
1277 /* Any additional pending interrupt? */ 1362 /* Any additional pending interrupt? */
1278 if (vgic_dist_irq_is_pending(vcpu, vlr.irq)) { 1363 if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
1279 vgic_cpu_irq_set(vcpu, vlr.irq); 1364 vgic_cpu_irq_set(vcpu, vlr.irq);
1280 level_pending = true; 1365 level_pending = true;
1281 } else { 1366 } else {
1367 vgic_dist_irq_clear_pending(vcpu, vlr.irq);
1282 vgic_cpu_irq_clear(vcpu, vlr.irq); 1368 vgic_cpu_irq_clear(vcpu, vlr.irq);
1283 } 1369 }
1284 1370
@@ -1384,17 +1470,19 @@ static void vgic_kick_vcpus(struct kvm *kvm)
1384static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level) 1470static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
1385{ 1471{
1386 int edge_triggered = vgic_irq_is_edge(vcpu, irq); 1472 int edge_triggered = vgic_irq_is_edge(vcpu, irq);
1387 int state = vgic_dist_irq_is_pending(vcpu, irq);
1388 1473
1389 /* 1474 /*
1390 * Only inject an interrupt if: 1475 * Only inject an interrupt if:
1391 * - edge triggered and we have a rising edge 1476 * - edge triggered and we have a rising edge
1392 * - level triggered and we change level 1477 * - level triggered and we change level
1393 */ 1478 */
1394 if (edge_triggered) 1479 if (edge_triggered) {
1480 int state = vgic_dist_irq_is_pending(vcpu, irq);
1395 return level > state; 1481 return level > state;
1396 else 1482 } else {
1483 int state = vgic_dist_irq_get_level(vcpu, irq);
1397 return level != state; 1484 return level != state;
1485 }
1398} 1486}
1399 1487
1400static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid, 1488static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
@@ -1424,10 +1512,19 @@ static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
1424 1512
1425 kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid); 1513 kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid);
1426 1514
1427 if (level) 1515 if (level) {
1516 if (level_triggered)
1517 vgic_dist_irq_set_level(vcpu, irq_num);
1428 vgic_dist_irq_set_pending(vcpu, irq_num); 1518 vgic_dist_irq_set_pending(vcpu, irq_num);
1429 else 1519 } else {
1430 vgic_dist_irq_clear_pending(vcpu, irq_num); 1520 if (level_triggered) {
1521 vgic_dist_irq_clear_level(vcpu, irq_num);
1522 if (!vgic_dist_irq_soft_pend(vcpu, irq_num))
1523 vgic_dist_irq_clear_pending(vcpu, irq_num);
1524 } else {
1525 vgic_dist_irq_clear_pending(vcpu, irq_num);
1526 }
1527 }
1431 1528
1432 enabled = vgic_irq_is_enabled(vcpu, irq_num); 1529 enabled = vgic_irq_is_enabled(vcpu, irq_num);
1433 1530