diff options
author | Christoffer Dall <christoffer.dall@linaro.org> | 2015-08-25 16:50:57 -0400 |
---|---|---|
committer | Christoffer Dall <christoffer.dall@linaro.org> | 2015-10-22 17:01:42 -0400 |
commit | 9103617df202d74e5c65f8af84a9aa727f812a06 (patch) | |
tree | da6f0e7a58c098feb137504929de293f0726a86a /virt | |
parent | d35268da66870d733ae763fd7f9b06a1f63f395e (diff) |
arm/arm64: KVM: vgic: Factor out level irq processing on guest exit
Currently vgic_process_maintenance() processes dealing with a completed
level-triggered interrupt directly, but we are soon going to reuse this
logic for level-triggered mapped interrupts with the HW bit set, so
move this logic into a separate static function.
Probably the most scary part of this commit is convincing yourself that
the current flow is safe compared to the old one. In the following I
try to list the changes and why they are harmless:
Move vgic_irq_clear_queued after kvm_notify_acked_irq:
Harmless because the only potential effect of clearing the queued
flag wrt. kvm_set_irq is that vgic_update_irq_pending does not set
the pending bit on the emulated CPU interface or in the
pending_on_cpu bitmask if the function is called with level=1.
However, the point of kvm_notify_acked_irq is to call kvm_set_irq
with level=0, and we set the queued flag again in
__kvm_vgic_sync_hwstate later on if the level is still high.
Move vgic_set_lr before kvm_notify_acked_irq:
Also, harmless because the LR are cpu-local operations and
kvm_notify_acked only affects the dist
Move vgic_dist_irq_clear_soft_pend after kvm_notify_acked_irq:
Also harmless, because now we check the level state in the
clear_soft_pend function and lower the pending bits if the level is
low.
Reviewed-by: Eric Auger <eric.auger@linaro.org>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Diffstat (limited to 'virt')
-rw-r--r-- | virt/kvm/arm/vgic.c | 94 |
1 file changed, 56 insertions, 38 deletions
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index 66c66165e712..367a180fb5ac 100644 --- a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c | |||
@@ -107,6 +107,7 @@ static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr); | |||
107 | static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc); | 107 | static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc); |
108 | static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu, | 108 | static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu, |
109 | int virt_irq); | 109 | int virt_irq); |
110 | static int compute_pending_for_cpu(struct kvm_vcpu *vcpu); | ||
110 | 111 | ||
111 | static const struct vgic_ops *vgic_ops; | 112 | static const struct vgic_ops *vgic_ops; |
112 | static const struct vgic_params *vgic; | 113 | static const struct vgic_params *vgic; |
@@ -357,6 +358,11 @@ static void vgic_dist_irq_clear_soft_pend(struct kvm_vcpu *vcpu, int irq) | |||
357 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 358 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
358 | 359 | ||
359 | vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0); | 360 | vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0); |
361 | if (!vgic_dist_irq_get_level(vcpu, irq)) { | ||
362 | vgic_dist_irq_clear_pending(vcpu, irq); | ||
363 | if (!compute_pending_for_cpu(vcpu)) | ||
364 | clear_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu); | ||
365 | } | ||
360 | } | 366 | } |
361 | 367 | ||
362 | static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq) | 368 | static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq) |
@@ -1338,12 +1344,56 @@ epilog: | |||
1338 | } | 1344 | } |
1339 | } | 1345 | } |
1340 | 1346 | ||
1347 | static int process_level_irq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr) | ||
1348 | { | ||
1349 | int level_pending = 0; | ||
1350 | |||
1351 | vlr.state = 0; | ||
1352 | vlr.hwirq = 0; | ||
1353 | vgic_set_lr(vcpu, lr, vlr); | ||
1354 | |||
1355 | /* | ||
1356 | * If the IRQ was EOIed (called from vgic_process_maintenance) or it | ||
1357 | * went from active to non-active (called from vgic_sync_hwirq) it was | ||
1358 | also ACKed and we therefore assume we can clear the soft pending | ||
1359 | state (should it have been set) for this interrupt. | ||
1360 | * | ||
1361 | * Note: if the IRQ soft pending state was set after the IRQ was | ||
1362 | * acked, it actually shouldn't be cleared, but we have no way of | ||
1363 | * knowing that unless we start trapping ACKs when the soft-pending | ||
1364 | * state is set. | ||
1365 | */ | ||
1366 | vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq); | ||
1367 | |||
1368 | /* | ||
1369 | * Tell the gic to start sampling the line of this interrupt again. | ||
1370 | */ | ||
1371 | vgic_irq_clear_queued(vcpu, vlr.irq); | ||
1372 | |||
1373 | /* Any additional pending interrupt? */ | ||
1374 | if (vgic_dist_irq_get_level(vcpu, vlr.irq)) { | ||
1375 | vgic_cpu_irq_set(vcpu, vlr.irq); | ||
1376 | level_pending = 1; | ||
1377 | } else { | ||
1378 | vgic_dist_irq_clear_pending(vcpu, vlr.irq); | ||
1379 | vgic_cpu_irq_clear(vcpu, vlr.irq); | ||
1380 | } | ||
1381 | |||
1382 | /* | ||
1383 | * Despite being EOIed, the LR may not have | ||
1384 | * been marked as empty. | ||
1385 | */ | ||
1386 | vgic_sync_lr_elrsr(vcpu, lr, vlr); | ||
1387 | |||
1388 | return level_pending; | ||
1389 | } | ||
1390 | |||
1341 | static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) | 1391 | static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) |
1342 | { | 1392 | { |
1343 | u32 status = vgic_get_interrupt_status(vcpu); | 1393 | u32 status = vgic_get_interrupt_status(vcpu); |
1344 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 1394 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
1345 | bool level_pending = false; | ||
1346 | struct kvm *kvm = vcpu->kvm; | 1395 | struct kvm *kvm = vcpu->kvm; |
1396 | int level_pending = 0; | ||
1347 | 1397 | ||
1348 | kvm_debug("STATUS = %08x\n", status); | 1398 | kvm_debug("STATUS = %08x\n", status); |
1349 | 1399 | ||
@@ -1358,54 +1408,22 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) | |||
1358 | 1408 | ||
1359 | for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) { | 1409 | for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) { |
1360 | struct vgic_lr vlr = vgic_get_lr(vcpu, lr); | 1410 | struct vgic_lr vlr = vgic_get_lr(vcpu, lr); |
1361 | WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq)); | ||
1362 | 1411 | ||
1363 | spin_lock(&dist->lock); | 1412 | WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq)); |
1364 | vgic_irq_clear_queued(vcpu, vlr.irq); | ||
1365 | WARN_ON(vlr.state & LR_STATE_MASK); | 1413 | WARN_ON(vlr.state & LR_STATE_MASK); |
1366 | vlr.state = 0; | ||
1367 | vgic_set_lr(vcpu, lr, vlr); | ||
1368 | 1414 | ||
1369 | /* | ||
1370 | * If the IRQ was EOIed it was also ACKed and we | ||
1371 | * therefore assume we can clear the soft pending | ||
1372 | * state (should it have been set) for this interrupt. | ||
1373 | * | ||
1374 | * Note: if the IRQ soft pending state was set after | ||
1375 | * the IRQ was acked, it actually shouldn't be | ||
1376 | * cleared, but we have no way of knowing that unless | ||
1377 | * we start trapping ACKs when the soft-pending state | ||
1378 | * is set. | ||
1379 | */ | ||
1380 | vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq); | ||
1381 | 1415 | ||
1382 | /* | 1416 | /* |
1383 | * kvm_notify_acked_irq calls kvm_set_irq() | 1417 | * kvm_notify_acked_irq calls kvm_set_irq() |
1384 | * to reset the IRQ level. Need to release the | 1418 | * to reset the IRQ level, which grabs the dist->lock |
1385 | * lock for kvm_set_irq to grab it. | 1419 | * so we call this before taking the dist->lock. |
1386 | */ | 1420 | */ |
1387 | spin_unlock(&dist->lock); | ||
1388 | |||
1389 | kvm_notify_acked_irq(kvm, 0, | 1421 | kvm_notify_acked_irq(kvm, 0, |
1390 | vlr.irq - VGIC_NR_PRIVATE_IRQS); | 1422 | vlr.irq - VGIC_NR_PRIVATE_IRQS); |
1391 | spin_lock(&dist->lock); | ||
1392 | |||
1393 | /* Any additional pending interrupt? */ | ||
1394 | if (vgic_dist_irq_get_level(vcpu, vlr.irq)) { | ||
1395 | vgic_cpu_irq_set(vcpu, vlr.irq); | ||
1396 | level_pending = true; | ||
1397 | } else { | ||
1398 | vgic_dist_irq_clear_pending(vcpu, vlr.irq); | ||
1399 | vgic_cpu_irq_clear(vcpu, vlr.irq); | ||
1400 | } | ||
1401 | 1423 | ||
1424 | spin_lock(&dist->lock); | ||
1425 | level_pending |= process_level_irq(vcpu, lr, vlr); | ||
1402 | spin_unlock(&dist->lock); | 1426 | spin_unlock(&dist->lock); |
1403 | |||
1404 | /* | ||
1405 | * Despite being EOIed, the LR may not have | ||
1406 | * been marked as empty. | ||
1407 | */ | ||
1408 | vgic_sync_lr_elrsr(vcpu, lr, vlr); | ||
1409 | } | 1427 | } |
1410 | } | 1428 | } |
1411 | 1429 | ||