aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--include/kvm/arm_vgic.h4
-rw-r--r--virt/kvm/arm/vgic.c52
2 files changed, 28 insertions, 28 deletions
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 35b0c121bb65..388d442eecb5 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -140,8 +140,8 @@ struct vgic_dist {
 	/* Interrupt enabled (one bit per IRQ) */
 	struct vgic_bitmap	irq_enabled;
 
-	/* Interrupt 'pin' level */
-	struct vgic_bitmap	irq_state;
+	/* Interrupt state is pending on the distributor */
+	struct vgic_bitmap	irq_pending;
 
 	/* Level-triggered interrupt in progress */
 	struct vgic_bitmap	irq_active;
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index eeb23b37f87c..7e86a36f3fc5 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -37,7 +37,7 @@
  *
  * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if
  *   something is pending
- * - VGIC pending interrupts are stored on the vgic.irq_state vgic
+ * - VGIC pending interrupts are stored on the vgic.irq_pending vgic
  *   bitmap (this bitmap is updated by both user land ioctls and guest
  *   mmio ops, and other in-kernel peripherals such as the
  *   arch. timers) and indicate the 'wire' state.
@@ -45,8 +45,8 @@
  *   recalculated
  * - To calculate the oracle, we need info for each cpu from
  *   compute_pending_for_cpu, which considers:
- *   - PPI: dist->irq_state & dist->irq_enable
- *   - SPI: dist->irq_state & dist->irq_enable & dist->irq_spi_target
+ *   - PPI: dist->irq_pending & dist->irq_enable
+ *   - SPI: dist->irq_pending & dist->irq_enable & dist->irq_spi_target
  * - irq_spi_target is a 'formatted' version of the GICD_ICFGR
  *   registers, stored on each vcpu. We only keep one bit of
  *   information per interrupt, making sure that only one vcpu can
@@ -221,21 +221,21 @@ static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
 {
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 
-	return vgic_bitmap_get_irq_val(&dist->irq_state, vcpu->vcpu_id, irq);
+	return vgic_bitmap_get_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq);
 }
 
-static void vgic_dist_irq_set(struct kvm_vcpu *vcpu, int irq)
+static void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq)
 {
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 
-	vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 1);
+	vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 1);
 }
 
-static void vgic_dist_irq_clear(struct kvm_vcpu *vcpu, int irq)
+static void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq)
 {
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 
-	vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 0);
+	vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 0);
 }
 
 static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
@@ -409,7 +409,7 @@ static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
 					  struct kvm_exit_mmio *mmio,
 					  phys_addr_t offset)
 {
-	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state,
+	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_pending,
 				       vcpu->vcpu_id, offset);
 	vgic_reg_access(mmio, reg, offset,
 			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
@@ -425,7 +425,7 @@ static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
 					    struct kvm_exit_mmio *mmio,
 					    phys_addr_t offset)
 {
-	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state,
+	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_pending,
 				       vcpu->vcpu_id, offset);
 	vgic_reg_access(mmio, reg, offset,
 			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
@@ -651,7 +651,7 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
 		 * is fine, then we are only setting a few bits that were
 		 * already set.
 		 */
-		vgic_dist_irq_set(vcpu, lr.irq);
+		vgic_dist_irq_set_pending(vcpu, lr.irq);
 		if (lr.irq < VGIC_NR_SGIS)
 			dist->irq_sgi_sources[vcpu_id][lr.irq] |= 1 << lr.source;
 		lr.state &= ~LR_STATE_PENDING;
@@ -932,7 +932,7 @@ static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
 	kvm_for_each_vcpu(c, vcpu, kvm) {
 		if (target_cpus & 1) {
 			/* Flag the SGI as pending */
-			vgic_dist_irq_set(vcpu, sgi);
+			vgic_dist_irq_set_pending(vcpu, sgi);
 			dist->irq_sgi_sources[c][sgi] |= 1 << vcpu_id;
 			kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
 		}
@@ -952,11 +952,11 @@ static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
 	pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
 	pend_shared = vcpu->arch.vgic_cpu.pending_shared;
 
-	pending = vgic_bitmap_get_cpu_map(&dist->irq_state, vcpu_id);
+	pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id);
 	enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
 	bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);
 
-	pending = vgic_bitmap_get_shared_map(&dist->irq_state);
+	pending = vgic_bitmap_get_shared_map(&dist->irq_pending);
 	enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
 	bitmap_and(pend_shared, pending, enabled, VGIC_NR_SHARED_IRQS);
 	bitmap_and(pend_shared, pend_shared,
@@ -1160,7 +1160,7 @@ static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq)
 	 * our emulated gic and can get rid of them.
 	 */
 	if (!sources) {
-		vgic_dist_irq_clear(vcpu, irq);
+		vgic_dist_irq_clear_pending(vcpu, irq);
 		vgic_cpu_irq_clear(vcpu, irq);
 		return true;
 	}
@@ -1175,7 +1175,7 @@ static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
 
 	if (vgic_queue_irq(vcpu, 0, irq)) {
 		if (vgic_irq_is_edge(vcpu, irq)) {
-			vgic_dist_irq_clear(vcpu, irq);
+			vgic_dist_irq_clear_pending(vcpu, irq);
 			vgic_cpu_irq_clear(vcpu, irq);
 		} else {
 			vgic_irq_set_active(vcpu, irq);
@@ -1376,7 +1376,7 @@ static void vgic_kick_vcpus(struct kvm *kvm)
 
 static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
 {
-	int is_edge = vgic_irq_is_edge(vcpu, irq);
+	int edge_triggered = vgic_irq_is_edge(vcpu, irq);
 	int state = vgic_dist_irq_is_pending(vcpu, irq);
 
 	/*
@@ -1384,26 +1384,26 @@ static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
 	 *   - edge triggered and we have a rising edge
 	 *   - level triggered and we change level
 	 */
-	if (is_edge)
+	if (edge_triggered)
 		return level > state;
 	else
 		return level != state;
 }
 
-static bool vgic_update_irq_state(struct kvm *kvm, int cpuid,
-				  unsigned int irq_num, bool level)
+static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
+				    unsigned int irq_num, bool level)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
 	struct kvm_vcpu *vcpu;
-	int is_edge, is_level;
+	int edge_triggered, level_triggered;
 	int enabled;
 	bool ret = true;
 
 	spin_lock(&dist->lock);
 
 	vcpu = kvm_get_vcpu(kvm, cpuid);
-	is_edge = vgic_irq_is_edge(vcpu, irq_num);
-	is_level = !is_edge;
+	edge_triggered = vgic_irq_is_edge(vcpu, irq_num);
+	level_triggered = !edge_triggered;
 
 	if (!vgic_validate_injection(vcpu, irq_num, level)) {
 		ret = false;
@@ -1418,9 +1418,9 @@ static bool vgic_update_irq_state(struct kvm *kvm, int cpuid,
 	kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid);
 
 	if (level)
-		vgic_dist_irq_set(vcpu, irq_num);
+		vgic_dist_irq_set_pending(vcpu, irq_num);
 	else
-		vgic_dist_irq_clear(vcpu, irq_num);
+		vgic_dist_irq_clear_pending(vcpu, irq_num);
 
 	enabled = vgic_irq_is_enabled(vcpu, irq_num);
 
@@ -1429,7 +1429,7 @@ static bool vgic_update_irq_state(struct kvm *kvm, int cpuid,
 		goto out;
 	}
 
-	if (is_level && vgic_irq_is_active(vcpu, irq_num)) {
+	if (level_triggered && vgic_irq_is_active(vcpu, irq_num)) {
 		/*
 		 * Level interrupt in progress, will be picked up
 		 * when EOId.
@@ -1466,7 +1466,7 @@ out:
 int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
 			bool level)
 {
-	if (vgic_update_irq_state(kvm, cpuid, irq_num, level))
+	if (vgic_update_irq_pending(kvm, cpuid, irq_num, level))
 		vgic_kick_vcpus(kvm);
 
 	return 0;