author    Christoffer Dall <christoffer.dall@linaro.org>  2015-03-13 13:02:54 -0400
committer Christoffer Dall <christoffer.dall@linaro.org>  2015-03-14 08:46:44 -0400
commit    47a98b15ba7cf6a13bd94ab8455d3f586b16420b (patch)
tree      5b0263f9e628236b50dd863da13269912cf5873b /virt/kvm
parent    71760950bf3dc796e5e53ea3300dec724a09f593 (diff)
arm/arm64: KVM: support for un-queuing active IRQs
Migrating active interrupts causes the active state to be lost
completely. This patch adds bitmaps to track the active state on the
distributor and exports that state to user space.

Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Diffstat (limited to 'virt/kvm')
-rw-r--r--  virt/kvm/arm/vgic-v2-emul.c |  20
-rw-r--r--  virt/kvm/arm/vgic.c         | 207
-rw-r--r--  virt/kvm/arm/vgic.h         |   8
3 files changed, 198 insertions(+), 37 deletions(-)
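Annotation: the "export to user space" part is what lets a VMM round-trip the GICD_ISACTIVERn/ICACTIVERn state across migration through the existing KVM_DEV_ARM_VGIC_GRP_DIST_REGS attribute group. As a rough illustration (not part of this patch), user space could access one 32-bit distributor register like this, assuming the GICv2 encoding of the cpuid in attr bits [39:32] and the MMIO offset in bits [31:0]:

/* Illustrative only: save/restore one 32-bit distributor register. */
#include <linux/kvm.h>
#include <stdbool.h>
#include <stdint.h>
#include <sys/ioctl.h>

static int vgic_dist_reg(int vgic_dev_fd, int cpuid, uint32_t offset,
                         uint32_t *val, bool write)
{
        struct kvm_device_attr attr = {
                .group = KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
                .attr  = ((uint64_t)cpuid << 32) | offset,
                .addr  = (uint64_t)(uintptr_t)val,
        };

        return ioctl(vgic_dev_fd, write ? KVM_SET_DEVICE_ATTR
                                        : KVM_GET_DEVICE_ATTR, &attr);
}

With this patch, a read at GIC_DIST_ACTIVE_SET (0x300) returns the tracked active bits instead of RAZ, and writing the saved words back through the same offsets restores them.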
diff --git a/virt/kvm/arm/vgic-v2-emul.c b/virt/kvm/arm/vgic-v2-emul.c
index 19c6210f02cf..c81866284441 100644
--- a/virt/kvm/arm/vgic-v2-emul.c
+++ b/virt/kvm/arm/vgic-v2-emul.c
@@ -107,6 +107,22 @@ static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
 				    vcpu->vcpu_id);
 }
 
+static bool handle_mmio_set_active_reg(struct kvm_vcpu *vcpu,
+				       struct kvm_exit_mmio *mmio,
+				       phys_addr_t offset)
+{
+	return vgic_handle_set_active_reg(vcpu->kvm, mmio, offset,
+					  vcpu->vcpu_id);
+}
+
+static bool handle_mmio_clear_active_reg(struct kvm_vcpu *vcpu,
+					 struct kvm_exit_mmio *mmio,
+					 phys_addr_t offset)
+{
+	return vgic_handle_clear_active_reg(vcpu->kvm, mmio, offset,
+					    vcpu->vcpu_id);
+}
+
 static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
 				     struct kvm_exit_mmio *mmio,
 				     phys_addr_t offset)
@@ -344,13 +360,13 @@ static const struct kvm_mmio_range vgic_dist_ranges[] = {
 		.base = GIC_DIST_ACTIVE_SET,
 		.len = VGIC_MAX_IRQS / 8,
 		.bits_per_irq = 1,
-		.handle_mmio = handle_mmio_raz_wi,
+		.handle_mmio = handle_mmio_set_active_reg,
 	},
 	{
 		.base = GIC_DIST_ACTIVE_CLEAR,
 		.len = VGIC_MAX_IRQS / 8,
 		.bits_per_irq = 1,
-		.handle_mmio = handle_mmio_raz_wi,
+		.handle_mmio = handle_mmio_clear_active_reg,
 	},
 	{
 		.base = GIC_DIST_PRI,
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 697ce17538f5..ffd937ca5141 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -264,6 +264,13 @@ static int vgic_irq_is_queued(struct kvm_vcpu *vcpu, int irq)
 	return vgic_bitmap_get_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq);
 }
 
+static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq)
+{
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+	return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq);
+}
+
 static void vgic_irq_set_queued(struct kvm_vcpu *vcpu, int irq)
 {
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
@@ -278,6 +285,20 @@ static void vgic_irq_clear_queued(struct kvm_vcpu *vcpu, int irq)
 	vgic_bitmap_set_irq_val(&dist->irq_queued, vcpu->vcpu_id, irq, 0);
 }
 
+static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq)
+{
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+	vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1);
+}
+
+static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq)
+{
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+	vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0);
+}
+
 static int vgic_dist_irq_get_level(struct kvm_vcpu *vcpu, int irq)
 {
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
@@ -521,6 +542,44 @@ bool vgic_handle_clear_pending_reg(struct kvm *kvm,
 	return false;
 }
 
+bool vgic_handle_set_active_reg(struct kvm *kvm,
+				struct kvm_exit_mmio *mmio,
+				phys_addr_t offset, int vcpu_id)
+{
+	u32 *reg;
+	struct vgic_dist *dist = &kvm->arch.vgic;
+
+	reg = vgic_bitmap_get_reg(&dist->irq_active, vcpu_id, offset);
+	vgic_reg_access(mmio, reg, offset,
+			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
+
+	if (mmio->is_write) {
+		vgic_update_state(kvm);
+		return true;
+	}
+
+	return false;
+}
+
+bool vgic_handle_clear_active_reg(struct kvm *kvm,
+				  struct kvm_exit_mmio *mmio,
+				  phys_addr_t offset, int vcpu_id)
+{
+	u32 *reg;
+	struct vgic_dist *dist = &kvm->arch.vgic;
+
+	reg = vgic_bitmap_get_reg(&dist->irq_active, vcpu_id, offset);
+	vgic_reg_access(mmio, reg, offset,
+			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
+
+	if (mmio->is_write) {
+		vgic_update_state(kvm);
+		return true;
+	}
+
+	return false;
+}
+
 static u32 vgic_cfg_expand(u16 val)
 {
 	u32 res = 0;
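Annotation: both handlers defer the bit manipulation to vgic_reg_access() with an access-mode mask. A simplified model of those modes (kernel types assumed; this is a sketch of the semantics, not the helper's actual code):

enum mode { SETBIT, CLEARBIT };

/* Reads return the raw bitmap word; writes OR in or clear the written bits. */
static void reg_access_model(u32 *reg, u32 *mmio_data, bool is_write,
                             enum mode mode)
{
        if (!is_write)
                *mmio_data = *reg;      /* ACCESS_READ_VALUE */
        else if (mode == SETBIT)
                *reg |= *mmio_data;     /* ACCESS_WRITE_SETBIT */
        else
                *reg &= ~*mmio_data;    /* ACCESS_WRITE_CLEARBIT */
}

This matches the GICv2 ISACTIVERn/ICACTIVERn convention: writing 1 sets or clears a bit, writing 0 is a no-op, so individual IRQs can be updated without a read-modify-write cycle.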
@@ -589,16 +648,12 @@ bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
 }
 
 /**
- * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor
+ * vgic_unqueue_irqs - move pending/active IRQs from LRs to the distributor
  * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs
  *
- * Move any pending IRQs that have already been assigned to LRs back to the
+ * Move any IRQs that have already been assigned to LRs back to the
  * emulated distributor state so that the complete emulated state can be read
  * from the main emulation structures without investigating the LRs.
- *
- * Note that IRQs in the active state in the LRs get their pending state moved
- * to the distributor but the active state stays in the LRs, because we don't
- * track the active state on the distributor side.
  */
 void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
 {
@@ -614,12 +669,22 @@ void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
 		 * 01: pending
 		 * 10: active
 		 * 11: pending and active
-		 *
-		 * If the LR holds only an active interrupt (not pending) then
-		 * just leave it alone.
 		 */
-		if ((lr.state & LR_STATE_MASK) == LR_STATE_ACTIVE)
-			continue;
+		BUG_ON(!(lr.state & LR_STATE_MASK));
+
+		/* Reestablish SGI source for pending and active IRQs */
+		if (lr.irq < VGIC_NR_SGIS)
+			add_sgi_source(vcpu, lr.irq, lr.source);
+
+		/*
+		 * If the LR holds an active (10) or a pending and active (11)
+		 * interrupt then move the active state to the
+		 * distributor tracking bit.
+		 */
+		if (lr.state & LR_STATE_ACTIVE) {
+			vgic_irq_set_active(vcpu, lr.irq);
+			lr.state &= ~LR_STATE_ACTIVE;
+		}
 
 		/*
 		 * Reestablish the pending state on the distributor and the
@@ -627,21 +692,19 @@ void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
 		 * is fine, then we are only setting a few bits that were
 		 * already set.
 		 */
-		vgic_dist_irq_set_pending(vcpu, lr.irq);
-		if (lr.irq < VGIC_NR_SGIS)
-			add_sgi_source(vcpu, lr.irq, lr.source);
-		lr.state &= ~LR_STATE_PENDING;
+		if (lr.state & LR_STATE_PENDING) {
+			vgic_dist_irq_set_pending(vcpu, lr.irq);
+			lr.state &= ~LR_STATE_PENDING;
+		}
 
 		vgic_set_lr(vcpu, i, lr);
 
 		/*
-		 * If there's no state left on the LR (it could still be
-		 * active), then the LR does not hold any useful info and can
-		 * be marked as free for other use.
+		 * Mark the LR as free for other use.
 		 */
-		if (!(lr.state & LR_STATE_MASK)) {
-			vgic_retire_lr(i, lr.irq, vcpu);
-			vgic_irq_clear_queued(vcpu, lr.irq);
-		}
+		BUG_ON(lr.state & LR_STATE_MASK);
+		vgic_retire_lr(i, lr.irq, vcpu);
+		vgic_irq_clear_queued(vcpu, lr.irq);
 
 		/* Finally update the VGIC state. */
 		vgic_update_state(vcpu->kvm);
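Annotation: the rewritten loop relies on the two-bit LR state field called out in the comment above (01: pending, 10: active, 11: pending and active). A self-contained decode of that encoding, using the mask values as defined in vgic.h:

#define LR_STATE_PENDING	(1 << 0)
#define LR_STATE_ACTIVE		(1 << 1)
#define LR_STATE_MASK		(3 << 0)

static const char *lr_state_name(u32 state)
{
        switch (state & LR_STATE_MASK) {
        case LR_STATE_PENDING:                          return "pending";
        case LR_STATE_ACTIVE:                           return "active";
        case LR_STATE_PENDING | LR_STATE_ACTIVE:        return "pending+active";
        default:                                        return "empty";
        }
}

The BUG_ON at the top of the loop holds because an LR with no state left is retired at the bottom of the same loop, so a populated lr_used bit always implies a non-empty state field.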
@@ -805,6 +868,36 @@ static int vgic_nr_shared_irqs(struct vgic_dist *dist)
 	return dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;
 }
 
+static int compute_active_for_cpu(struct kvm_vcpu *vcpu)
+{
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+	unsigned long *active, *enabled, *act_percpu, *act_shared;
+	unsigned long active_private, active_shared;
+	int nr_shared = vgic_nr_shared_irqs(dist);
+	int vcpu_id;
+
+	vcpu_id = vcpu->vcpu_id;
+	act_percpu = vcpu->arch.vgic_cpu.active_percpu;
+	act_shared = vcpu->arch.vgic_cpu.active_shared;
+
+	active = vgic_bitmap_get_cpu_map(&dist->irq_active, vcpu_id);
+	enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
+	bitmap_and(act_percpu, active, enabled, VGIC_NR_PRIVATE_IRQS);
+
+	active = vgic_bitmap_get_shared_map(&dist->irq_active);
+	enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
+	bitmap_and(act_shared, active, enabled, nr_shared);
+	bitmap_and(act_shared, act_shared,
+		   vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
+		   nr_shared);
+
+	active_private = find_first_bit(act_percpu, VGIC_NR_PRIVATE_IRQS);
+	active_shared = find_first_bit(act_shared, nr_shared);
+
+	return (active_private < VGIC_NR_PRIVATE_IRQS ||
+		active_shared < nr_shared);
+}
+
 static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
 {
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
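Annotation: compute_active_for_cpu() mirrors compute_pending_for_cpu(): an IRQ counts for a vcpu only if it is both active and enabled, and, for shared IRQs, also targeted at that vcpu. The same reduction in isolation (a sketch using the generic bitmap helpers; pass target as NULL for private IRQs):

#include <linux/bitmap.h>

static bool any_deliverable(const unsigned long *active,
                            const unsigned long *enabled,
                            const unsigned long *target,
                            unsigned long *scratch, int nr)
{
        bitmap_and(scratch, active, enabled, nr);
        if (target)
                bitmap_and(scratch, scratch, target, nr);

        /* find_first_bit() returns nr when the bitmap is empty. */
        return find_first_bit(scratch, nr) < nr;
}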
@@ -836,7 +929,7 @@ static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
 
 /*
  * Update the interrupt state and determine which CPUs have pending
- * interrupts. Must be called with distributor lock held.
+ * or active interrupts. Must be called with distributor lock held.
  */
 void vgic_update_state(struct kvm *kvm)
 {
@@ -850,10 +943,13 @@ void vgic_update_state(struct kvm *kvm)
 	}
 
 	kvm_for_each_vcpu(c, vcpu, kvm) {
-		if (compute_pending_for_cpu(vcpu)) {
-			pr_debug("CPU%d has pending interrupts\n", c);
+		if (compute_pending_for_cpu(vcpu))
 			set_bit(c, dist->irq_pending_on_cpu);
-		}
+
+		if (compute_active_for_cpu(vcpu))
+			set_bit(c, dist->irq_active_on_cpu);
+		else
+			clear_bit(c, dist->irq_active_on_cpu);
 	}
 }
 
@@ -953,7 +1049,12 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
 static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
 				 int lr_nr, struct vgic_lr vlr)
 {
-	if (vgic_dist_irq_is_pending(vcpu, irq)) {
+	if (vgic_irq_is_active(vcpu, irq)) {
+		vlr.state |= LR_STATE_ACTIVE;
+		kvm_debug("Set active, clear distributor: 0x%x\n", vlr.state);
+		vgic_irq_clear_active(vcpu, irq);
+		vgic_update_state(vcpu->kvm);
+	} else if (vgic_dist_irq_is_pending(vcpu, irq)) {
 		vlr.state |= LR_STATE_PENDING;
 		kvm_debug("Set pending: 0x%x\n", vlr.state);
 	}
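Annotation: note the ordering here. When an LR is populated, a tracked active state takes precedence over pending and is moved out of the distributor, so the active bit lives in exactly one place at a time — the mirror image of what vgic_unqueue_irqs() does on save. A toy model of that invariant (hypothetical types, illustrative mask value):

struct lr_model { unsigned int state; };

/* The active bit migrates between distributor and LR, never both. */
static void inject_active(struct lr_model *lr, bool *dist_active)
{
        if (*dist_active) {
                lr->state |= (1u << 1); /* LR_STATE_ACTIVE */
                *dist_active = false;
        }
}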
@@ -1041,39 +1142,49 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+	unsigned long *pa_percpu, *pa_shared;
 	int i, vcpu_id;
 	int overflow = 0;
+	int nr_shared = vgic_nr_shared_irqs(dist);
 
 	vcpu_id = vcpu->vcpu_id;
 
+	pa_percpu = vcpu->arch.vgic_cpu.pend_act_percpu;
+	pa_shared = vcpu->arch.vgic_cpu.pend_act_shared;
+
+	bitmap_or(pa_percpu, vgic_cpu->pending_percpu, vgic_cpu->active_percpu,
+		  VGIC_NR_PRIVATE_IRQS);
+	bitmap_or(pa_shared, vgic_cpu->pending_shared, vgic_cpu->active_shared,
+		  nr_shared);
 	/*
 	 * We may not have any pending interrupt, or the interrupts
 	 * may have been serviced from another vcpu. In all cases,
 	 * move along.
 	 */
-	if (!kvm_vgic_vcpu_pending_irq(vcpu)) {
-		pr_debug("CPU%d has no pending interrupt\n", vcpu_id);
+	if (!kvm_vgic_vcpu_pending_irq(vcpu) && !kvm_vgic_vcpu_active_irq(vcpu))
 		goto epilog;
-	}
 
 	/* SGIs */
-	for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) {
+	for_each_set_bit(i, pa_percpu, VGIC_NR_SGIS) {
 		if (!queue_sgi(vcpu, i))
 			overflow = 1;
 	}
 
 	/* PPIs */
-	for_each_set_bit_from(i, vgic_cpu->pending_percpu, VGIC_NR_PRIVATE_IRQS) {
+	for_each_set_bit_from(i, pa_percpu, VGIC_NR_PRIVATE_IRQS) {
 		if (!vgic_queue_hwirq(vcpu, i))
 			overflow = 1;
 	}
 
 	/* SPIs */
-	for_each_set_bit(i, vgic_cpu->pending_shared, vgic_nr_shared_irqs(dist)) {
+	for_each_set_bit(i, pa_shared, nr_shared) {
 		if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
 			overflow = 1;
 	}
 
+
+
+
 epilog:
 	if (overflow) {
 		vgic_enable_underflow(vcpu);
@@ -1229,6 +1340,17 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
 	return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
 }
 
+int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu)
+{
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+	if (!irqchip_in_kernel(vcpu->kvm))
+		return 0;
+
+	return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu);
+}
+
+
 void vgic_kick_vcpus(struct kvm *kvm)
 {
 	struct kvm_vcpu *vcpu;
@@ -1401,8 +1523,12 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 
 	kfree(vgic_cpu->pending_shared);
+	kfree(vgic_cpu->active_shared);
+	kfree(vgic_cpu->pend_act_shared);
 	kfree(vgic_cpu->vgic_irq_lr_map);
 	vgic_cpu->pending_shared = NULL;
+	vgic_cpu->active_shared = NULL;
+	vgic_cpu->pend_act_shared = NULL;
 	vgic_cpu->vgic_irq_lr_map = NULL;
 }
 
@@ -1412,9 +1538,14 @@ static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
 
 	int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
 	vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
+	vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
+	vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);
 	vgic_cpu->vgic_irq_lr_map = kmalloc(nr_irqs, GFP_KERNEL);
 
-	if (!vgic_cpu->pending_shared || !vgic_cpu->vgic_irq_lr_map) {
+	if (!vgic_cpu->pending_shared
+		|| !vgic_cpu->active_shared
+		|| !vgic_cpu->pend_act_shared
+		|| !vgic_cpu->vgic_irq_lr_map) {
 		kvm_vgic_vcpu_destroy(vcpu);
 		return -ENOMEM;
 	}
@@ -1467,10 +1598,12 @@ void kvm_vgic_destroy(struct kvm *kvm)
 	kfree(dist->irq_spi_mpidr);
 	kfree(dist->irq_spi_target);
 	kfree(dist->irq_pending_on_cpu);
+	kfree(dist->irq_active_on_cpu);
 	dist->irq_sgi_sources = NULL;
 	dist->irq_spi_cpu = NULL;
 	dist->irq_spi_target = NULL;
 	dist->irq_pending_on_cpu = NULL;
+	dist->irq_active_on_cpu = NULL;
 	dist->nr_cpus = 0;
 }
 
@@ -1506,6 +1639,7 @@ int vgic_init(struct kvm *kvm)
 	ret |= vgic_init_bitmap(&dist->irq_pending, nr_cpus, nr_irqs);
 	ret |= vgic_init_bitmap(&dist->irq_soft_pend, nr_cpus, nr_irqs);
 	ret |= vgic_init_bitmap(&dist->irq_queued, nr_cpus, nr_irqs);
+	ret |= vgic_init_bitmap(&dist->irq_active, nr_cpus, nr_irqs);
 	ret |= vgic_init_bitmap(&dist->irq_cfg, nr_cpus, nr_irqs);
 	ret |= vgic_init_bytemap(&dist->irq_priority, nr_cpus, nr_irqs);
 
@@ -1518,10 +1652,13 @@ int vgic_init(struct kvm *kvm)
 					   GFP_KERNEL);
 	dist->irq_pending_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
 					   GFP_KERNEL);
+	dist->irq_active_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
+					   GFP_KERNEL);
 	if (!dist->irq_sgi_sources ||
 	    !dist->irq_spi_cpu ||
 	    !dist->irq_spi_target ||
-	    !dist->irq_pending_on_cpu) {
+	    !dist->irq_pending_on_cpu ||
+	    !dist->irq_active_on_cpu) {
 		ret = -ENOMEM;
 		goto out;
 	}
diff --git a/virt/kvm/arm/vgic.h b/virt/kvm/arm/vgic.h
index 1e83bdf5f499..1e5a38128804 100644
--- a/virt/kvm/arm/vgic.h
+++ b/virt/kvm/arm/vgic.h
@@ -107,6 +107,14 @@ bool vgic_handle_set_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
 bool vgic_handle_clear_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
 				   phys_addr_t offset, int vcpu_id);
 
+bool vgic_handle_set_active_reg(struct kvm *kvm,
+				struct kvm_exit_mmio *mmio,
+				phys_addr_t offset, int vcpu_id);
+
+bool vgic_handle_clear_active_reg(struct kvm *kvm,
+				  struct kvm_exit_mmio *mmio,
+				  phys_addr_t offset, int vcpu_id);
+
 bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
 			 phys_addr_t offset);
 