author	Marc Zyngier <marc.zyngier@arm.com>	2018-04-18 05:39:04 -0400
committer	Marc Zyngier <marc.zyngier@arm.com>	2018-04-27 07:39:09 -0400
commit	53692908b0f594285aba18ab848318262332ed25 (patch)
tree	d486da77b3dc691e89c2ab1fbe8942991fe14e84 /virt
parent	85bd0ba1ff9875798fad94218b627ea9f768f3c3 (diff)
KVM: arm/arm64: vgic: Fix source vcpu issues for GICv2 SGI
Now that we make sure we don't inject multiple instances of the
same GICv2 SGI at the same time, we've made another bug more
obvious:

If we exit with an active SGI, we completely lose track of which
vcpu it came from. On the next entry, we restore it with 0 as a
source, and if that wasn't the right one, too bad. While this
doesn't seem to trouble GIC-400, the architectural model gets
offended and doesn't deactivate the interrupt on EOI.

Another connected issue is that we will happily make pending an
interrupt from another vcpu, overriding the above zero with
something that is just as inconsistent. Don't do that.

The final issue is that we signal a maintenance interrupt when no
pending interrupts are present in the LR. Assuming we've fixed the
two issues above, we end up in a situation where we keep exiting
as soon as we've reached the active state, and are not able to
inject the following pending interrupt.

The fix comes in 3 parts:
- GICv2 SGIs have their source vcpu saved if they are active on
  exit, and restored on entry
- Multi-SGIs cannot go via the Pending+Active state, as this would
  corrupt the source field
- Multi-SGIs are converted to using MI on EOI instead of NPIE

Fixes: 16ca6a607d84bef0 ("KVM: arm/arm64: vgic: Don't populate multiple LRs with the same vintid")
Reported-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
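For illustration, the core of the first part of the fix is packing the
source vcpu id into the CPUID field of the list register while the SGI
is active, and unpacking it on exit. Below is a minimal, standalone C
sketch of that round trip; the field layout follows
include/linux/irqchip/arm-gic.h, but the helper names are made up for
this example and are not part of the patch:

	/* Sketch only: mirrors the GICv2 LR encode/decode this patch performs. */
	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define GICH_LR_VIRTUALID		(0x3ffU << 0)
	#define GICH_LR_PHYSID_CPUID_SHIFT	10
	#define GICH_LR_PHYSID_CPUID		(0x3ffU << GICH_LR_PHYSID_CPUID_SHIFT)
	#define GICH_LR_ACTIVE_BIT		(1U << 29)

	/* Entry path: rebuild an active SGI LR, restoring the saved source vcpu */
	static uint32_t lr_pack_active_sgi(uint32_t intid, uint8_t active_source)
	{
		uint32_t val = intid & GICH_LR_VIRTUALID;

		val |= GICH_LR_ACTIVE_BIT;
		val |= (uint32_t)active_source << GICH_LR_PHYSID_CPUID_SHIFT;
		return val;
	}

	/* Exit path: recover the source vcpu id (GICv2 SGI sources are 3 bits) */
	static uint8_t lr_unpack_source(uint32_t val)
	{
		return ((val & GICH_LR_PHYSID_CPUID) >> GICH_LR_PHYSID_CPUID_SHIFT) & 7;
	}

	int main(void)
	{
		uint32_t lr = lr_pack_active_sgi(3, 5);	/* SGI 3, sent by vcpu 5 */

		assert(lr_unpack_source(lr) == 5);
		printf("lr=%#x source=%u\n", lr, lr_unpack_source(lr));
		return 0;
	}

This also shows why a multi-source SGI must not reach Pending+Active:
the CPUID field can only carry one source at a time, so each source has
to be injected (and EOIed) in turn.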
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/arm/vgic/vgic-mmio.c	10
-rw-r--r--	virt/kvm/arm/vgic/vgic-v2.c	38
-rw-r--r--	virt/kvm/arm/vgic/vgic-v3.c	49
-rw-r--r--	virt/kvm/arm/vgic/vgic.c	30
-rw-r--r--	virt/kvm/arm/vgic/vgic.h	14
5 files changed, 80 insertions, 61 deletions
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index dbe99d635c80..ff9655cfeb2f 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -289,10 +289,16 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
 	       irq->vcpu->cpu != -1) /* VCPU thread is running */
 		cond_resched_lock(&irq->irq_lock);
 
-	if (irq->hw)
+	if (irq->hw) {
 		vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
-	else
+	} else {
+		u32 model = vcpu->kvm->arch.vgic.vgic_model;
+
 		irq->active = active;
+		if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
+		    active && vgic_irq_is_sgi(irq->intid))
+			irq->active_source = requester_vcpu->vcpu_id;
+	}
 
 	if (irq->active)
 		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 45aa433f018f..a5f2e44f1c33 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -37,13 +37,6 @@ void vgic_v2_init_lrs(void)
 		vgic_v2_write_lr(i, 0);
 }
 
-void vgic_v2_set_npie(struct kvm_vcpu *vcpu)
-{
-	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
-
-	cpuif->vgic_hcr |= GICH_HCR_NPIE;
-}
-
 void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
@@ -71,13 +64,18 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
 	int lr;
 	unsigned long flags;
 
-	cpuif->vgic_hcr &= ~(GICH_HCR_UIE | GICH_HCR_NPIE);
+	cpuif->vgic_hcr &= ~GICH_HCR_UIE;
 
 	for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
 		u32 val = cpuif->vgic_lr[lr];
-		u32 intid = val & GICH_LR_VIRTUALID;
+		u32 cpuid, intid = val & GICH_LR_VIRTUALID;
 		struct vgic_irq *irq;
 
+		/* Extract the source vCPU id from the LR */
+		cpuid = val & GICH_LR_PHYSID_CPUID;
+		cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
+		cpuid &= 7;
+
 		/* Notify fds when the guest EOI'ed a level-triggered SPI */
 		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
 			kvm_notify_acked_irq(vcpu->kvm, 0,
@@ -90,17 +88,16 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
 		/* Always preserve the active bit */
 		irq->active = !!(val & GICH_LR_ACTIVE_BIT);
 
+		if (irq->active && vgic_irq_is_sgi(intid))
+			irq->active_source = cpuid;
+
 		/* Edge is the only case where we preserve the pending bit */
 		if (irq->config == VGIC_CONFIG_EDGE &&
 		    (val & GICH_LR_PENDING_BIT)) {
 			irq->pending_latch = true;
 
-			if (vgic_irq_is_sgi(intid)) {
-				u32 cpuid = val & GICH_LR_PHYSID_CPUID;
-
-				cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
+			if (vgic_irq_is_sgi(intid))
 				irq->source |= (1 << cpuid);
-			}
 		}
 
 		/*
@@ -152,8 +149,15 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 	u32 val = irq->intid;
 	bool allow_pending = true;
 
-	if (irq->active)
+	if (irq->active) {
 		val |= GICH_LR_ACTIVE_BIT;
+		if (vgic_irq_is_sgi(irq->intid))
+			val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
+		if (vgic_irq_is_multi_sgi(irq)) {
+			allow_pending = false;
+			val |= GICH_LR_EOI;
+		}
+	}
 
 	if (irq->hw) {
 		val |= GICH_LR_HW;
@@ -190,8 +194,10 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 			BUG_ON(!src);
 			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
 			irq->source &= ~(1 << (src - 1));
-			if (irq->source)
+			if (irq->source) {
 				irq->pending_latch = true;
+				val |= GICH_LR_EOI;
+			}
 		}
 	}
 
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 8195f52ae6f0..c7423f3768e5 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -27,13 +27,6 @@ static bool group1_trap;
 static bool common_trap;
 static bool gicv4_enable;
 
-void vgic_v3_set_npie(struct kvm_vcpu *vcpu)
-{
-	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
-
-	cpuif->vgic_hcr |= ICH_HCR_NPIE;
-}
-
 void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
@@ -55,17 +48,23 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
 	int lr;
 	unsigned long flags;
 
-	cpuif->vgic_hcr &= ~(ICH_HCR_UIE | ICH_HCR_NPIE);
+	cpuif->vgic_hcr &= ~ICH_HCR_UIE;
 
 	for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
 		u64 val = cpuif->vgic_lr[lr];
-		u32 intid;
+		u32 intid, cpuid;
 		struct vgic_irq *irq;
+		bool is_v2_sgi = false;
 
-		if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
+		cpuid = val & GICH_LR_PHYSID_CPUID;
+		cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
+
+		if (model == KVM_DEV_TYPE_ARM_VGIC_V3) {
 			intid = val & ICH_LR_VIRTUAL_ID_MASK;
-		else
+		} else {
 			intid = val & GICH_LR_VIRTUALID;
+			is_v2_sgi = vgic_irq_is_sgi(intid);
+		}
 
 		/* Notify fds when the guest EOI'ed a level-triggered IRQ */
 		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
@@ -81,18 +80,16 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
 		/* Always preserve the active bit */
 		irq->active = !!(val & ICH_LR_ACTIVE_BIT);
 
+		if (irq->active && is_v2_sgi)
+			irq->active_source = cpuid;
+
 		/* Edge is the only case where we preserve the pending bit */
 		if (irq->config == VGIC_CONFIG_EDGE &&
 		    (val & ICH_LR_PENDING_BIT)) {
 			irq->pending_latch = true;
 
-			if (vgic_irq_is_sgi(intid) &&
-			    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
-				u32 cpuid = val & GICH_LR_PHYSID_CPUID;
-
-				cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
+			if (is_v2_sgi)
 				irq->source |= (1 << cpuid);
-			}
 		}
 
 		/*
@@ -133,10 +130,20 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 {
 	u32 model = vcpu->kvm->arch.vgic.vgic_model;
 	u64 val = irq->intid;
-	bool allow_pending = true;
+	bool allow_pending = true, is_v2_sgi;
 
-	if (irq->active)
+	is_v2_sgi = (vgic_irq_is_sgi(irq->intid) &&
+		     model == KVM_DEV_TYPE_ARM_VGIC_V2);
+
+	if (irq->active) {
 		val |= ICH_LR_ACTIVE_BIT;
+		if (is_v2_sgi)
+			val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
+		if (vgic_irq_is_multi_sgi(irq)) {
+			allow_pending = false;
+			val |= ICH_LR_EOI;
+		}
+	}
 
 	if (irq->hw) {
 		val |= ICH_LR_HW;
@@ -174,8 +181,10 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 			BUG_ON(!src);
 			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
 			irq->source &= ~(1 << (src - 1));
-			if (irq->source)
+			if (irq->source) {
 				irq->pending_latch = true;
+				val |= ICH_LR_EOI;
+			}
 		}
 	}
 
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 4b6d72939c42..568c65f852e1 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -719,14 +719,6 @@ static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
 		vgic_v3_set_underflow(vcpu);
 }
 
-static inline void vgic_set_npie(struct kvm_vcpu *vcpu)
-{
-	if (kvm_vgic_global_state.type == VGIC_V2)
-		vgic_v2_set_npie(vcpu);
-	else
-		vgic_v3_set_npie(vcpu);
-}
-
 /* Requires the ap_list_lock to be held. */
 static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
 				 bool *multi_sgi)
@@ -740,17 +732,15 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
 	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
 
 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
+		int w;
+
 		spin_lock(&irq->irq_lock);
 		/* GICv2 SGIs can count for more than one... */
-		if (vgic_irq_is_sgi(irq->intid) && irq->source) {
-			int w = hweight8(irq->source);
-
-			count += w;
-			*multi_sgi |= (w > 1);
-		} else {
-			count++;
-		}
+		w = vgic_irq_get_lr_count(irq);
 		spin_unlock(&irq->irq_lock);
+
+		count += w;
+		*multi_sgi |= (w > 1);
 	}
 	return count;
 }
@@ -761,7 +751,6 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	struct vgic_irq *irq;
 	int count;
-	bool npie = false;
 	bool multi_sgi;
 	u8 prio = 0xff;
 
@@ -791,10 +780,8 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 		if (likely(vgic_target_oracle(irq) == vcpu)) {
 			vgic_populate_lr(vcpu, irq, count++);
 
-			if (irq->source) {
-				npie = true;
+			if (irq->source)
 				prio = irq->priority;
-			}
 		}
 
 		spin_unlock(&irq->irq_lock);
@@ -807,9 +794,6 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 		}
 	}
 
-	if (npie)
-		vgic_set_npie(vcpu);
-
 	vcpu->arch.vgic_cpu.used_lrs = count;
 
 	/* Nuke remaining LRs */
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index 830e815748a0..32c25d42c93f 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -110,6 +110,20 @@ static inline bool vgic_irq_is_mapped_level(struct vgic_irq *irq)
 	return irq->config == VGIC_CONFIG_LEVEL && irq->hw;
 }
 
+static inline int vgic_irq_get_lr_count(struct vgic_irq *irq)
+{
+	/* Account for the active state as an interrupt */
+	if (vgic_irq_is_sgi(irq->intid) && irq->source)
+		return hweight8(irq->source) + irq->active;
+
+	return irq_is_pending(irq) || irq->active;
+}
+
+static inline bool vgic_irq_is_multi_sgi(struct vgic_irq *irq)
+{
+	return vgic_irq_get_lr_count(irq) > 1;
+}
+
 /*
  * This struct provides an intermediate representation of the fields contained
  * in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC