author		Julien Thierry <julien.thierry@arm.com>		2019-01-07 10:06:15 -0500
committer	Christoffer Dall <christoffer.dall@arm.com>	2019-01-24 09:08:50 -0500
commit		8fa3adb8c6beee4af079ac90b9575ab92951de3f (patch)
tree		787ba19846fcb30c340008510ea12406533173cd
parent		49a57857aeea06ca831043acbb0fa5e0f50602fd (diff)
KVM: arm/arm64: vgic: Make vgic_irq->irq_lock a raw_spinlock
vgic_irq->irq_lock must always be taken with interrupts disabled as it
is used in interrupt context.

For configurations such as PREEMPT_RT_FULL, this means that it should
be a raw_spinlock since RT spinlocks are interruptible.

Signed-off-by: Julien Thierry <julien.thierry@arm.com>
Acked-by: Christoffer Dall <christoffer.dall@arm.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
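[Editor's note] As a minimal illustrative sketch (not part of this patch), the conversion pattern applied throughout the diff below looks roughly like this. The struct and function names here are hypothetical; only raw_spinlock_t, raw_spin_lock_init() and raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() are the real kernel primitives the patch switches to:

/*
 * Hypothetical example of the spinlock_t -> raw_spinlock_t conversion.
 * Only the locking primitives are real kernel APIs; the struct and the
 * functions are made up for illustration.
 */
#include <linux/spinlock.h>

struct example_irq {
	raw_spinlock_t lock;	/* was: spinlock_t lock; */
	bool pending;
};

static void example_irq_init(struct example_irq *eirq)
{
	/* was: spin_lock_init(&eirq->lock); */
	raw_spin_lock_init(&eirq->lock);
	eirq->pending = false;
}

static void example_irq_set_pending(struct example_irq *eirq)
{
	unsigned long flags;

	/*
	 * Taken with interrupts disabled. A raw_spinlock remains a true
	 * spinning lock on PREEMPT_RT_FULL, whereas a plain spinlock_t
	 * becomes a sleeping (and thus interruptible) lock there, which
	 * is not safe for a lock also taken from interrupt context.
	 */
	raw_spin_lock_irqsave(&eirq->lock, flags);
	eirq->pending = true;
	raw_spin_unlock_irqrestore(&eirq->lock, flags);
}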
-rw-r--r--	include/kvm/arm_vgic.h			 2
-rw-r--r--	virt/kvm/arm/vgic/vgic-debug.c		 4
-rw-r--r--	virt/kvm/arm/vgic/vgic-init.c		 4
-rw-r--r--	virt/kvm/arm/vgic/vgic-its.c		14
-rw-r--r--	virt/kvm/arm/vgic/vgic-mmio-v2.c	14
-rw-r--r--	virt/kvm/arm/vgic/vgic-mmio-v3.c	12
-rw-r--r--	virt/kvm/arm/vgic/vgic-mmio.c		34
-rw-r--r--	virt/kvm/arm/vgic/vgic-v2.c		 4
-rw-r--r--	virt/kvm/arm/vgic/vgic-v3.c		 8
-rw-r--r--	virt/kvm/arm/vgic/vgic.c		71
10 files changed, 83 insertions(+), 84 deletions(-)
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 4f31f96bbfab..b5426052152e 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -100,7 +100,7 @@ enum vgic_irq_config {
 };
 
 struct vgic_irq {
-	spinlock_t irq_lock;		/* Protects the content of the struct */
+	raw_spinlock_t irq_lock;	/* Protects the content of the struct */
 	struct list_head lpi_list;	/* Used to link all LPIs together */
 	struct list_head ap_list;
 
diff --git a/virt/kvm/arm/vgic/vgic-debug.c b/virt/kvm/arm/vgic/vgic-debug.c
index 07aa900bac56..1f62f2b8065d 100644
--- a/virt/kvm/arm/vgic/vgic-debug.c
+++ b/virt/kvm/arm/vgic/vgic-debug.c
@@ -251,9 +251,9 @@ static int vgic_debug_show(struct seq_file *s, void *v)
 		return 0;
 	}
 
-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 	print_irq_state(s, irq, vcpu);
-	spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 	vgic_put_irq(kvm, irq);
 	return 0;
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index c0c0b88af1d5..1128e97406cf 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -171,7 +171,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
 
 		irq->intid = i + VGIC_NR_PRIVATE_IRQS;
 		INIT_LIST_HEAD(&irq->ap_list);
-		spin_lock_init(&irq->irq_lock);
+		raw_spin_lock_init(&irq->irq_lock);
 		irq->vcpu = NULL;
 		irq->target_vcpu = vcpu0;
 		kref_init(&irq->refcount);
@@ -216,7 +216,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 		struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
 
 		INIT_LIST_HEAD(&irq->ap_list);
-		spin_lock_init(&irq->irq_lock);
+		raw_spin_lock_init(&irq->irq_lock);
 		irq->intid = i;
 		irq->vcpu = NULL;
 		irq->target_vcpu = vcpu;
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index eb2a390a6c86..911ba61505ee 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -65,7 +65,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 
 	INIT_LIST_HEAD(&irq->lpi_list);
 	INIT_LIST_HEAD(&irq->ap_list);
-	spin_lock_init(&irq->irq_lock);
+	raw_spin_lock_init(&irq->irq_lock);
 
 	irq->config = VGIC_CONFIG_EDGE;
 	kref_init(&irq->refcount);
@@ -287,7 +287,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
 	if (ret)
 		return ret;
 
-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
 	if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
 		irq->priority = LPI_PROP_PRIORITY(prop);
@@ -299,7 +299,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
 		}
 	}
 
-	spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 	if (irq->hw)
 		return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
@@ -352,9 +352,9 @@ static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
 	int ret = 0;
 	unsigned long flags;
 
-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 	irq->target_vcpu = vcpu;
-	spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 	if (irq->hw) {
 		struct its_vlpi_map map;
@@ -455,7 +455,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
 		}
 
 		irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		irq->pending_latch = pendmask & (1U << bit_nr);
 		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
 		vgic_put_irq(vcpu->kvm, irq);
@@ -612,7 +612,7 @@ static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
 		return irq_set_irqchip_state(irq->host_irq,
 					     IRQCHIP_STATE_PENDING, true);
 
-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 	irq->pending_latch = true;
 	vgic_queue_irq_unlock(kvm, irq, flags);
 
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index 738b65d2d0e7..b535fffc7400 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -147,7 +147,7 @@ static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
 
 		irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);
 
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		irq->pending_latch = true;
 		irq->source |= 1U << source_vcpu->vcpu_id;
 
@@ -191,13 +191,13 @@ static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
 		int target;
 
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
 		irq->targets = (val >> (i * 8)) & cpu_mask;
 		target = irq->targets ? __ffs(irq->targets) : 0;
 		irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);
 
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 		vgic_put_irq(vcpu->kvm, irq);
 	}
 }
@@ -230,13 +230,13 @@ static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
 	for (i = 0; i < len; i++) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
 		irq->source &= ~((val >> (i * 8)) & 0xff);
 		if (!irq->source)
 			irq->pending_latch = false;
 
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 		vgic_put_irq(vcpu->kvm, irq);
 	}
 }
@@ -252,7 +252,7 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
 	for (i = 0; i < len; i++) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
 		irq->source |= (val >> (i * 8)) & 0xff;
 
@@ -260,7 +260,7 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
 			irq->pending_latch = true;
 			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
 		} else {
-			spin_unlock_irqrestore(&irq->irq_lock, flags);
+			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 		}
 		vgic_put_irq(vcpu->kvm, irq);
 	}
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index b3d1f0985117..4a12322bf7df 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -169,13 +169,13 @@ static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
 	if (!irq)
 		return;
 
-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
 	/* We only care about and preserve Aff0, Aff1 and Aff2. */
 	irq->mpidr = val & GENMASK(23, 0);
 	irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);
 
-	spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 	vgic_put_irq(vcpu->kvm, irq);
 }
 
@@ -281,7 +281,7 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
 	for (i = 0; i < len * 8; i++) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		if (test_bit(i, &val)) {
 			/*
 			 * pending_latch is set irrespective of irq type
@@ -292,7 +292,7 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
 			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
 		} else {
 			irq->pending_latch = false;
-			spin_unlock_irqrestore(&irq->irq_lock, flags);
+			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 		}
 
 		vgic_put_irq(vcpu->kvm, irq);
@@ -957,7 +957,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
 
 		irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);
 
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
 		/*
 		 * An access targetting Group0 SGIs can only generate
@@ -968,7 +968,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
 			irq->pending_latch = true;
 			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
 		} else {
-			spin_unlock_irqrestore(&irq->irq_lock, flags);
+			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 		}
 
 		vgic_put_irq(vcpu->kvm, irq);
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index ceeda7e04a4d..7de42fba05b5 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -77,7 +77,7 @@ void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
 	for (i = 0; i < len * 8; i++) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		irq->group = !!(val & BIT(i));
 		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
 
@@ -120,7 +120,7 @@ void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
 	for_each_set_bit(i, &val, len * 8) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		irq->enabled = true;
 		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
 
@@ -139,11 +139,11 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
 	for_each_set_bit(i, &val, len * 8) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
 		irq->enabled = false;
 
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 		vgic_put_irq(vcpu->kvm, irq);
 	}
 }
@@ -160,10 +160,10 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 		unsigned long flags;
 
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		if (irq_is_pending(irq))
 			value |= (1U << i);
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 		vgic_put_irq(vcpu->kvm, irq);
 	}
@@ -215,7 +215,7 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
 	for_each_set_bit(i, &val, len * 8) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		if (irq->hw)
 			vgic_hw_irq_spending(vcpu, irq, is_uaccess);
 		else
@@ -262,14 +262,14 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
 	for_each_set_bit(i, &val, len * 8) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
 		if (irq->hw)
 			vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
 		else
 			irq->pending_latch = false;
 
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 		vgic_put_irq(vcpu->kvm, irq);
 	}
 }
@@ -311,7 +311,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
 	unsigned long flags;
 	struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu();
 
-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
 	if (irq->hw) {
 		vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
@@ -342,7 +342,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
 	if (irq->active)
 		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
 	else
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 }
 
 /*
@@ -485,10 +485,10 @@ void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
 	for (i = 0; i < len; i++) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		/* Narrow the priority range to what we actually support */
 		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 		vgic_put_irq(vcpu->kvm, irq);
 	}
@@ -534,14 +534,14 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
 			continue;
 
 		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
 		if (test_bit(i * 2 + 1, &val))
 			irq->config = VGIC_CONFIG_EDGE;
 		else
 			irq->config = VGIC_CONFIG_LEVEL;
 
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 		vgic_put_irq(vcpu->kvm, irq);
 	}
 }
@@ -590,12 +590,12 @@ void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
 		 * restore irq config before line level.
 		 */
 		new_level = !!(val & (1U << i));
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		irq->line_level = new_level;
 		if (new_level)
 			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
 		else
-			spin_unlock_irqrestore(&irq->irq_lock, flags);
+			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 		vgic_put_irq(vcpu->kvm, irq);
 	}
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 69b892abd7dc..d91a8938aa7c 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -84,7 +84,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
 
 		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
 
-		spin_lock(&irq->irq_lock);
+		raw_spin_lock(&irq->irq_lock);
 
 		/* Always preserve the active bit */
 		irq->active = !!(val & GICH_LR_ACTIVE_BIT);
@@ -127,7 +127,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
 			vgic_irq_set_phys_active(irq, false);
 		}
 
-		spin_unlock(&irq->irq_lock);
+		raw_spin_unlock(&irq->irq_lock);
 		vgic_put_irq(vcpu->kvm, irq);
 	}
 
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 9c0dd234ebe8..4ee0aeb9a905 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -76,7 +76,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
 		if (!irq)	/* An LPI could have been unmapped. */
 			continue;
 
-		spin_lock(&irq->irq_lock);
+		raw_spin_lock(&irq->irq_lock);
 
 		/* Always preserve the active bit */
 		irq->active = !!(val & ICH_LR_ACTIVE_BIT);
@@ -119,7 +119,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
 			vgic_irq_set_phys_active(irq, false);
 		}
 
-		spin_unlock(&irq->irq_lock);
+		raw_spin_unlock(&irq->irq_lock);
 		vgic_put_irq(vcpu->kvm, irq);
 	}
 
@@ -347,9 +347,9 @@ retry:
 
 	status = val & (1 << bit_nr);
 
-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 	if (irq->target_vcpu != vcpu) {
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 		goto retry;
 	}
 	irq->pending_latch = status;
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 870b1185173b..bc36f2e68f5a 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -244,8 +244,8 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
 	bool penda, pendb;
 	int ret;
 
-	spin_lock(&irqa->irq_lock);
-	spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);
+	raw_spin_lock(&irqa->irq_lock);
+	raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);
 
 	if (irqa->active || irqb->active) {
 		ret = (int)irqb->active - (int)irqa->active;
@@ -263,8 +263,8 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
 	/* Both pending and enabled, sort by priority */
 	ret = irqa->priority - irqb->priority;
 out:
-	spin_unlock(&irqb->irq_lock);
-	spin_unlock(&irqa->irq_lock);
+	raw_spin_unlock(&irqb->irq_lock);
+	raw_spin_unlock(&irqa->irq_lock);
 	return ret;
 }
 
@@ -325,7 +325,7 @@ retry:
 		 * not need to be inserted into an ap_list and there is also
 		 * no more work for us to do.
 		 */
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 		/*
 		 * We have to kick the VCPU here, because we could be
@@ -347,12 +347,12 @@ retry:
 	 * We must unlock the irq lock to take the ap_list_lock where
 	 * we are going to insert this new pending interrupt.
 	 */
-	spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 	/* someone can do stuff here, which we re-check below */
 
 	spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
-	spin_lock(&irq->irq_lock);
+	raw_spin_lock(&irq->irq_lock);
 
 	/*
 	 * Did something change behind our backs?
@@ -367,10 +367,10 @@ retry:
 	 */
 
 	if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
-		spin_unlock(&irq->irq_lock);
+		raw_spin_unlock(&irq->irq_lock);
 		spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
 
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		goto retry;
 	}
 
@@ -382,7 +382,7 @@ retry:
 	list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
 	irq->vcpu = vcpu;
 
-	spin_unlock(&irq->irq_lock);
+	raw_spin_unlock(&irq->irq_lock);
 	spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
 
 	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
@@ -430,11 +430,11 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
 	if (!irq)
 		return -EINVAL;
 
-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
 	if (!vgic_validate_injection(irq, level, owner)) {
 		/* Nothing to see here, move along... */
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 		vgic_put_irq(kvm, irq);
 		return 0;
 	}
@@ -494,9 +494,9 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
 
 	BUG_ON(!irq);
 
-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 	ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level);
-	spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 	vgic_put_irq(vcpu->kvm, irq);
 
 	return ret;
@@ -519,11 +519,11 @@ void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
 	if (!irq->hw)
 		goto out;
 
-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 	irq->active = false;
 	irq->pending_latch = false;
 	irq->line_level = false;
-	spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 out:
 	vgic_put_irq(vcpu->kvm, irq);
 }
@@ -539,9 +539,9 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
 	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
 	BUG_ON(!irq);
 
-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 	kvm_vgic_unmap_irq(irq);
-	spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 	vgic_put_irq(vcpu->kvm, irq);
 
 	return 0;
@@ -571,12 +571,12 @@ int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
 		return -EINVAL;
 
 	irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 	if (irq->owner && irq->owner != owner)
 		ret = -EEXIST;
 	else
 		irq->owner = owner;
-	spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 	return ret;
 }
@@ -603,7 +603,7 @@ retry:
 		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
 		bool target_vcpu_needs_kick = false;
 
-		spin_lock(&irq->irq_lock);
+		raw_spin_lock(&irq->irq_lock);
 
 		BUG_ON(vcpu != irq->vcpu);
 
@@ -616,7 +616,7 @@ retry:
 			 */
 			list_del(&irq->ap_list);
 			irq->vcpu = NULL;
-			spin_unlock(&irq->irq_lock);
+			raw_spin_unlock(&irq->irq_lock);
 
 			/*
 			 * This vgic_put_irq call matches the
@@ -631,13 +631,13 @@ retry:
 
 		if (target_vcpu == vcpu) {
 			/* We're on the right CPU */
-			spin_unlock(&irq->irq_lock);
+			raw_spin_unlock(&irq->irq_lock);
 			continue;
 		}
 
 		/* This interrupt looks like it has to be migrated. */
 
-		spin_unlock(&irq->irq_lock);
+		raw_spin_unlock(&irq->irq_lock);
 		spin_unlock(&vgic_cpu->ap_list_lock);
 
 		/*
@@ -655,7 +655,7 @@ retry:
 		spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
 		spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
 				 SINGLE_DEPTH_NESTING);
-		spin_lock(&irq->irq_lock);
+		raw_spin_lock(&irq->irq_lock);
 
 		/*
 		 * If the affinity has been preserved, move the
@@ -675,7 +675,7 @@ retry:
 			target_vcpu_needs_kick = true;
 		}
 
-		spin_unlock(&irq->irq_lock);
+		raw_spin_unlock(&irq->irq_lock);
 		spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
 		spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
 
@@ -741,10 +741,10 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
 		int w;
 
-		spin_lock(&irq->irq_lock);
+		raw_spin_lock(&irq->irq_lock);
 		/* GICv2 SGIs can count for more than one... */
 		w = vgic_irq_get_lr_count(irq);
-		spin_unlock(&irq->irq_lock);
+		raw_spin_unlock(&irq->irq_lock);
 
 		count += w;
 		*multi_sgi |= (w > 1);
@@ -770,7 +770,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 	count = 0;
 
 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
-		spin_lock(&irq->irq_lock);
+		raw_spin_lock(&irq->irq_lock);
 
 		/*
 		 * If we have multi-SGIs in the pipeline, we need to
@@ -780,7 +780,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 		 * the AP list has been sorted already.
 		 */
 		if (multi_sgi && irq->priority > prio) {
-			spin_unlock(&irq->irq_lock);
+			raw_spin_unlock(&irq->irq_lock);
 			break;
 		}
 
@@ -791,7 +791,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 			prio = irq->priority;
 		}
 
-		spin_unlock(&irq->irq_lock);
+		raw_spin_unlock(&irq->irq_lock);
 
 		if (count == kvm_vgic_global_state.nr_lr) {
 			if (!list_is_last(&irq->ap_list,
@@ -921,11 +921,11 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
 	spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
 
 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
-		spin_lock(&irq->irq_lock);
+		raw_spin_lock(&irq->irq_lock);
 		pending = irq_is_pending(irq) && irq->enabled &&
 			  !irq->active &&
 			  irq->priority < vmcr.pmr;
-		spin_unlock(&irq->irq_lock);
+		raw_spin_unlock(&irq->irq_lock);
 
 		if (pending)
 			break;
@@ -963,11 +963,10 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
 		return false;
 
 	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 	map_is_active = irq->hw && irq->active;
-	spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 	vgic_put_irq(vcpu->kvm, irq);
 
 	return map_is_active;
 }
-