aboutsummaryrefslogtreecommitdiffstats
path: root/virt/kvm
diff options
context:
space:
mode:
Diffstat (limited to 'virt/kvm')
-rw-r--r--	virt/kvm/irq_comm.c	| 16 ++++++++++------
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 59cf8dae0062..fb861dd956fc 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -159,7 +159,8 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
 	 * IOAPIC. So set the bit in both. The guest will ignore
 	 * writes to the unused one.
 	 */
-	irq_rt = kvm->irq_routing;
+	rcu_read_lock();
+	irq_rt = rcu_dereference(kvm->irq_routing);
 	if (irq < irq_rt->nr_rt_entries)
 		hlist_for_each_entry(e, n, &irq_rt->map[irq], link) {
 			int r = e->set(e, kvm, irq_source_id, level);
@@ -168,6 +169,7 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
 
 		ret = r + ((ret < 0) ? 0 : ret);
 	}
+	rcu_read_unlock();
 	return ret;
 }
 
@@ -179,7 +181,10 @@ void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
 
 	trace_kvm_ack_irq(irqchip, pin);
 
-	gsi = kvm->irq_routing->chip[irqchip][pin];
+	rcu_read_lock();
+	gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
+	rcu_read_unlock();
+
 	if (gsi != -1)
 		hlist_for_each_entry(kian, n, &kvm->arch.irq_ack_notifier_list,
 			     link)
@@ -279,9 +284,9 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask)
 
 void kvm_free_irq_routing(struct kvm *kvm)
 {
-	mutex_lock(&kvm->irq_lock);
+	/* Called only during vm destruction. Nobody can use the pointer
+	   at this stage */
 	kfree(kvm->irq_routing);
-	mutex_unlock(&kvm->irq_lock);
 }
 
 static int setup_routing_entry(struct kvm_irq_routing_table *rt,
@@ -387,8 +392,9 @@ int kvm_set_irq_routing(struct kvm *kvm,
 
 	mutex_lock(&kvm->irq_lock);
 	old = kvm->irq_routing;
-	kvm->irq_routing = new;
+	rcu_assign_pointer(kvm->irq_routing, new);
 	mutex_unlock(&kvm->irq_lock);
+	synchronize_rcu();
 
 	new = old;
 	r = 0;