author    Gleb Natapov <gleb@redhat.com>  2009-08-24 04:54:24 -0400
committer Avi Kivity <avi@redhat.com>     2009-12-03 02:32:08 -0500
commit    280aa177dcd1edc718d8a92f17f235b783ec6307 (patch)
tree      63597a9ffd8cf72571986584aee82c067b5b684d
parent    136bdfeee7b5bc986fc94af3a40d7d13ea37bb95 (diff)
KVM: Convert irq notifiers lists to RCU locking
Use RCU locking for mask/ack notifiers lists.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
-rw-r--r--  virt/kvm/irq_comm.c  |  22
1 file changed, 12 insertions(+), 10 deletions(-)
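The conversion follows the usual RCU list pattern: readers walk the notifier lists inside an RCU read-side critical section using the _rcu list iterator, while writers keep serializing against each other with kvm->irq_lock, switch to the _rcu list mutators, and call synchronize_rcu() after unlinking so an entry is only freed once all in-flight readers are done. Below is a minimal sketch of that pattern, not KVM code: the my_notifier type, my_list head, and my_lock mutex are hypothetical names for illustration, and it uses the four-argument hlist_for_each_entry_rcu() form that this era of the kernel (and this patch) uses.

#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

/* Hypothetical notifier type and list; names are illustrative only. */
struct my_notifier {
	int irq;
	void (*func)(struct my_notifier *n);
	struct hlist_node link;
};

static HLIST_HEAD(my_list);
static DEFINE_MUTEX(my_lock);

/* Read side: no mutex needed, only an RCU read-side critical section. */
static void my_fire_notifiers(int irq)
{
	struct my_notifier *n;
	struct hlist_node *pos;

	rcu_read_lock();
	hlist_for_each_entry_rcu(n, pos, &my_list, link)
		if (n->irq == irq)
			n->func(n);
	rcu_read_unlock();
}

/* Write side: writers still serialize on a mutex and use _rcu mutators. */
static void my_register(struct my_notifier *n)
{
	mutex_lock(&my_lock);
	hlist_add_head_rcu(&n->link, &my_list);
	mutex_unlock(&my_lock);
}

static void my_unregister(struct my_notifier *n)
{
	mutex_lock(&my_lock);
	hlist_del_rcu(&n->link);
	mutex_unlock(&my_lock);
	/* Wait for readers that may still hold a reference to the entry. */
	synchronize_rcu();
	/* Only now is it safe for the caller to free *n. */
}

The synchronize_rcu() after unlinking is the key point of the unregister paths below: it lets callers of kvm_unregister_irq_ack_notifier() and kvm_unregister_irq_mask_notifier() free the notifier structure as soon as the function returns.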
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index f01972595938..6c946141dbcc 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -183,19 +183,19 @@ void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
 
 	rcu_read_lock();
 	gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
-	rcu_read_unlock();
-
 	if (gsi != -1)
-		hlist_for_each_entry(kian, n, &kvm->irq_ack_notifier_list, link)
+		hlist_for_each_entry_rcu(kian, n, &kvm->irq_ack_notifier_list,
+					 link)
 			if (kian->gsi == gsi)
 				kian->irq_acked(kian);
+	rcu_read_unlock();
 }
 
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
 				   struct kvm_irq_ack_notifier *kian)
 {
 	mutex_lock(&kvm->irq_lock);
-	hlist_add_head(&kian->link, &kvm->irq_ack_notifier_list);
+	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
 	mutex_unlock(&kvm->irq_lock);
 }
 
@@ -203,8 +203,9 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
 				    struct kvm_irq_ack_notifier *kian)
 {
 	mutex_lock(&kvm->irq_lock);
-	hlist_del_init(&kian->link);
+	hlist_del_init_rcu(&kian->link);
 	mutex_unlock(&kvm->irq_lock);
+	synchronize_rcu();
 }
 
 int kvm_request_irq_source_id(struct kvm *kvm)
@@ -257,7 +258,7 @@ void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
 {
 	mutex_lock(&kvm->irq_lock);
 	kimn->irq = irq;
-	hlist_add_head(&kimn->link, &kvm->mask_notifier_list);
+	hlist_add_head_rcu(&kimn->link, &kvm->mask_notifier_list);
 	mutex_unlock(&kvm->irq_lock);
 }
 
@@ -265,8 +266,9 @@ void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
 				      struct kvm_irq_mask_notifier *kimn)
 {
 	mutex_lock(&kvm->irq_lock);
-	hlist_del(&kimn->link);
+	hlist_del_rcu(&kimn->link);
 	mutex_unlock(&kvm->irq_lock);
+	synchronize_rcu();
 }
 
 void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask)
@@ -274,11 +276,11 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask)
 	struct kvm_irq_mask_notifier *kimn;
 	struct hlist_node *n;
 
-	WARN_ON(!mutex_is_locked(&kvm->irq_lock));
-
-	hlist_for_each_entry(kimn, n, &kvm->mask_notifier_list, link)
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(kimn, n, &kvm->mask_notifier_list, link)
 		if (kimn->irq == irq)
 			kimn->func(kimn, mask);
+	rcu_read_unlock();
 }
 
 void kvm_free_irq_routing(struct kvm *kvm)