about | summary | refs | log | tree | commit | diff | stats
path: root/virt
diff options
context:
space:
mode:
Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/eventfd.c    4
-rw-r--r--  virt/kvm/irq_comm.c  34
-rw-r--r--  virt/kvm/kvm_main.c  16
3 files changed, 39 insertions, 15 deletions
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 314012323afe..4092b8dcd510 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -57,10 +57,10 @@ irqfd_inject(struct work_struct *work)
 	struct _irqfd *irqfd = container_of(work, struct _irqfd, inject);
 	struct kvm *kvm = irqfd->kvm;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->irq_lock);
 	kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1);
 	kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0);
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->irq_lock);
 }
 
 /*
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index ddc17f0e2f35..08a9a49481b2 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -62,6 +62,8 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
 	int i, r = -1;
 	struct kvm_vcpu *vcpu, *lowest = NULL;
 
+	WARN_ON(!mutex_is_locked(&kvm->irq_lock));
+
 	if (irq->dest_mode == 0 && irq->dest_id == 0xff &&
 			kvm_is_dm_lowest_prio(irq))
 		printk(KERN_INFO "kvm: apic: phys broadcast and lowest prio\n");
@@ -113,7 +115,7 @@ static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
 	return kvm_irq_delivery_to_apic(kvm, NULL, &irq);
 }
 
-/* This should be called with the kvm->lock mutex held
+/* This should be called with the kvm->irq_lock mutex held
  * Return value:
  *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
  *  = 0   Interrupt was coalesced (previous irq is still pending)
@@ -125,6 +127,8 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level)
 	unsigned long *irq_state, sig_level;
 	int ret = -1;
 
+	WARN_ON(!mutex_is_locked(&kvm->irq_lock));
+
 	if (irq < KVM_IOAPIC_NUM_PINS) {
 		irq_state = (unsigned long *)&kvm->arch.irq_states[irq];
 
@@ -175,19 +179,26 @@ void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
 				   struct kvm_irq_ack_notifier *kian)
 {
+	mutex_lock(&kvm->irq_lock);
 	hlist_add_head(&kian->link, &kvm->arch.irq_ack_notifier_list);
+	mutex_unlock(&kvm->irq_lock);
 }
 
-void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian)
+void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
+				     struct kvm_irq_ack_notifier *kian)
 {
+	mutex_lock(&kvm->irq_lock);
 	hlist_del_init(&kian->link);
+	mutex_unlock(&kvm->irq_lock);
 }
 
-/* The caller must hold kvm->lock mutex */
 int kvm_request_irq_source_id(struct kvm *kvm)
 {
 	unsigned long *bitmap = &kvm->arch.irq_sources_bitmap;
-	int irq_source_id = find_first_zero_bit(bitmap,
+	int irq_source_id;
+
+	mutex_lock(&kvm->irq_lock);
+	irq_source_id = find_first_zero_bit(bitmap,
 		sizeof(kvm->arch.irq_sources_bitmap));
 
 	if (irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) {
@@ -197,6 +208,7 @@ int kvm_request_irq_source_id(struct kvm *kvm)
 
 	ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
 	set_bit(irq_source_id, bitmap);
+	mutex_unlock(&kvm->irq_lock);
 
 	return irq_source_id;
 }
@@ -207,6 +219,7 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
 
 	ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
 
+	mutex_lock(&kvm->irq_lock);
 	if (irq_source_id < 0 ||
 	    irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) {
 		printk(KERN_ERR "kvm: IRQ source ID out of range!\n");
@@ -215,19 +228,24 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
 	for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++)
 		clear_bit(irq_source_id, &kvm->arch.irq_states[i]);
 	clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
+	mutex_unlock(&kvm->irq_lock);
 }
 
 void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
 				    struct kvm_irq_mask_notifier *kimn)
 {
+	mutex_lock(&kvm->irq_lock);
 	kimn->irq = irq;
 	hlist_add_head(&kimn->link, &kvm->mask_notifier_list);
+	mutex_unlock(&kvm->irq_lock);
 }
 
 void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
 				      struct kvm_irq_mask_notifier *kimn)
 {
+	mutex_lock(&kvm->irq_lock);
 	hlist_del(&kimn->link);
+	mutex_unlock(&kvm->irq_lock);
 }
 
 void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask)
@@ -235,6 +253,8 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask)
 	struct kvm_irq_mask_notifier *kimn;
 	struct hlist_node *n;
 
+	WARN_ON(!mutex_is_locked(&kvm->irq_lock));
+
 	hlist_for_each_entry(kimn, n, &kvm->mask_notifier_list, link)
 		if (kimn->irq == irq)
 			kimn->func(kimn, mask);
@@ -250,7 +270,9 @@ static void __kvm_free_irq_routing(struct list_head *irq_routing)
 
 void kvm_free_irq_routing(struct kvm *kvm)
 {
+	mutex_lock(&kvm->irq_lock);
 	__kvm_free_irq_routing(&kvm->irq_routing);
+	mutex_unlock(&kvm->irq_lock);
 }
 
 static int setup_routing_entry(struct kvm_kernel_irq_routing_entry *e,
@@ -325,13 +347,13 @@ int kvm_set_irq_routing(struct kvm *kvm,
 		e = NULL;
 	}
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->irq_lock);
 	list_splice(&kvm->irq_routing, &tmp);
 	INIT_LIST_HEAD(&kvm->irq_routing);
 	list_splice(&irq_list, &kvm->irq_routing);
 	INIT_LIST_HEAD(&irq_list);
 	list_splice(&tmp, &irq_list);
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->irq_lock);
 
 	r = 0;
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d47e660fb709..0d481b282448 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -62,6 +62,12 @@
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
+/*
+ * Ordering of locks:
+ *
+ * 	kvm->lock --> kvm->irq_lock
+ */
+
 DEFINE_SPINLOCK(kvm_lock);
 LIST_HEAD(vm_list);
 
@@ -126,11 +132,7 @@ static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
 				    interrupt_work);
 	kvm = assigned_dev->kvm;
 
-	/* This is taken to safely inject irq inside the guest. When
-	 * the interrupt injection (or the ioapic code) uses a
-	 * finer-grained lock, update this
-	 */
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->irq_lock);
 	spin_lock_irq(&assigned_dev->assigned_dev_lock);
 	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
 		struct kvm_guest_msix_entry *guest_entries =
@@ -149,7 +151,7 @@ static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
 				    assigned_dev->guest_irq, 1);
 
 	spin_unlock_irq(&assigned_dev->assigned_dev_lock);
-	mutex_unlock(&assigned_dev->kvm->lock);
+	mutex_unlock(&assigned_dev->kvm->irq_lock);
 }
 
155static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id) 157static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
@@ -207,7 +209,7 @@ static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
 static void deassign_guest_irq(struct kvm *kvm,
 			       struct kvm_assigned_dev_kernel *assigned_dev)
 {
-	kvm_unregister_irq_ack_notifier(&assigned_dev->ack_notifier);
+	kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
 	assigned_dev->ack_notifier.gsi = -1;
 
 	if (assigned_dev->irq_source_id != -1)