author     Marcelo Tosatti <mtosatti@redhat.com>      2009-06-04 14:08:24 -0400
committer  Avi Kivity <avi@redhat.com>                2009-09-10 01:32:49 -0400
commit     fa40a8214bb9bcae8d49c234c19d8b4a6c1f37ff (patch)
tree       6449f27072f128a1c39faaaeef1787f754345aaf
parent     60eead79ad8750f80384cbe48fc44edcc78a0305 (diff)
KVM: switch irq injection/acking data structures to irq_lock
Protect irq injection/acking data structures with a separate irq_lock
mutex. This fixes the following deadlock:

    CPU A                                  CPU B
    kvm_vm_ioctl_deassign_dev_irq()
      mutex_lock(&kvm->lock);              worker_thread()
      -> kvm_deassign_irq()                  -> kvm_assigned_dev_interrupt_work_handler()
        -> deassign_host_irq()                  mutex_lock(&kvm->lock);
          -> cancel_work_sync() [blocked]

[gleb: fix ia64 path]

Reported-by: Alex Williamson <alex.williamson@hp.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
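To make the deadlock concrete, here is a minimal userspace model of it (a sketch in POSIX threads, not kernel code: vm_lock stands in for kvm->lock, the thread for the workqueue worker, and pthread_join() for cancel_work_sync()):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t vm_lock = PTHREAD_MUTEX_INITIALIZER; /* models kvm->lock */

/* Models kvm_assigned_dev_interrupt_work_handler(): before this patch,
 * the interrupt-injection worker also took the VM-wide lock. */
static void *interrupt_work(void *arg)
{
        pthread_mutex_lock(&vm_lock);   /* blocks: main thread holds vm_lock */
        /* ... inject irq into the guest ... */
        pthread_mutex_unlock(&vm_lock);
        return NULL;
}

int main(void)
{
        pthread_t worker;

        /* Models kvm_vm_ioctl_deassign_dev_irq() -> deassign_host_irq():
         * take the VM-wide lock, then wait for the worker to finish,
         * as cancel_work_sync() does. */
        pthread_mutex_lock(&vm_lock);
        pthread_create(&worker, NULL, interrupt_work, NULL);
        pthread_join(worker, NULL);     /* deadlock: worker waits for vm_lock */
        pthread_mutex_unlock(&vm_lock);
        puts("never reached");
        return 0;
}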
 arch/ia64/kvm/kvm-ia64.c |  4
 arch/x86/kvm/i8254.c     |  4
 arch/x86/kvm/lapic.c     |  4
 arch/x86/kvm/x86.c       | 19
 include/linux/kvm_host.h |  3
 virt/kvm/eventfd.c       |  4
 virt/kvm/irq_comm.c      | 34
 virt/kvm/kvm_main.c      | 16
 8 files changed, 58 insertions(+), 30 deletions(-)
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 319922137fdd..8dde36953af3 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1000,10 +1000,10 @@ long kvm_arch_vm_ioctl(struct file *filp,
 			goto out;
 		if (irqchip_in_kernel(kvm)) {
 			__s32 status;
-			mutex_lock(&kvm->lock);
+			mutex_lock(&kvm->irq_lock);
 			status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
 					irq_event.irq, irq_event.level);
-			mutex_unlock(&kvm->lock);
+			mutex_unlock(&kvm->irq_lock);
 			if (ioctl == KVM_IRQ_LINE_STATUS) {
 				irq_event.status = status;
 				if (copy_to_user(argp, &irq_event,
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 977af7ab8193..3837db65d33e 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -654,10 +654,10 @@ static void __inject_pit_timer_intr(struct kvm *kvm)
 	struct kvm_vcpu *vcpu;
 	int i;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->irq_lock);
 	kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1);
 	kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0);
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->irq_lock);
 
 	/*
 	 * Provides NMI watchdog support via Virtual Wire mode.
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index a23f42e550af..44f20cdb5709 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -424,7 +424,9 @@ static void apic_set_eoi(struct kvm_lapic *apic)
 		trigger_mode = IOAPIC_LEVEL_TRIG;
 	else
 		trigger_mode = IOAPIC_EDGE_TRIG;
+	mutex_lock(&apic->vcpu->kvm->irq_lock);
 	kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode);
+	mutex_unlock(&apic->vcpu->kvm->irq_lock);
 }
 
 static void apic_send_ipi(struct kvm_lapic *apic)
@@ -448,7 +450,9 @@ static void apic_send_ipi(struct kvm_lapic *apic)
 		   irq.trig_mode, irq.level, irq.dest_mode, irq.delivery_mode,
 		   irq.vector);
 
+	mutex_lock(&apic->vcpu->kvm->irq_lock);
 	kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq);
+	mutex_unlock(&apic->vcpu->kvm->irq_lock);
 }
 
 static u32 apic_get_tmcct(struct kvm_lapic *apic)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2ad8c97f58cc..05cbe83c74e2 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2136,10 +2136,10 @@ long kvm_arch_vm_ioctl(struct file *filp,
 			goto out;
 		if (irqchip_in_kernel(kvm)) {
 			__s32 status;
-			mutex_lock(&kvm->lock);
+			mutex_lock(&kvm->irq_lock);
 			status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
 					irq_event.irq, irq_event.level);
-			mutex_unlock(&kvm->lock);
+			mutex_unlock(&kvm->irq_lock);
 			if (ioctl == KVM_IRQ_LINE_STATUS) {
 				irq_event.status = status;
 				if (copy_to_user(argp, &irq_event,
@@ -2385,12 +2385,11 @@ mmio:
 	 */
 	mutex_lock(&vcpu->kvm->lock);
 	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 0);
+	mutex_unlock(&vcpu->kvm->lock);
 	if (mmio_dev) {
 		kvm_iodevice_read(mmio_dev, gpa, bytes, val);
-		mutex_unlock(&vcpu->kvm->lock);
 		return X86EMUL_CONTINUE;
 	}
-	mutex_unlock(&vcpu->kvm->lock);
 
 	vcpu->mmio_needed = 1;
 	vcpu->mmio_phys_addr = gpa;
@@ -2440,12 +2439,11 @@ mmio:
 	 */
 	mutex_lock(&vcpu->kvm->lock);
 	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 1);
+	mutex_unlock(&vcpu->kvm->lock);
 	if (mmio_dev) {
 		kvm_iodevice_write(mmio_dev, gpa, bytes, val);
-		mutex_unlock(&vcpu->kvm->lock);
 		return X86EMUL_CONTINUE;
 	}
-	mutex_unlock(&vcpu->kvm->lock);
 
 	vcpu->mmio_needed = 1;
 	vcpu->mmio_phys_addr = gpa;
@@ -2768,7 +2766,6 @@ static void kernel_pio(struct kvm_io_device *pio_dev,
 {
 	/* TODO: String I/O for in kernel device */
 
-	mutex_lock(&vcpu->kvm->lock);
 	if (vcpu->arch.pio.in)
 		kvm_iodevice_read(pio_dev, vcpu->arch.pio.port,
 				  vcpu->arch.pio.size,
@@ -2777,7 +2774,6 @@ static void kernel_pio(struct kvm_io_device *pio_dev,
 		kvm_iodevice_write(pio_dev, vcpu->arch.pio.port,
 				   vcpu->arch.pio.size,
 				   pd);
-	mutex_unlock(&vcpu->kvm->lock);
 }
 
 static void pio_string_write(struct kvm_io_device *pio_dev,
@@ -2787,14 +2783,12 @@ static void pio_string_write(struct kvm_io_device *pio_dev,
 	void *pd = vcpu->arch.pio_data;
 	int i;
 
-	mutex_lock(&vcpu->kvm->lock);
 	for (i = 0; i < io->cur_count; i++) {
 		kvm_iodevice_write(pio_dev, io->port,
 				   io->size,
 				   pd);
 		pd += io->size;
 	}
-	mutex_unlock(&vcpu->kvm->lock);
 }
 
 static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
@@ -2831,7 +2825,9 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 	val = kvm_register_read(vcpu, VCPU_REGS_RAX);
 	memcpy(vcpu->arch.pio_data, &val, 4);
 
+	mutex_lock(&vcpu->kvm->lock);
 	pio_dev = vcpu_find_pio_dev(vcpu, port, size, !in);
+	mutex_unlock(&vcpu->kvm->lock);
 	if (pio_dev) {
 		kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data);
 		complete_pio(vcpu);
@@ -2895,9 +2891,12 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 
 	vcpu->arch.pio.guest_gva = address;
 
+	mutex_lock(&vcpu->kvm->lock);
 	pio_dev = vcpu_find_pio_dev(vcpu, port,
 				    vcpu->arch.pio.cur_count,
 				    !vcpu->arch.pio.in);
+	mutex_unlock(&vcpu->kvm->lock);
+
 	if (!vcpu->arch.pio.in) {
 		/* string PIO write */
 		ret = pio_copy_data(vcpu);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 0c71688b1ee3..a29ea030dd8e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -371,7 +371,8 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
 				   struct kvm_irq_ack_notifier *kian);
-void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian);
+void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
+				     struct kvm_irq_ack_notifier *kian);
 int kvm_request_irq_source_id(struct kvm *kvm);
 void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
 
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 314012323afe..4092b8dcd510 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -57,10 +57,10 @@ irqfd_inject(struct work_struct *work)
 	struct _irqfd *irqfd = container_of(work, struct _irqfd, inject);
 	struct kvm *kvm = irqfd->kvm;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->irq_lock);
 	kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1);
 	kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0);
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->irq_lock);
 }
 
 /*
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index ddc17f0e2f35..08a9a49481b2 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -62,6 +62,8 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
 	int i, r = -1;
 	struct kvm_vcpu *vcpu, *lowest = NULL;
 
+	WARN_ON(!mutex_is_locked(&kvm->irq_lock));
+
 	if (irq->dest_mode == 0 && irq->dest_id == 0xff &&
 			kvm_is_dm_lowest_prio(irq))
 		printk(KERN_INFO "kvm: apic: phys broadcast and lowest prio\n");
@@ -113,7 +115,7 @@ static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
 	return kvm_irq_delivery_to_apic(kvm, NULL, &irq);
 }
 
-/* This should be called with the kvm->lock mutex held
+/* This should be called with the kvm->irq_lock mutex held
  * Return value:
  *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
  *  = 0   Interrupt was coalesced (previous irq is still pending)
@@ -125,6 +127,8 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level)
 	unsigned long *irq_state, sig_level;
 	int ret = -1;
 
+	WARN_ON(!mutex_is_locked(&kvm->irq_lock));
+
 	if (irq < KVM_IOAPIC_NUM_PINS) {
 		irq_state = (unsigned long *)&kvm->arch.irq_states[irq];
 
@@ -175,19 +179,26 @@ void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
 				   struct kvm_irq_ack_notifier *kian)
 {
+	mutex_lock(&kvm->irq_lock);
 	hlist_add_head(&kian->link, &kvm->arch.irq_ack_notifier_list);
+	mutex_unlock(&kvm->irq_lock);
 }
 
-void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian)
+void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
+				     struct kvm_irq_ack_notifier *kian)
 {
+	mutex_lock(&kvm->irq_lock);
 	hlist_del_init(&kian->link);
+	mutex_unlock(&kvm->irq_lock);
 }
 
-/* The caller must hold kvm->lock mutex */
 int kvm_request_irq_source_id(struct kvm *kvm)
 {
 	unsigned long *bitmap = &kvm->arch.irq_sources_bitmap;
-	int irq_source_id = find_first_zero_bit(bitmap,
+	int irq_source_id;
+
+	mutex_lock(&kvm->irq_lock);
+	irq_source_id = find_first_zero_bit(bitmap,
 				sizeof(kvm->arch.irq_sources_bitmap));
 
 	if (irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) {
@@ -197,6 +208,7 @@ int kvm_request_irq_source_id(struct kvm *kvm)
 
 	ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
 	set_bit(irq_source_id, bitmap);
+	mutex_unlock(&kvm->irq_lock);
 
 	return irq_source_id;
 }
@@ -207,6 +219,7 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
 
 	ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
 
+	mutex_lock(&kvm->irq_lock);
 	if (irq_source_id < 0 ||
 	    irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) {
 		printk(KERN_ERR "kvm: IRQ source ID out of range!\n");
@@ -215,19 +228,24 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
 	for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++)
 		clear_bit(irq_source_id, &kvm->arch.irq_states[i]);
 	clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
+	mutex_unlock(&kvm->irq_lock);
 }
 
 void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
 				    struct kvm_irq_mask_notifier *kimn)
 {
+	mutex_lock(&kvm->irq_lock);
 	kimn->irq = irq;
 	hlist_add_head(&kimn->link, &kvm->mask_notifier_list);
+	mutex_unlock(&kvm->irq_lock);
 }
 
 void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
 				      struct kvm_irq_mask_notifier *kimn)
 {
+	mutex_lock(&kvm->irq_lock);
 	hlist_del(&kimn->link);
+	mutex_unlock(&kvm->irq_lock);
 }
 
 void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask)
@@ -235,6 +253,8 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask)
 	struct kvm_irq_mask_notifier *kimn;
 	struct hlist_node *n;
 
+	WARN_ON(!mutex_is_locked(&kvm->irq_lock));
+
 	hlist_for_each_entry(kimn, n, &kvm->mask_notifier_list, link)
 		if (kimn->irq == irq)
 			kimn->func(kimn, mask);
@@ -250,7 +270,9 @@ static void __kvm_free_irq_routing(struct list_head *irq_routing)
 
 void kvm_free_irq_routing(struct kvm *kvm)
 {
+	mutex_lock(&kvm->irq_lock);
 	__kvm_free_irq_routing(&kvm->irq_routing);
+	mutex_unlock(&kvm->irq_lock);
 }
 
 static int setup_routing_entry(struct kvm_kernel_irq_routing_entry *e,
@@ -325,13 +347,13 @@ int kvm_set_irq_routing(struct kvm *kvm,
 		e = NULL;
 	}
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->irq_lock);
 	list_splice(&kvm->irq_routing, &tmp);
 	INIT_LIST_HEAD(&kvm->irq_routing);
 	list_splice(&irq_list, &kvm->irq_routing);
 	INIT_LIST_HEAD(&irq_list);
 	list_splice(&tmp, &irq_list);
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->irq_lock);
 
 	r = 0;
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d47e660fb709..0d481b282448 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -62,6 +62,12 @@
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
+/*
+ * Ordering of locks:
+ *
+ *		kvm->lock --> kvm->irq_lock
+ */
+
 DEFINE_SPINLOCK(kvm_lock);
 LIST_HEAD(vm_list);
 
@@ -126,11 +132,7 @@ static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
 				    interrupt_work);
 	kvm = assigned_dev->kvm;
 
-	/* This is taken to safely inject irq inside the guest. When
-	 * the interrupt injection (or the ioapic code) uses a
-	 * finer-grained lock, update this
-	 */
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->irq_lock);
 	spin_lock_irq(&assigned_dev->assigned_dev_lock);
 	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
 		struct kvm_guest_msix_entry *guest_entries =
@@ -149,7 +151,7 @@ static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
 			    assigned_dev->guest_irq, 1);
 
 	spin_unlock_irq(&assigned_dev->assigned_dev_lock);
-	mutex_unlock(&assigned_dev->kvm->lock);
+	mutex_unlock(&assigned_dev->kvm->irq_lock);
 }
 
 static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
@@ -207,7 +209,7 @@ static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
 static void deassign_guest_irq(struct kvm *kvm,
 			       struct kvm_assigned_dev_kernel *assigned_dev)
 {
-	kvm_unregister_irq_ack_notifier(&assigned_dev->ack_notifier);
+	kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
 	assigned_dev->ack_notifier.gsi = -1;
 
 	if (assigned_dev->irq_source_id != -1)
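With the patch applied, injection paths take only the new kvm->irq_lock, so the deassign path can hold kvm->lock across cancel_work_sync() without deadlocking; per the ordering comment added in kvm_main.c, kvm->lock may be taken before kvm->irq_lock but never the reverse. A sketch of the repaired model under the same assumptions as the earlier one (pthreads standing in for the kernel primitives):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t vm_lock  = PTHREAD_MUTEX_INITIALIZER; /* models kvm->lock */
static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER; /* models kvm->irq_lock */

/* After the patch the worker takes only the finer-grained irq_lock. */
static void *interrupt_work(void *arg)
{
        pthread_mutex_lock(&irq_lock);
        /* ... kvm_set_irq() under irq_lock ... */
        pthread_mutex_unlock(&irq_lock);
        return NULL;
}

int main(void)
{
        pthread_t worker;

        pthread_mutex_lock(&vm_lock);   /* deassign path still holds kvm->lock */
        pthread_create(&worker, NULL, interrupt_work, NULL);
        pthread_join(worker, NULL);     /* completes: worker never needs vm_lock */
        pthread_mutex_unlock(&vm_lock);
        puts("worker finished; no deadlock");
        return 0;
}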