author     Marcelo Tosatti <mtosatti@redhat.com>   2009-06-04 14:08:24 -0400
committer  Avi Kivity <avi@redhat.com>             2009-09-10 01:32:49 -0400
commit     fa40a8214bb9bcae8d49c234c19d8b4a6c1f37ff (patch)
tree       6449f27072f128a1c39faaaeef1787f754345aaf /arch/x86
parent     60eead79ad8750f80384cbe48fc44edcc78a0305 (diff)
KVM: switch irq injection/acking data structures to irq_lock
Protect irq injection/acking data structures with a separate irq_lock
mutex. This fixes the following deadlock:
    CPU A                                  CPU B
    kvm_vm_ioctl_deassign_dev_irq()
      mutex_lock(&kvm->lock);              worker_thread()
      -> kvm_deassign_irq()                  -> kvm_assigned_dev_interrupt_work_handler()
        -> deassign_host_irq()                    mutex_lock(&kvm->lock);
          -> cancel_work_sync() [blocked]
[gleb: fix ia64 path]
Reported-by: Alex Williamson <alex.williamson@hp.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
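
For readers unfamiliar with the pattern, the deadlock above is the classic cancel_work_sync()-under-a-lock case: the ioctl path holds kvm->lock while waiting for a work item whose handler also needs kvm->lock. A minimal sketch of that shape, with hypothetical stand-in names (demo_vm, demo_irq_work_fn, demo_deassign_irq are illustration only, not the actual KVM code):

#include <linux/mutex.h>
#include <linux/workqueue.h>

/* Hypothetical, pared-down stand-in for the structures involved. */
struct demo_vm {
	struct mutex lock;            /* plays the role of kvm->lock */
	struct work_struct irq_work;  /* assigned-device interrupt work */
};

/* CPU B: worker_thread() runs this handler, which needs the same mutex. */
static void demo_irq_work_fn(struct work_struct *work)
{
	struct demo_vm *vm = container_of(work, struct demo_vm, irq_work);

	mutex_lock(&vm->lock);        /* blocks: CPU A already holds it */
	/* ... inject the assigned-device interrupt ... */
	mutex_unlock(&vm->lock);
}

/* CPU A: the deassign ioctl path. */
static void demo_deassign_irq(struct demo_vm *vm)
{
	mutex_lock(&vm->lock);
	/*
	 * cancel_work_sync() must wait for a running instance of the work
	 * item to finish, but the handler is stuck on vm->lock above, so
	 * neither side can make progress: deadlock.
	 */
	cancel_work_sync(&vm->irq_work);
	mutex_unlock(&vm->lock);
}

Moving the irq injection/acking structures under a dedicated kvm->irq_lock means the interrupt work handler no longer contends for kvm->lock, which breaks the cycle.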
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kvm/i8254.c |  4
-rw-r--r--  arch/x86/kvm/lapic.c |  4
-rw-r--r--  arch/x86/kvm/x86.c   | 19
3 files changed, 15 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 977af7ab8193..3837db65d33e 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -654,10 +654,10 @@ static void __inject_pit_timer_intr(struct kvm *kvm)
 	struct kvm_vcpu *vcpu;
 	int i;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->irq_lock);
 	kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1);
 	kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0);
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->irq_lock);
 
 	/*
 	 * Provides NMI watchdog support via Virtual Wire mode.
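
The hunk above is representative of the new rule for interrupt injection: callers of kvm_set_irq() now serialize on kvm->irq_lock rather than kvm->lock. As a rough illustration only (demo_pulse_irq is a hypothetical helper, not part of the patch), a raise/lower pulse under the new locking looks like:

/* Hypothetical helper, not in the patch: pulse an interrupt line under
 * the kvm->irq_lock rule used by __inject_pit_timer_intr() above. */
static void demo_pulse_irq(struct kvm *kvm, int irq_source_id, int irq)
{
	mutex_lock(&kvm->irq_lock);
	kvm_set_irq(kvm, irq_source_id, irq, 1);   /* assert the line */
	kvm_set_irq(kvm, irq_source_id, irq, 0);   /* deassert the line */
	mutex_unlock(&kvm->irq_lock);
}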
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index a23f42e550af..44f20cdb5709 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -424,7 +424,9 @@ static void apic_set_eoi(struct kvm_lapic *apic)
 		trigger_mode = IOAPIC_LEVEL_TRIG;
 	else
 		trigger_mode = IOAPIC_EDGE_TRIG;
+	mutex_lock(&apic->vcpu->kvm->irq_lock);
 	kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode);
+	mutex_unlock(&apic->vcpu->kvm->irq_lock);
 }
 
 static void apic_send_ipi(struct kvm_lapic *apic)
@@ -448,7 +450,9 @@ static void apic_send_ipi(struct kvm_lapic *apic)
 		   irq.trig_mode, irq.level, irq.dest_mode, irq.delivery_mode,
 		   irq.vector);
 
+	mutex_lock(&apic->vcpu->kvm->irq_lock);
 	kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq);
+	mutex_unlock(&apic->vcpu->kvm->irq_lock);
 }
 
 static u32 apic_get_tmcct(struct kvm_lapic *apic)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2ad8c97f58cc..05cbe83c74e2 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2136,10 +2136,10 @@ long kvm_arch_vm_ioctl(struct file *filp,
 			goto out;
 		if (irqchip_in_kernel(kvm)) {
 			__s32 status;
-			mutex_lock(&kvm->lock);
+			mutex_lock(&kvm->irq_lock);
 			status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
 					irq_event.irq, irq_event.level);
-			mutex_unlock(&kvm->lock);
+			mutex_unlock(&kvm->irq_lock);
 			if (ioctl == KVM_IRQ_LINE_STATUS) {
 				irq_event.status = status;
 				if (copy_to_user(argp, &irq_event,
@@ -2385,12 +2385,11 @@ mmio:
 	 */
 	mutex_lock(&vcpu->kvm->lock);
 	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 0);
+	mutex_unlock(&vcpu->kvm->lock);
 	if (mmio_dev) {
 		kvm_iodevice_read(mmio_dev, gpa, bytes, val);
-		mutex_unlock(&vcpu->kvm->lock);
 		return X86EMUL_CONTINUE;
 	}
-	mutex_unlock(&vcpu->kvm->lock);
 
 	vcpu->mmio_needed = 1;
 	vcpu->mmio_phys_addr = gpa;
@@ -2440,12 +2439,11 @@ mmio:
 	 */
 	mutex_lock(&vcpu->kvm->lock);
 	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 1);
+	mutex_unlock(&vcpu->kvm->lock);
 	if (mmio_dev) {
 		kvm_iodevice_write(mmio_dev, gpa, bytes, val);
-		mutex_unlock(&vcpu->kvm->lock);
 		return X86EMUL_CONTINUE;
 	}
-	mutex_unlock(&vcpu->kvm->lock);
 
 	vcpu->mmio_needed = 1;
 	vcpu->mmio_phys_addr = gpa;
@@ -2768,7 +2766,6 @@ static void kernel_pio(struct kvm_io_device *pio_dev,
 {
 	/* TODO: String I/O for in kernel device */
 
-	mutex_lock(&vcpu->kvm->lock);
 	if (vcpu->arch.pio.in)
 		kvm_iodevice_read(pio_dev, vcpu->arch.pio.port,
 				  vcpu->arch.pio.size,
@@ -2777,7 +2774,6 @@ static void kernel_pio(struct kvm_io_device *pio_dev,
 		kvm_iodevice_write(pio_dev, vcpu->arch.pio.port,
 				   vcpu->arch.pio.size,
 				   pd);
-	mutex_unlock(&vcpu->kvm->lock);
 }
 
 static void pio_string_write(struct kvm_io_device *pio_dev,
@@ -2787,14 +2783,12 @@ static void pio_string_write(struct kvm_io_device *pio_dev,
 	void *pd = vcpu->arch.pio_data;
 	int i;
 
-	mutex_lock(&vcpu->kvm->lock);
 	for (i = 0; i < io->cur_count; i++) {
 		kvm_iodevice_write(pio_dev, io->port,
 				   io->size,
 				   pd);
 		pd += io->size;
 	}
-	mutex_unlock(&vcpu->kvm->lock);
 }
 
 static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
@@ -2831,7 +2825,9 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 	val = kvm_register_read(vcpu, VCPU_REGS_RAX);
 	memcpy(vcpu->arch.pio_data, &val, 4);
 
+	mutex_lock(&vcpu->kvm->lock);
 	pio_dev = vcpu_find_pio_dev(vcpu, port, size, !in);
+	mutex_unlock(&vcpu->kvm->lock);
 	if (pio_dev) {
 		kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data);
 		complete_pio(vcpu);
@@ -2895,9 +2891,12 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 
 	vcpu->arch.pio.guest_gva = address;
 
+	mutex_lock(&vcpu->kvm->lock);
 	pio_dev = vcpu_find_pio_dev(vcpu, port,
 				    vcpu->arch.pio.cur_count,
 				    !vcpu->arch.pio.in);
+	mutex_unlock(&vcpu->kvm->lock);
+
 	if (!vcpu->arch.pio.in) {
 		/* string PIO write */
 		ret = pio_copy_data(vcpu);
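
The remaining x86.c hunks all follow one pattern: kvm->lock is kept only around the in-kernel I/O bus lookup (vcpu_find_mmio_dev()/vcpu_find_pio_dev()), and the device callbacks (kvm_iodevice_read()/kvm_iodevice_write(), kernel_pio(), pio_string_write()) now run without it. A hypothetical helper capturing that shape, with parameter types assumed from how the lookup is called above:

/* Hypothetical helper, not in the patch: confine kvm->lock to the
 * device lookup; the caller touches the returned device unlocked. */
static struct kvm_io_device *demo_find_pio_dev(struct kvm_vcpu *vcpu,
					       unsigned port, int len,
					       int is_write)
{
	struct kvm_io_device *pio_dev;

	mutex_lock(&vcpu->kvm->lock);
	pio_dev = vcpu_find_pio_dev(vcpu, port, len, is_write);
	mutex_unlock(&vcpu->kvm->lock);

	return pio_dev;   /* subsequently accessed without kvm->lock held */
}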