author     Stefano Stabellini <stefano.stabellini@eu.citrix.com>   2010-12-01 09:51:44 -0500
committer  Stefano Stabellini <stefano.stabellini@eu.citrix.com>   2010-12-02 09:34:25 -0500
commit     af42b8d12f8adec6711cb824549a0edac6a4ae8f (patch)
tree       e922110d01d85688a03f9ae5d7e31c9f73f2b001 /drivers/xen/events.c
parent     e5fc7345412d5e4758fcef55a74354c5cbefd61e (diff)
xen: fix MSI setup and teardown for PV on HVM guests
When remapping MSIs into pirqs for PV on HVM guests, qemu is responsible
for doing the actual mapping and unmapping.
We only give qemu the desired pirq number the first time we ask it to do
the mapping; after that, we should read the pirq number back from qemu
every time we want to re-enable the MSI.
This fixes a bug in xen_hvm_setup_msi_irqs that manifests itself when
trying to enable the same MSI a second time: the old MSI-to-pirq
mapping is still valid at that point, but xen_hvm_setup_msi_irqs would
try to assign a new pirq anyway.
A simple way to reproduce this bug is to assign an MSI-capable network
card to a PV on HVM guest: if the user brings the corresponding
ethernet interface down and back up, Linux fails to enable MSIs on the
device.
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
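
To make the setup flow concrete, below is a minimal, hedged sketch of the caller-side logic implied by the fix. The helper setup_one_msi and its pirq_from_qemu argument are illustrative only, and the real caller, xen_hvm_setup_msi_irqs, is not part of the diffstat below; the interfaces the sketch uses (xen_irq_from_pirq(), the new alloc argument to xen_allocate_pirq_msi(), and the XEN_ALLOC_IRQ/XEN_ALLOC_PIRQ flags) all appear in the diff that follows, with their declarations assumed to live in xen/events.h.

#include <xen/events.h>	/* assumed home of the declarations added/changed in the diff */

/*
 * Illustrative-only sketch (not part of the commit): probe for an existing
 * MSI-to-pirq mapping first, and only ask for a fresh pirq when none is
 * found.
 */
static int setup_one_msi(char *name, int pirq_from_qemu)
{
	int irq = -1, pirq = -1;

	if (pirq_from_qemu >= 0 && xen_irq_from_pirq(pirq_from_qemu) >= 0) {
		/* qemu still holds a valid mapping for this MSI: reuse the
		 * pirq it reported and only allocate a Linux irq for it. */
		pirq = pirq_from_qemu;
		xen_allocate_pirq_msi(name, &irq, &pirq, XEN_ALLOC_IRQ);
	} else {
		/* First enable: allocate both a Linux irq and a new pirq;
		 * the pirq is then handed to qemu, which does the actual
		 * MSI-to-pirq mapping. */
		xen_allocate_pirq_msi(name, &irq, &pirq,
				      XEN_ALLOC_IRQ | XEN_ALLOC_PIRQ);
	}

	return irq;
}

In the reuse branch only a Linux irq is allocated and the pirq already known to qemu is kept, which is exactly what re-enabling the same MSI a second time needs.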
Diffstat (limited to 'drivers/xen/events.c')
-rw-r--r--  drivers/xen/events.c  |  24
1 file changed, 17 insertions, 7 deletions
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 7ab43c33f746..f78945ce8aeb 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -668,17 +668,21 @@ out:
 #include <linux/msi.h>
 #include "../pci/msi.h"
 
-void xen_allocate_pirq_msi(char *name, int *irq, int *pirq)
+void xen_allocate_pirq_msi(char *name, int *irq, int *pirq, int alloc)
 {
 	spin_lock(&irq_mapping_update_lock);
 
-	*irq = find_unbound_irq();
-	if (*irq == -1)
-		goto out;
+	if (alloc & XEN_ALLOC_IRQ) {
+		*irq = find_unbound_irq();
+		if (*irq == -1)
+			goto out;
+	}
 
-	*pirq = find_unbound_pirq(MAP_PIRQ_TYPE_MSI);
-	if (*pirq == -1)
-		goto out;
+	if (alloc & XEN_ALLOC_PIRQ) {
+		*pirq = find_unbound_pirq(MAP_PIRQ_TYPE_MSI);
+		if (*pirq == -1)
+			goto out;
+	}
 
 	set_irq_chip_and_handler_name(*irq, &xen_pirq_chip,
 				      handle_level_irq, name);
@@ -766,6 +770,7 @@ int xen_destroy_irq(int irq)
 			printk(KERN_WARNING "unmap irq failed %d\n", rc);
 			goto out;
 		}
+		pirq_to_irq[info->u.pirq.pirq] = -1;
 	}
 	irq_info[irq] = mk_unbound_info();
 
@@ -786,6 +791,11 @@ int xen_gsi_from_irq(unsigned irq)
 	return gsi_from_irq(irq);
 }
 
+int xen_irq_from_pirq(unsigned pirq)
+{
+	return pirq_to_irq[pirq];
+}
+
 int bind_evtchn_to_irq(unsigned int evtchn)
 {
 	int irq;
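
A brief note on the teardown hunk above: clearing pirq_to_irq[] in xen_destroy_irq() keeps the new xen_irq_from_pirq() lookup honest once an MSI is torn down. The snippet below is a hedged, illustrative-only check (teardown_msi_pirq is not part of the commit, and the header paths are assumptions) showing the invariant that makes the reuse test in the setup path safe.

#include <linux/kernel.h>	/* printk() */
#include <xen/events.h>		/* assumed declarations of xen_destroy_irq(), xen_irq_from_pirq() */

/* Illustrative-only helper: after xen_destroy_irq(), the pirq lookup must
 * miss, so a later setup falls back to allocating a fresh pirq instead of
 * reusing a mapping qemu has already torn down. */
static void teardown_msi_pirq(int irq, int pirq)
{
	xen_destroy_irq(irq);	/* now also resets pirq_to_irq[pirq] to -1 */
	if (xen_irq_from_pirq(pirq) >= 0)
		printk(KERN_WARNING "xen: pirq %d still mapped after teardown\n", pirq);
}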