Diffstat (limited to 'drivers/xen/events.c')
-rw-r--r--  drivers/xen/events.c | 40
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index da70f5c32eb9..7523719bf8a4 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -54,7 +54,7 @@
  * This lock protects updates to the following mapping and reference-count
  * arrays. The lock does not need to be acquired to read the mapping tables.
  */
-static DEFINE_SPINLOCK(irq_mapping_update_lock);
+static DEFINE_MUTEX(irq_mapping_update_lock);
 
 static LIST_HEAD(xen_irq_list_head);
 
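The change is mechanical throughout the rest of this file: irq_mapping_update_lock becomes a mutex, and every spin_lock()/spin_unlock() pair on it becomes mutex_lock()/mutex_unlock(). As a minimal sketch of the locking discipline the comment above describes (updaters serialize on the lock, readers need no lock), consider the following; the mapping table and helper names here are hypothetical illustrations, not code from this file:

#include <linux/mutex.h>

static DEFINE_MUTEX(irq_mapping_update_lock);
static int example_evtchn_to_irq[64];	/* hypothetical mapping table */

/* Updaters must hold the mutex so concurrent writers cannot race. */
static void example_set_mapping(unsigned int evtchn, int irq)
{
	mutex_lock(&irq_mapping_update_lock);
	example_evtchn_to_irq[evtchn] = irq;
	mutex_unlock(&irq_mapping_update_lock);
}

/* Per the comment above, readers may consult the table locklessly. */
static int example_get_mapping(unsigned int evtchn)
{
	return example_evtchn_to_irq[evtchn];
}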
@@ -631,7 +631,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
 	int irq = -1;
 	struct physdev_irq irq_op;
 
-	spin_lock(&irq_mapping_update_lock);
+	mutex_lock(&irq_mapping_update_lock);
 
 	irq = find_irq_by_gsi(gsi);
 	if (irq != -1) {
@@ -684,7 +684,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
 					handle_edge_irq, name);
 
 out:
-	spin_unlock(&irq_mapping_update_lock);
+	mutex_unlock(&irq_mapping_update_lock);
 
 	return irq;
 }
@@ -710,7 +710,7 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
 {
 	int irq, ret;
 
-	spin_lock(&irq_mapping_update_lock);
+	mutex_lock(&irq_mapping_update_lock);
 
 	irq = xen_allocate_irq_dynamic();
 	if (irq == -1)
@@ -724,10 +724,10 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
 	if (ret < 0)
 		goto error_irq;
 out:
-	spin_unlock(&irq_mapping_update_lock);
+	mutex_unlock(&irq_mapping_update_lock);
 	return irq;
 error_irq:
-	spin_unlock(&irq_mapping_update_lock);
+	mutex_unlock(&irq_mapping_update_lock);
 	xen_free_irq(irq);
 	return -1;
 }
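The hunk above also shows the goto-based unlock idiom this file relies on: both the success path (out:) and the failure path (error_irq:) drop the mutex before returning, and the cleanup call happens only after the unlock. A hedged sketch of the same shape, with hypothetical helpers standing in for the real allocation and setup steps:

static int example_allocate_irq(void);		/* hypothetical */
static int example_setup_irq(int irq);		/* hypothetical */
static void example_free_irq(int irq);		/* hypothetical */

static int example_bind(void)
{
	int irq, ret;

	mutex_lock(&irq_mapping_update_lock);
	irq = example_allocate_irq();
	if (irq == -1)
		goto out;			/* nothing to undo yet */
	ret = example_setup_irq(irq);
	if (ret < 0)
		goto error_irq;
out:
	mutex_unlock(&irq_mapping_update_lock);	/* success path unlocks... */
	return irq;
error_irq:
	mutex_unlock(&irq_mapping_update_lock);	/* ...and so does the error path */
	example_free_irq(irq);			/* cleanup outside the lock */
	return -1;
}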
@@ -740,7 +740,7 @@ int xen_destroy_irq(int irq)
 	struct irq_info *info = info_for_irq(irq);
 	int rc = -ENOENT;
 
-	spin_lock(&irq_mapping_update_lock);
+	mutex_lock(&irq_mapping_update_lock);
 
 	desc = irq_to_desc(irq);
 	if (!desc)
@@ -766,7 +766,7 @@ int xen_destroy_irq(int irq)
 	xen_free_irq(irq);
 
 out:
-	spin_unlock(&irq_mapping_update_lock);
+	mutex_unlock(&irq_mapping_update_lock);
 	return rc;
 }
 
@@ -776,7 +776,7 @@ int xen_irq_from_pirq(unsigned pirq)
 
 	struct irq_info *info;
 
-	spin_lock(&irq_mapping_update_lock);
+	mutex_lock(&irq_mapping_update_lock);
 
 	list_for_each_entry(info, &xen_irq_list_head, list) {
 		if (info == NULL || info->type != IRQT_PIRQ)
@@ -787,7 +787,7 @@ int xen_irq_from_pirq(unsigned pirq)
 	}
 	irq = -1;
 out:
-	spin_unlock(&irq_mapping_update_lock);
+	mutex_unlock(&irq_mapping_update_lock);
 
 	return irq;
 }
@@ -802,7 +802,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
 {
 	int irq;
 
-	spin_lock(&irq_mapping_update_lock);
+	mutex_lock(&irq_mapping_update_lock);
 
 	irq = evtchn_to_irq[evtchn];
 
@@ -818,7 +818,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
 	}
 
 out:
-	spin_unlock(&irq_mapping_update_lock);
+	mutex_unlock(&irq_mapping_update_lock);
 
 	return irq;
 }
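bind_evtchn_to_irq() above follows a lookup-or-allocate pattern: under the mutex it first checks whether the event channel already has an IRQ, and only sets up a new binding when it does not (the middle of the function is elided by the hunk boundaries). Roughly, reusing the hypothetical table and allocator from the earlier sketches:

static int example_bind_evtchn(unsigned int evtchn)
{
	int irq;

	mutex_lock(&irq_mapping_update_lock);

	irq = example_evtchn_to_irq[evtchn];	/* reuse an existing binding */
	if (irq == -1) {
		irq = example_allocate_irq();	/* hypothetical, as above */
		example_evtchn_to_irq[evtchn] = irq;
	}

	mutex_unlock(&irq_mapping_update_lock);
	return irq;
}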
@@ -829,7 +829,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 	struct evtchn_bind_ipi bind_ipi;
 	int evtchn, irq;
 
-	spin_lock(&irq_mapping_update_lock);
+	mutex_lock(&irq_mapping_update_lock);
 
 	irq = per_cpu(ipi_to_irq, cpu)[ipi];
 
@@ -853,7 +853,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 	}
 
 out:
-	spin_unlock(&irq_mapping_update_lock);
+	mutex_unlock(&irq_mapping_update_lock);
 	return irq;
 }
 
@@ -878,7 +878,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
 	struct evtchn_bind_virq bind_virq;
 	int evtchn, irq;
 
-	spin_lock(&irq_mapping_update_lock);
+	mutex_lock(&irq_mapping_update_lock);
 
 	irq = per_cpu(virq_to_irq, cpu)[virq];
 
@@ -903,7 +903,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
 	}
 
 out:
-	spin_unlock(&irq_mapping_update_lock);
+	mutex_unlock(&irq_mapping_update_lock);
 
 	return irq;
 }
@@ -913,7 +913,7 @@ static void unbind_from_irq(unsigned int irq)
 	struct evtchn_close close;
 	int evtchn = evtchn_from_irq(irq);
 
-	spin_lock(&irq_mapping_update_lock);
+	mutex_lock(&irq_mapping_update_lock);
 
 	if (VALID_EVTCHN(evtchn)) {
 		close.port = evtchn;
@@ -943,7 +943,7 @@ static void unbind_from_irq(unsigned int irq)
 
 	xen_free_irq(irq);
 
-	spin_unlock(&irq_mapping_update_lock);
+	mutex_unlock(&irq_mapping_update_lock);
 }
 
 int bind_evtchn_to_irqhandler(unsigned int evtchn,
@@ -1279,7 +1279,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
 	   will also be masked. */
 	disable_irq(irq);
 
-	spin_lock(&irq_mapping_update_lock);
+	mutex_lock(&irq_mapping_update_lock);
 
 	/* After resume the irq<->evtchn mappings are all cleared out */
 	BUG_ON(evtchn_to_irq[evtchn] != -1);
@@ -1289,7 +1289,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
 
 	xen_irq_info_evtchn_init(irq, evtchn);
 
-	spin_unlock(&irq_mapping_update_lock);
+	mutex_unlock(&irq_mapping_update_lock);
 
 	/* new event channels are always bound to cpu 0 */
 	irq_set_affinity(irq, cpumask_of(0));
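Why a mutex rather than a spinlock? This page does not include the commit message, so the motivation is inferred from the general kernel rule: code holding a spinlock must not sleep, whereas a mutex holder may block, so after this conversion the critical sections are free to call sleeping operations. The trade-off is that a mutex cannot be taken from interrupt context, so all of these paths must run in process context. A generic illustration of the difference, not code from this patch:

#include <linux/mutex.h>
#include <linux/slab.h>

static DEFINE_MUTEX(example_lock);

static void *example_alloc_under_lock(size_t size)
{
	void *p;

	mutex_lock(&example_lock);	/* sleeping here would be a bug under a spinlock */
	p = kzalloc(size, GFP_KERNEL);	/* GFP_KERNEL may sleep; legal under a mutex */
	mutex_unlock(&example_lock);
	return p;
}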