author     David Vrabel <david.vrabel@citrix.com>           2013-03-18 11:50:17 -0400
committer  Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>   2014-01-06 10:07:44 -0500
commit     96d4c5881806ebb993a3d84991af9c96fa9cd576 (patch)
tree       978093091e68ff210b7c1c2bc7cfbeb336604c7f /drivers/xen
parent     ab9a1cca3d172876ae9d5edb63abce7986045597 (diff)
xen/events: allow setup of irq_info to fail
The FIFO-based event ABI requires additional setup of newly bound events (it
may need to expand the event array) and this setup may fail.
xen_irq_info_common_init() is a useful place to put this setup, so allow this
call to fail. This call and the other similar calls are renamed to be
*_setup() to reflect that they may now fail.

This failure can only occur with new event channels, not on rebind.

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
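To make the new calling convention easy to see before reading the hunks, here
is a condensed sketch of how a bind path handles a failing setup after this
patch: the renamed xen_irq_info_*_setup() helper returns an int, and on error
the partially bound IRQ is torn down with the new __unbind_from_irq() helper
and the error code is returned instead of an IRQ number. This is trimmed from
the full bind_evtchn_to_irq() shown in the diff below, not a standalone
program; the pre-existing else branch and the surrounding kernel context are
assumed rather than shown.

/* Condensed sketch of bind_evtchn_to_irq() after this patch; see the full
 * diff below for the complete function. Kernel context (the mutex,
 * evtchn_to_irq[], xen_allocate_irq_dynamic(), ...) is assumed, not shown. */
int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;
	int ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];
	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_edge_irq, "event");

		/* Setup may now fail (e.g. the FIFO ABI may need to expand
		 * its event array); undo the partial binding on error. */
		ret = xen_irq_info_evtchn_setup(irq, evtchn);
		if (ret < 0) {
			__unbind_from_irq(irq);
			irq = ret;
			goto out;
		}
	}
	/* (else branch for an already-bound channel omitted here) */

out:
	mutex_unlock(&irq_mapping_update_lock);
	return irq;
}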
Diffstat (limited to 'drivers/xen')
-rw-r--r--  drivers/xen/events/events_base.c | 156
1 file changed, 91 insertions(+), 65 deletions(-)
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 7c7b744cd13d..4f7d94abe82c 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -99,7 +99,7 @@ struct irq_info *info_for_irq(unsigned irq)
 }
 
 /* Constructors for packed IRQ information. */
-static void xen_irq_info_common_init(struct irq_info *info,
+static int xen_irq_info_common_setup(struct irq_info *info,
				     unsigned irq,
				     enum xen_irq_type type,
				     unsigned short evtchn,
@@ -116,45 +116,47 @@ static void xen_irq_info_common_init(struct irq_info *info,
 	evtchn_to_irq[evtchn] = irq;
 
 	irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
+
+	return 0;
 }
 
-static void xen_irq_info_evtchn_init(unsigned irq,
+static int xen_irq_info_evtchn_setup(unsigned irq,
				     unsigned short evtchn)
 {
 	struct irq_info *info = info_for_irq(irq);
 
-	xen_irq_info_common_init(info, irq, IRQT_EVTCHN, evtchn, 0);
+	return xen_irq_info_common_setup(info, irq, IRQT_EVTCHN, evtchn, 0);
 }
 
-static void xen_irq_info_ipi_init(unsigned cpu,
+static int xen_irq_info_ipi_setup(unsigned cpu,
				  unsigned irq,
				  unsigned short evtchn,
				  enum ipi_vector ipi)
 {
 	struct irq_info *info = info_for_irq(irq);
 
-	xen_irq_info_common_init(info, irq, IRQT_IPI, evtchn, 0);
-
 	info->u.ipi = ipi;
 
 	per_cpu(ipi_to_irq, cpu)[ipi] = irq;
+
+	return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0);
 }
 
-static void xen_irq_info_virq_init(unsigned cpu,
+static int xen_irq_info_virq_setup(unsigned cpu,
				   unsigned irq,
				   unsigned short evtchn,
				   unsigned short virq)
 {
 	struct irq_info *info = info_for_irq(irq);
 
-	xen_irq_info_common_init(info, irq, IRQT_VIRQ, evtchn, 0);
-
 	info->u.virq = virq;
 
 	per_cpu(virq_to_irq, cpu)[virq] = irq;
+
+	return xen_irq_info_common_setup(info, irq, IRQT_VIRQ, evtchn, 0);
 }
 
-static void xen_irq_info_pirq_init(unsigned irq,
+static int xen_irq_info_pirq_setup(unsigned irq,
				   unsigned short evtchn,
				   unsigned short pirq,
				   unsigned short gsi,
@@ -163,12 +165,12 @@ static void xen_irq_info_pirq_init(unsigned irq,
 {
 	struct irq_info *info = info_for_irq(irq);
 
-	xen_irq_info_common_init(info, irq, IRQT_PIRQ, evtchn, 0);
-
 	info->u.pirq.pirq = pirq;
 	info->u.pirq.gsi = gsi;
 	info->u.pirq.domid = domid;
 	info->u.pirq.flags = flags;
+
+	return xen_irq_info_common_setup(info, irq, IRQT_PIRQ, evtchn, 0);
 }
 
 /*
@@ -521,6 +523,47 @@ int xen_irq_from_gsi(unsigned gsi)
 }
 EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
 
+static void __unbind_from_irq(unsigned int irq)
+{
+	struct evtchn_close close;
+	int evtchn = evtchn_from_irq(irq);
+	struct irq_info *info = irq_get_handler_data(irq);
+
+	if (info->refcnt > 0) {
+		info->refcnt--;
+		if (info->refcnt != 0)
+			return;
+	}
+
+	if (VALID_EVTCHN(evtchn)) {
+		close.port = evtchn;
+		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
+			BUG();
+
+		switch (type_from_irq(irq)) {
+		case IRQT_VIRQ:
+			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
+				[virq_from_irq(irq)] = -1;
+			break;
+		case IRQT_IPI:
+			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
+				[ipi_from_irq(irq)] = -1;
+			break;
+		default:
+			break;
+		}
+
+		/* Closed ports are implicitly re-bound to VCPU0. */
+		bind_evtchn_to_cpu(evtchn, 0);
+
+		evtchn_to_irq[evtchn] = -1;
+	}
+
+	BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);
+
+	xen_free_irq(irq);
+}
+
 /*
  * Do not make any assumptions regarding the relationship between the
  * IRQ number returned here and the Xen pirq argument.
@@ -536,6 +579,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
 {
 	int irq = -1;
 	struct physdev_irq irq_op;
+	int ret;
 
 	mutex_lock(&irq_mapping_update_lock);
 
@@ -563,8 +607,13 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
 		goto out;
 	}
 
-	xen_irq_info_pirq_init(irq, 0, pirq, gsi, DOMID_SELF,
+	ret = xen_irq_info_pirq_setup(irq, 0, pirq, gsi, DOMID_SELF,
			       shareable ? PIRQ_SHAREABLE : 0);
+	if (ret < 0) {
+		__unbind_from_irq(irq);
+		irq = ret;
+		goto out;
+	}
 
 	pirq_query_unmask(irq);
 	/* We try to use the handler with the appropriate semantic for the
@@ -624,7 +673,9 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
 	irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
				      name);
 
-	xen_irq_info_pirq_init(irq, 0, pirq, 0, domid, 0);
+	ret = xen_irq_info_pirq_setup(irq, 0, pirq, 0, domid, 0);
+	if (ret < 0)
+		goto error_irq;
 	ret = irq_set_msi_desc(irq, msidesc);
 	if (ret < 0)
 		goto error_irq;
@@ -632,8 +683,8 @@ out:
 	mutex_unlock(&irq_mapping_update_lock);
 	return irq;
 error_irq:
+	__unbind_from_irq(irq);
 	mutex_unlock(&irq_mapping_update_lock);
-	xen_free_irq(irq);
 	return ret;
 }
 #endif
@@ -703,9 +754,11 @@ int xen_pirq_from_irq(unsigned irq)
 	return pirq_from_irq(irq);
 }
 EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
+
 int bind_evtchn_to_irq(unsigned int evtchn)
 {
 	int irq;
+	int ret;
 
 	mutex_lock(&irq_mapping_update_lock);
 
@@ -719,7 +772,12 @@ int bind_evtchn_to_irq(unsigned int evtchn)
 		irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_edge_irq, "event");
 
-		xen_irq_info_evtchn_init(irq, evtchn);
+		ret = xen_irq_info_evtchn_setup(irq, evtchn);
+		if (ret < 0) {
+			__unbind_from_irq(irq);
+			irq = ret;
+			goto out;
+		}
 	} else {
 		struct irq_info *info = info_for_irq(irq);
 		WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
@@ -736,6 +794,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 {
 	struct evtchn_bind_ipi bind_ipi;
 	int evtchn, irq;
+	int ret;
 
 	mutex_lock(&irq_mapping_update_lock);
 
@@ -755,8 +814,12 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
			BUG();
 		evtchn = bind_ipi.port;
 
-		xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
-
+		ret = xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
+		if (ret < 0) {
+			__unbind_from_irq(irq);
+			irq = ret;
+			goto out;
+		}
 		bind_evtchn_to_cpu(evtchn, cpu);
 	} else {
 		struct irq_info *info = info_for_irq(irq);
@@ -835,7 +898,12 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
			evtchn = ret;
 		}
 
-		xen_irq_info_virq_init(cpu, irq, evtchn, virq);
+		ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
+		if (ret < 0) {
+			__unbind_from_irq(irq);
+			irq = ret;
+			goto out;
+		}
 
 		bind_evtchn_to_cpu(evtchn, cpu);
 	} else {
@@ -851,50 +919,8 @@ out:
 
 static void unbind_from_irq(unsigned int irq)
 {
-	struct evtchn_close close;
-	int evtchn = evtchn_from_irq(irq);
-	struct irq_info *info = irq_get_handler_data(irq);
-
-	if (WARN_ON(!info))
-		return;
-
 	mutex_lock(&irq_mapping_update_lock);
-
-	if (info->refcnt > 0) {
-		info->refcnt--;
-		if (info->refcnt != 0)
-			goto done;
-	}
-
-	if (VALID_EVTCHN(evtchn)) {
-		close.port = evtchn;
-		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
-			BUG();
-
-		switch (type_from_irq(irq)) {
-		case IRQT_VIRQ:
-			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
-				[virq_from_irq(irq)] = -1;
-			break;
-		case IRQT_IPI:
-			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
-				[ipi_from_irq(irq)] = -1;
-			break;
-		default:
-			break;
-		}
-
-		/* Closed ports are implicitly re-bound to VCPU0. */
-		bind_evtchn_to_cpu(evtchn, 0);
-
-		evtchn_to_irq[evtchn] = -1;
-	}
-
-	BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);
-
-	xen_free_irq(irq);
-
- done:
+	__unbind_from_irq(irq);
 	mutex_unlock(&irq_mapping_update_lock);
 }
 
@@ -1142,7 +1168,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
	   so there should be a proper type */
 	BUG_ON(info->type == IRQT_UNBOUND);
 
-	xen_irq_info_evtchn_init(irq, evtchn);
+	(void)xen_irq_info_evtchn_setup(irq, evtchn);
 
 	mutex_unlock(&irq_mapping_update_lock);
 
@@ -1317,7 +1343,7 @@ static void restore_cpu_virqs(unsigned int cpu)
 		evtchn = bind_virq.port;
 
 		/* Record the new mapping. */
-		xen_irq_info_virq_init(cpu, irq, evtchn, virq);
+		(void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
 		bind_evtchn_to_cpu(evtchn, cpu);
 	}
 }
@@ -1341,7 +1367,7 @@ static void restore_cpu_ipis(unsigned int cpu)
 		evtchn = bind_ipi.port;
 
 		/* Record the new mapping. */
-		xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
+		(void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
 		bind_evtchn_to_cpu(evtchn, cpu);
 	}
 }