aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/xen
diff options
context:
space:
mode:
author	David Vrabel <david.vrabel@citrix.com>	2013-10-17 10:23:15 -0400
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2014-01-06 10:07:47 -0500
commit	d0b075ffeede257342c3afdbeadd2fda8504ecee (patch)
tree	1c512a95b2e335301917ba5f77553545c14ae215 /drivers/xen
parent	083858758f67bb20ef6be5bc8442be91cca8ee2d (diff)
xen/events: Refactor evtchn_to_irq array to be dynamically allocated
Refactor the static evtchn_to_irq array to be dynamically allocated by
implementing get and set functions for accesses to the array.

Two new port ops are added: max_channels (maximum supported number of event
channels) and nr_channels (number of currently usable event channels). For the
2-level ABI, these numbers are both the same as the shared data structure is a
fixed size. For the FIFO ABI, these will be different as the event array is
expanded dynamically.

This allows more than 65000 event channels so an unsigned short is no longer
sufficient for an event channel port number and unsigned int is used instead.

Signed-off-by: Malcolm Crossley <malcolm.crossley@citrix.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Diffstat (limited to 'drivers/xen')
-rw-r--r--	drivers/xen/events/events_2l.c	11
-rw-r--r--	drivers/xen/events/events_base.c	175
-rw-r--r--	drivers/xen/events/events_internal.h	18
3 files changed, 149 insertions(+), 55 deletions(-)
diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c
index e55677cca745..ecb402a149e3 100644
--- a/drivers/xen/events/events_2l.c
+++ b/drivers/xen/events/events_2l.c
@@ -41,6 +41,11 @@
41static DEFINE_PER_CPU(xen_ulong_t [NR_EVENT_CHANNELS/BITS_PER_EVTCHN_WORD], 41static DEFINE_PER_CPU(xen_ulong_t [NR_EVENT_CHANNELS/BITS_PER_EVTCHN_WORD],
42 cpu_evtchn_mask); 42 cpu_evtchn_mask);
43 43
44static unsigned evtchn_2l_max_channels(void)
45{
46 return NR_EVENT_CHANNELS;
47}
48
44static void evtchn_2l_bind_to_cpu(struct irq_info *info, unsigned cpu) 49static void evtchn_2l_bind_to_cpu(struct irq_info *info, unsigned cpu)
45{ 50{
46 clear_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, info->cpu))); 51 clear_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, info->cpu)));
@@ -238,7 +243,7 @@ static void evtchn_2l_handle_events(unsigned cpu)
238 243
239 /* Process port. */ 244 /* Process port. */
240 port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx; 245 port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx;
241 irq = evtchn_to_irq[port]; 246 irq = get_evtchn_to_irq(port);
242 247
243 if (irq != -1) { 248 if (irq != -1) {
244 desc = irq_to_desc(irq); 249 desc = irq_to_desc(irq);
@@ -332,7 +337,7 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
332 int word_idx = i / BITS_PER_EVTCHN_WORD; 337 int word_idx = i / BITS_PER_EVTCHN_WORD;
333 printk(" %d: event %d -> irq %d%s%s%s\n", 338 printk(" %d: event %d -> irq %d%s%s%s\n",
334 cpu_from_evtchn(i), i, 339 cpu_from_evtchn(i), i,
335 evtchn_to_irq[i], 340 get_evtchn_to_irq(i),
336 sync_test_bit(word_idx, BM(&v->evtchn_pending_sel)) 341 sync_test_bit(word_idx, BM(&v->evtchn_pending_sel))
337 ? "" : " l2-clear", 342 ? "" : " l2-clear",
338 !sync_test_bit(i, BM(sh->evtchn_mask)) 343 !sync_test_bit(i, BM(sh->evtchn_mask))
@@ -348,6 +353,8 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
348} 353}
349 354
350static const struct evtchn_ops evtchn_ops_2l = { 355static const struct evtchn_ops evtchn_ops_2l = {
356 .max_channels = evtchn_2l_max_channels,
357 .nr_channels = evtchn_2l_max_channels,
351 .bind_to_cpu = evtchn_2l_bind_to_cpu, 358 .bind_to_cpu = evtchn_2l_bind_to_cpu,
352 .clear_pending = evtchn_2l_clear_pending, 359 .clear_pending = evtchn_2l_clear_pending,
353 .set_pending = evtchn_2l_set_pending, 360 .set_pending = evtchn_2l_set_pending,
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 929eccb77270..a6906665de53 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -77,12 +77,16 @@ static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
77/* IRQ <-> IPI mapping */ 77/* IRQ <-> IPI mapping */
78static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1}; 78static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
79 79
80int *evtchn_to_irq; 80int **evtchn_to_irq;
81#ifdef CONFIG_X86 81#ifdef CONFIG_X86
82static unsigned long *pirq_eoi_map; 82static unsigned long *pirq_eoi_map;
83#endif 83#endif
84static bool (*pirq_needs_eoi)(unsigned irq); 84static bool (*pirq_needs_eoi)(unsigned irq);
85 85
86#define EVTCHN_ROW(e) (e / (PAGE_SIZE/sizeof(**evtchn_to_irq)))
87#define EVTCHN_COL(e) (e % (PAGE_SIZE/sizeof(**evtchn_to_irq)))
88#define EVTCHN_PER_ROW (PAGE_SIZE / sizeof(**evtchn_to_irq))
89
86/* Xen will never allocate port zero for any purpose. */ 90/* Xen will never allocate port zero for any purpose. */
87#define VALID_EVTCHN(chn) ((chn) != 0) 91#define VALID_EVTCHN(chn) ((chn) != 0)
88 92
@@ -92,6 +96,61 @@ static struct irq_chip xen_pirq_chip;
92static void enable_dynirq(struct irq_data *data); 96static void enable_dynirq(struct irq_data *data);
93static void disable_dynirq(struct irq_data *data); 97static void disable_dynirq(struct irq_data *data);
94 98
99static void clear_evtchn_to_irq_row(unsigned row)
100{
101 unsigned col;
102
103 for (col = 0; col < EVTCHN_PER_ROW; col++)
104 evtchn_to_irq[row][col] = -1;
105}
106
107static void clear_evtchn_to_irq_all(void)
108{
109 unsigned row;
110
111 for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) {
112 if (evtchn_to_irq[row] == NULL)
113 continue;
114 clear_evtchn_to_irq_row(row);
115 }
116}
117
118static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
119{
120 unsigned row;
121 unsigned col;
122
123 if (evtchn >= xen_evtchn_max_channels())
124 return -EINVAL;
125
126 row = EVTCHN_ROW(evtchn);
127 col = EVTCHN_COL(evtchn);
128
129 if (evtchn_to_irq[row] == NULL) {
130 /* Unallocated irq entries return -1 anyway */
131 if (irq == -1)
132 return 0;
133
134 evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL);
135 if (evtchn_to_irq[row] == NULL)
136 return -ENOMEM;
137
138 clear_evtchn_to_irq_row(row);
139 }
140
141 evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq;
142 return 0;
143}
144
145int get_evtchn_to_irq(unsigned evtchn)
146{
147 if (evtchn >= xen_evtchn_max_channels())
148 return -1;
149 if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
150 return -1;
151 return evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)];
152}
153
95/* Get info for IRQ */ 154/* Get info for IRQ */
96struct irq_info *info_for_irq(unsigned irq) 155struct irq_info *info_for_irq(unsigned irq)
97{ 156{
@@ -102,9 +161,10 @@ struct irq_info *info_for_irq(unsigned irq)
102static int xen_irq_info_common_setup(struct irq_info *info, 161static int xen_irq_info_common_setup(struct irq_info *info,
103 unsigned irq, 162 unsigned irq,
104 enum xen_irq_type type, 163 enum xen_irq_type type,
105 unsigned short evtchn, 164 unsigned evtchn,
106 unsigned short cpu) 165 unsigned short cpu)
107{ 166{
167 int ret;
108 168
109 BUG_ON(info->type != IRQT_UNBOUND && info->type != type); 169 BUG_ON(info->type != IRQT_UNBOUND && info->type != type);
110 170
@@ -113,7 +173,9 @@ static int xen_irq_info_common_setup(struct irq_info *info,
113 info->evtchn = evtchn; 173 info->evtchn = evtchn;
114 info->cpu = cpu; 174 info->cpu = cpu;
115 175
116 evtchn_to_irq[evtchn] = irq; 176 ret = set_evtchn_to_irq(evtchn, irq);
177 if (ret < 0)
178 return ret;
117 179
118 irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN); 180 irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
119 181
@@ -121,7 +183,7 @@ static int xen_irq_info_common_setup(struct irq_info *info,
121} 183}
122 184
123static int xen_irq_info_evtchn_setup(unsigned irq, 185static int xen_irq_info_evtchn_setup(unsigned irq,
124 unsigned short evtchn) 186 unsigned evtchn)
125{ 187{
126 struct irq_info *info = info_for_irq(irq); 188 struct irq_info *info = info_for_irq(irq);
127 189
@@ -130,7 +192,7 @@ static int xen_irq_info_evtchn_setup(unsigned irq,
130 192
131static int xen_irq_info_ipi_setup(unsigned cpu, 193static int xen_irq_info_ipi_setup(unsigned cpu,
132 unsigned irq, 194 unsigned irq,
133 unsigned short evtchn, 195 unsigned evtchn,
134 enum ipi_vector ipi) 196 enum ipi_vector ipi)
135{ 197{
136 struct irq_info *info = info_for_irq(irq); 198 struct irq_info *info = info_for_irq(irq);
@@ -144,8 +206,8 @@ static int xen_irq_info_ipi_setup(unsigned cpu,
144 206
145static int xen_irq_info_virq_setup(unsigned cpu, 207static int xen_irq_info_virq_setup(unsigned cpu,
146 unsigned irq, 208 unsigned irq,
147 unsigned short evtchn, 209 unsigned evtchn,
148 unsigned short virq) 210 unsigned virq)
149{ 211{
150 struct irq_info *info = info_for_irq(irq); 212 struct irq_info *info = info_for_irq(irq);
151 213
@@ -157,9 +219,9 @@ static int xen_irq_info_virq_setup(unsigned cpu,
157} 219}
158 220
159static int xen_irq_info_pirq_setup(unsigned irq, 221static int xen_irq_info_pirq_setup(unsigned irq,
160 unsigned short evtchn, 222 unsigned evtchn,
161 unsigned short pirq, 223 unsigned pirq,
162 unsigned short gsi, 224 unsigned gsi,
163 uint16_t domid, 225 uint16_t domid,
164 unsigned char flags) 226 unsigned char flags)
165{ 227{
@@ -173,6 +235,12 @@ static int xen_irq_info_pirq_setup(unsigned irq,
173 return xen_irq_info_common_setup(info, irq, IRQT_PIRQ, evtchn, 0); 235 return xen_irq_info_common_setup(info, irq, IRQT_PIRQ, evtchn, 0);
174} 236}
175 237
238static void xen_irq_info_cleanup(struct irq_info *info)
239{
240 set_evtchn_to_irq(info->evtchn, -1);
241 info->evtchn = 0;
242}
243
176/* 244/*
177 * Accessors for packed IRQ information. 245 * Accessors for packed IRQ information.
178 */ 246 */
@@ -186,7 +254,7 @@ unsigned int evtchn_from_irq(unsigned irq)
186 254
187unsigned irq_from_evtchn(unsigned int evtchn) 255unsigned irq_from_evtchn(unsigned int evtchn)
188{ 256{
189 return evtchn_to_irq[evtchn]; 257 return get_evtchn_to_irq(evtchn);
190} 258}
191EXPORT_SYMBOL_GPL(irq_from_evtchn); 259EXPORT_SYMBOL_GPL(irq_from_evtchn);
192 260
@@ -237,7 +305,7 @@ unsigned cpu_from_irq(unsigned irq)
237 305
238unsigned int cpu_from_evtchn(unsigned int evtchn) 306unsigned int cpu_from_evtchn(unsigned int evtchn)
239{ 307{
240 int irq = evtchn_to_irq[evtchn]; 308 int irq = get_evtchn_to_irq(evtchn);
241 unsigned ret = 0; 309 unsigned ret = 0;
242 310
243 if (irq != -1) 311 if (irq != -1)
@@ -263,7 +331,7 @@ static bool pirq_needs_eoi_flag(unsigned irq)
263 331
264static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu) 332static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
265{ 333{
266 int irq = evtchn_to_irq[chn]; 334 int irq = get_evtchn_to_irq(chn);
267 struct irq_info *info = info_for_irq(irq); 335 struct irq_info *info = info_for_irq(irq);
268 336
269 BUG_ON(irq == -1); 337 BUG_ON(irq == -1);
@@ -386,6 +454,18 @@ static void xen_free_irq(unsigned irq)
386 irq_free_desc(irq); 454 irq_free_desc(irq);
387} 455}
388 456
457static void xen_evtchn_close(unsigned int port)
458{
459 struct evtchn_close close;
460
461 close.port = port;
462 if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
463 BUG();
464
465 /* Closed ports are implicitly re-bound to VCPU0. */
466 bind_evtchn_to_cpu(port, 0);
467}
468
389static void pirq_query_unmask(int irq) 469static void pirq_query_unmask(int irq)
390{ 470{
391 struct physdev_irq_status_query irq_status; 471 struct physdev_irq_status_query irq_status;
@@ -458,7 +538,13 @@ static unsigned int __startup_pirq(unsigned int irq)
458 538
459 pirq_query_unmask(irq); 539 pirq_query_unmask(irq);
460 540
461 evtchn_to_irq[evtchn] = irq; 541 rc = set_evtchn_to_irq(evtchn, irq);
542 if (rc != 0) {
543 pr_err("irq%d: Failed to set port to irq mapping (%d)\n",
544 irq, rc);
545 xen_evtchn_close(evtchn);
546 return 0;
547 }
462 bind_evtchn_to_cpu(evtchn, 0); 548 bind_evtchn_to_cpu(evtchn, 0);
463 info->evtchn = evtchn; 549 info->evtchn = evtchn;
464 550
@@ -476,10 +562,9 @@ static unsigned int startup_pirq(struct irq_data *data)
476 562
477static void shutdown_pirq(struct irq_data *data) 563static void shutdown_pirq(struct irq_data *data)
478{ 564{
479 struct evtchn_close close;
480 unsigned int irq = data->irq; 565 unsigned int irq = data->irq;
481 struct irq_info *info = info_for_irq(irq); 566 struct irq_info *info = info_for_irq(irq);
482 int evtchn = evtchn_from_irq(irq); 567 unsigned evtchn = evtchn_from_irq(irq);
483 568
484 BUG_ON(info->type != IRQT_PIRQ); 569 BUG_ON(info->type != IRQT_PIRQ);
485 570
@@ -487,14 +572,8 @@ static void shutdown_pirq(struct irq_data *data)
487 return; 572 return;
488 573
489 mask_evtchn(evtchn); 574 mask_evtchn(evtchn);
490 575 xen_evtchn_close(evtchn);
491 close.port = evtchn; 576 xen_irq_info_cleanup(info);
492 if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
493 BUG();
494
495 bind_evtchn_to_cpu(evtchn, 0);
496 evtchn_to_irq[evtchn] = -1;
497 info->evtchn = 0;
498} 577}
499 578
500static void enable_pirq(struct irq_data *data) 579static void enable_pirq(struct irq_data *data)
@@ -525,7 +604,6 @@ EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
525 604
526static void __unbind_from_irq(unsigned int irq) 605static void __unbind_from_irq(unsigned int irq)
527{ 606{
528 struct evtchn_close close;
529 int evtchn = evtchn_from_irq(irq); 607 int evtchn = evtchn_from_irq(irq);
530 struct irq_info *info = irq_get_handler_data(irq); 608 struct irq_info *info = irq_get_handler_data(irq);
531 609
@@ -536,27 +614,22 @@ static void __unbind_from_irq(unsigned int irq)
536 } 614 }
537 615
538 if (VALID_EVTCHN(evtchn)) { 616 if (VALID_EVTCHN(evtchn)) {
539 close.port = evtchn; 617 unsigned int cpu = cpu_from_irq(irq);
540 if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) 618
541 BUG(); 619 xen_evtchn_close(evtchn);
542 620
543 switch (type_from_irq(irq)) { 621 switch (type_from_irq(irq)) {
544 case IRQT_VIRQ: 622 case IRQT_VIRQ:
545 per_cpu(virq_to_irq, cpu_from_evtchn(evtchn)) 623 per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1;
546 [virq_from_irq(irq)] = -1;
547 break; 624 break;
548 case IRQT_IPI: 625 case IRQT_IPI:
549 per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn)) 626 per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1;
550 [ipi_from_irq(irq)] = -1;
551 break; 627 break;
552 default: 628 default:
553 break; 629 break;
554 } 630 }
555 631
556 /* Closed ports are implicitly re-bound to VCPU0. */ 632 xen_irq_info_cleanup(info);
557 bind_evtchn_to_cpu(evtchn, 0);
558
559 evtchn_to_irq[evtchn] = -1;
560 } 633 }
561 634
562 BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND); 635 BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);
@@ -760,9 +833,12 @@ int bind_evtchn_to_irq(unsigned int evtchn)
760 int irq; 833 int irq;
761 int ret; 834 int ret;
762 835
836 if (evtchn >= xen_evtchn_max_channels())
837 return -ENOMEM;
838
763 mutex_lock(&irq_mapping_update_lock); 839 mutex_lock(&irq_mapping_update_lock);
764 840
765 irq = evtchn_to_irq[evtchn]; 841 irq = get_evtchn_to_irq(evtchn);
766 842
767 if (irq == -1) { 843 if (irq == -1) {
768 irq = xen_allocate_irq_dynamic(); 844 irq = xen_allocate_irq_dynamic();
@@ -852,7 +928,7 @@ static int find_virq(unsigned int virq, unsigned int cpu)
852 int port, rc = -ENOENT; 928 int port, rc = -ENOENT;
853 929
854 memset(&status, 0, sizeof(status)); 930 memset(&status, 0, sizeof(status));
855 for (port = 0; port <= NR_EVENT_CHANNELS; port++) { 931 for (port = 0; port < xen_evtchn_max_channels(); port++) {
856 status.dom = DOMID_SELF; 932 status.dom = DOMID_SELF;
857 status.port = port; 933 status.port = port;
858 rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status); 934 rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
@@ -1022,7 +1098,7 @@ EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
1022 1098
1023int evtchn_make_refcounted(unsigned int evtchn) 1099int evtchn_make_refcounted(unsigned int evtchn)
1024{ 1100{
1025 int irq = evtchn_to_irq[evtchn]; 1101 int irq = get_evtchn_to_irq(evtchn);
1026 struct irq_info *info; 1102 struct irq_info *info;
1027 1103
1028 if (irq == -1) 1104 if (irq == -1)
@@ -1047,12 +1123,12 @@ int evtchn_get(unsigned int evtchn)
1047 struct irq_info *info; 1123 struct irq_info *info;
1048 int err = -ENOENT; 1124 int err = -ENOENT;
1049 1125
1050 if (evtchn >= NR_EVENT_CHANNELS) 1126 if (evtchn >= xen_evtchn_max_channels())
1051 return -EINVAL; 1127 return -EINVAL;
1052 1128
1053 mutex_lock(&irq_mapping_update_lock); 1129 mutex_lock(&irq_mapping_update_lock);
1054 1130
1055 irq = evtchn_to_irq[evtchn]; 1131 irq = get_evtchn_to_irq(evtchn);
1056 if (irq == -1) 1132 if (irq == -1)
1057 goto done; 1133 goto done;
1058 1134
@@ -1076,7 +1152,7 @@ EXPORT_SYMBOL_GPL(evtchn_get);
1076 1152
1077void evtchn_put(unsigned int evtchn) 1153void evtchn_put(unsigned int evtchn)
1078{ 1154{
1079 int irq = evtchn_to_irq[evtchn]; 1155 int irq = get_evtchn_to_irq(evtchn);
1080 if (WARN_ON(irq == -1)) 1156 if (WARN_ON(irq == -1))
1081 return; 1157 return;
1082 unbind_from_irq(irq); 1158 unbind_from_irq(irq);
@@ -1163,7 +1239,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
1163 mutex_lock(&irq_mapping_update_lock); 1239 mutex_lock(&irq_mapping_update_lock);
1164 1240
1165 /* After resume the irq<->evtchn mappings are all cleared out */ 1241 /* After resume the irq<->evtchn mappings are all cleared out */
1166 BUG_ON(evtchn_to_irq[evtchn] != -1); 1242 BUG_ON(get_evtchn_to_irq(evtchn) != -1);
1167 /* Expect irq to have been bound before, 1243 /* Expect irq to have been bound before,
1168 so there should be a proper type */ 1244 so there should be a proper type */
1169 BUG_ON(info->type == IRQT_UNBOUND); 1245 BUG_ON(info->type == IRQT_UNBOUND);
@@ -1448,15 +1524,14 @@ void xen_irq_resume(void)
1448 struct irq_info *info; 1524 struct irq_info *info;
1449 1525
1450 /* New event-channel space is not 'live' yet. */ 1526 /* New event-channel space is not 'live' yet. */
1451 for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) 1527 for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
1452 mask_evtchn(evtchn); 1528 mask_evtchn(evtchn);
1453 1529
1454 /* No IRQ <-> event-channel mappings. */ 1530 /* No IRQ <-> event-channel mappings. */
1455 list_for_each_entry(info, &xen_irq_list_head, list) 1531 list_for_each_entry(info, &xen_irq_list_head, list)
1456 info->evtchn = 0; /* zap event-channel binding */ 1532 info->evtchn = 0; /* zap event-channel binding */
1457 1533
1458 for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) 1534 clear_evtchn_to_irq_all();
1459 evtchn_to_irq[evtchn] = -1;
1460 1535
1461 for_each_possible_cpu(cpu) { 1536 for_each_possible_cpu(cpu) {
1462 restore_cpu_virqs(cpu); 1537 restore_cpu_virqs(cpu);
@@ -1553,14 +1628,12 @@ void __init xen_init_IRQ(void)
1553 1628
1554 xen_evtchn_2l_init(); 1629 xen_evtchn_2l_init();
1555 1630
1556 evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq), 1631 evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()),
1557 GFP_KERNEL); 1632 sizeof(*evtchn_to_irq), GFP_KERNEL);
1558 BUG_ON(!evtchn_to_irq); 1633 BUG_ON(!evtchn_to_irq);
1559 for (i = 0; i < NR_EVENT_CHANNELS; i++)
1560 evtchn_to_irq[i] = -1;
1561 1634
1562 /* No event channels are 'live' right now. */ 1635 /* No event channels are 'live' right now. */
1563 for (i = 0; i < NR_EVENT_CHANNELS; i++) 1636 for (i = 0; i < xen_evtchn_nr_channels(); i++)
1564 mask_evtchn(i); 1637 mask_evtchn(i);
1565 1638
1566 pirq_needs_eoi = pirq_needs_eoi_flag; 1639 pirq_needs_eoi = pirq_needs_eoi_flag;
diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h
index dc9650265e04..a3d9aeceda1a 100644
--- a/drivers/xen/events/events_internal.h
+++ b/drivers/xen/events/events_internal.h
@@ -35,7 +35,7 @@ struct irq_info {
35 int refcnt; 35 int refcnt;
36 enum xen_irq_type type; /* type */ 36 enum xen_irq_type type; /* type */
37 unsigned irq; 37 unsigned irq;
38 unsigned short evtchn; /* event channel */ 38 unsigned int evtchn; /* event channel */
39 unsigned short cpu; /* cpu bound */ 39 unsigned short cpu; /* cpu bound */
40 40
41 union { 41 union {
@@ -55,6 +55,9 @@ struct irq_info {
55#define PIRQ_SHAREABLE (1 << 1) 55#define PIRQ_SHAREABLE (1 << 1)
56 56
57struct evtchn_ops { 57struct evtchn_ops {
58 unsigned (*max_channels)(void);
59 unsigned (*nr_channels)(void);
60
58 int (*setup)(struct irq_info *info); 61 int (*setup)(struct irq_info *info);
59 void (*bind_to_cpu)(struct irq_info *info, unsigned cpu); 62 void (*bind_to_cpu)(struct irq_info *info, unsigned cpu);
60 63
@@ -70,12 +73,23 @@ struct evtchn_ops {
70 73
71extern const struct evtchn_ops *evtchn_ops; 74extern const struct evtchn_ops *evtchn_ops;
72 75
73extern int *evtchn_to_irq; 76extern int **evtchn_to_irq;
77int get_evtchn_to_irq(unsigned int evtchn);
74 78
75struct irq_info *info_for_irq(unsigned irq); 79struct irq_info *info_for_irq(unsigned irq);
76unsigned cpu_from_irq(unsigned irq); 80unsigned cpu_from_irq(unsigned irq);
77unsigned cpu_from_evtchn(unsigned int evtchn); 81unsigned cpu_from_evtchn(unsigned int evtchn);
78 82
83static inline unsigned xen_evtchn_max_channels(void)
84{
85 return evtchn_ops->max_channels();
86}
87
88static inline unsigned xen_evtchn_nr_channels(void)
89{
90 return evtchn_ops->nr_channels();
91}
92
79/* 93/*
80 * Do any ABI specific setup for a bound event channel before it can 94 * Do any ABI specific setup for a bound event channel before it can
81 * be unmasked and used. 95 * be unmasked and used.