Diffstat (limited to 'drivers/xen/events.c')
-rw-r--r-- | drivers/xen/events.c | 227
1 file changed, 156 insertions(+), 71 deletions(-)
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 3141e149d595..30963af5dba0 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -30,6 +30,7 @@
 
 #include <asm/ptrace.h>
 #include <asm/irq.h>
+#include <asm/idle.h>
 #include <asm/sync_bitops.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
@@ -51,27 +52,43 @@ static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};
 /* IRQ <-> IPI mapping */
 static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1};
 
-/* Packed IRQ information: binding type, sub-type index, and event channel. */
-struct packed_irq
-{
-	unsigned short evtchn;
-	unsigned char index;
-	unsigned char type;
-};
-
-static struct packed_irq irq_info[NR_IRQS];
-
-/* Binding types. */
-enum {
-	IRQT_UNBOUND,
+/* Interrupt types. */
+enum xen_irq_type {
+	IRQT_UNBOUND = 0,
 	IRQT_PIRQ,
 	IRQT_VIRQ,
 	IRQT_IPI,
 	IRQT_EVTCHN
 };
 
-/* Convenient shorthand for packed representation of an unbound IRQ. */
-#define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0)
+/*
+ * Packed IRQ information:
+ * type - enum xen_irq_type
+ * event channel - irq->event channel mapping
+ * cpu - cpu this event channel is bound to
+ * index - type-specific information:
+ *    PIRQ - vector, with MSB being "needs EIO"
+ *    VIRQ - virq number
+ *    IPI - IPI vector
+ *    EVTCHN -
+ */
+struct irq_info
+{
+	enum xen_irq_type type;	/* type */
+	unsigned short evtchn;	/* event channel */
+	unsigned short cpu;	/* cpu bound */
+
+	union {
+		unsigned short virq;
+		enum ipi_vector ipi;
+		struct {
+			unsigned short gsi;
+			unsigned short vector;
+		} pirq;
+	} u;
+};
+
+static struct irq_info irq_info[NR_IRQS];
 
 static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
 	[0 ... NR_EVENT_CHANNELS-1] = -1
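The new struct irq_info is a classic tagged union: the type field records which kind of binding the irq carries, and only the matching member of u is meaningful. Below is a minimal standalone sketch of that idiom (plain userspace C; the type and field names are illustrative, not taken from the patch):

#include <stdio.h>

enum demo_irq_type { T_UNBOUND = 0, T_VIRQ, T_IPI };

struct demo_info {
	enum demo_irq_type type;	/* tag: says which union member is live */
	unsigned evtchn;		/* common to every bound type */
	union {
		unsigned virq;		/* valid only when type == T_VIRQ */
		unsigned ipi;		/* valid only when type == T_IPI */
	} u;
};

int main(void)
{
	/* Bind a virq: set the tag and the matching member together. */
	struct demo_info i = { .type = T_VIRQ, .evtchn = 7, .u.virq = 3 };

	if (i.type == T_VIRQ)	/* always check the tag before reading u */
		printf("virq %u on event channel %u\n", i.u.virq, i.evtchn);
	return 0;
}

Packing the per-irq fields together this way is also what lets the patch drop the parallel cpu_evtchn[] and irq_bindcount[] arrays further down.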
@@ -84,10 +101,6 @@ static inline unsigned long *cpu_evtchn_mask(int cpu)
 {
 	return cpu_evtchn_mask_p[cpu].bits;
 }
-static u8 cpu_evtchn[NR_EVENT_CHANNELS];
-
-/* Reference counts for bindings to IRQs. */
-static int irq_bindcount[NR_IRQS];
 
 /* Xen will never allocate port zero for any purpose. */
 #define VALID_EVTCHN(chn)	((chn) != 0)
@@ -95,27 +108,108 @@ static int irq_bindcount[NR_IRQS];
 static struct irq_chip xen_dynamic_chip;
 
 /* Constructor for packed IRQ information. */
-static inline struct packed_irq mk_irq_info(u32 type, u32 index, u32 evtchn)
+static struct irq_info mk_unbound_info(void)
+{
+	return (struct irq_info) { .type = IRQT_UNBOUND };
+}
+
+static struct irq_info mk_evtchn_info(unsigned short evtchn)
+{
+	return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
+			.cpu = 0 };
+}
+
+static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
+{
+	return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
+			.cpu = 0, .u.ipi = ipi };
+}
+
+static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
 {
-	return (struct packed_irq) { evtchn, index, type };
+	return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
+			.cpu = 0, .u.virq = virq };
+}
+
+static struct irq_info mk_pirq_info(unsigned short evtchn,
+				    unsigned short gsi, unsigned short vector)
+{
+	return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
+			.cpu = 0, .u.pirq = { .gsi = gsi, .vector = vector } };
 }
 
 /*
  * Accessors for packed IRQ information.
  */
-static inline unsigned int evtchn_from_irq(int irq)
+static struct irq_info *info_for_irq(unsigned irq)
 {
-	return irq_info[irq].evtchn;
+	return &irq_info[irq];
 }
 
-static inline unsigned int index_from_irq(int irq)
+static unsigned int evtchn_from_irq(unsigned irq)
 {
-	return irq_info[irq].index;
+	return info_for_irq(irq)->evtchn;
 }
 
-static inline unsigned int type_from_irq(int irq)
+static enum ipi_vector ipi_from_irq(unsigned irq)
 {
-	return irq_info[irq].type;
+	struct irq_info *info = info_for_irq(irq);
+
+	BUG_ON(info == NULL);
+	BUG_ON(info->type != IRQT_IPI);
+
+	return info->u.ipi;
+}
+
+static unsigned virq_from_irq(unsigned irq)
+{
+	struct irq_info *info = info_for_irq(irq);
+
+	BUG_ON(info == NULL);
+	BUG_ON(info->type != IRQT_VIRQ);
+
+	return info->u.virq;
+}
+
+static unsigned gsi_from_irq(unsigned irq)
+{
+	struct irq_info *info = info_for_irq(irq);
+
+	BUG_ON(info == NULL);
+	BUG_ON(info->type != IRQT_PIRQ);
+
+	return info->u.pirq.gsi;
+}
+
+static unsigned vector_from_irq(unsigned irq)
+{
+	struct irq_info *info = info_for_irq(irq);
+
+	BUG_ON(info == NULL);
+	BUG_ON(info->type != IRQT_PIRQ);
+
+	return info->u.pirq.vector;
+}
+
+static enum xen_irq_type type_from_irq(unsigned irq)
+{
+	return info_for_irq(irq)->type;
+}
+
+static unsigned cpu_from_irq(unsigned irq)
+{
+	return info_for_irq(irq)->cpu;
+}
+
+static unsigned int cpu_from_evtchn(unsigned int evtchn)
+{
+	int irq = evtchn_to_irq[evtchn];
+	unsigned ret = 0;
+
+	if (irq != -1)
+		ret = cpu_from_irq(irq);
+
+	return ret;
 }
 
 static inline unsigned long active_evtchns(unsigned int cpu,
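The mk_*_info() constructors above rely on C99 compound literals with designated initializers: any field not named in the literal is zero-initialized, which is why mk_unbound_info() only has to set .type. A tiny standalone illustration of that language rule (userspace C, hypothetical struct):

#include <assert.h>
#include <stdio.h>

struct point { int x, y, z; };

/* Fields not named in the designated-initializer literal are zeroed. */
static struct point mk_xy(int x, int y)
{
	return (struct point) { .x = x, .y = y };
}

int main(void)
{
	struct point p = mk_xy(1, 2);

	assert(p.z == 0);	/* .z was never mentioned, so it is 0 */
	printf("%d %d %d\n", p.x, p.y, p.z);
	return 0;
}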
@@ -136,10 +230,10 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 	cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
 #endif
 
-	__clear_bit(chn, cpu_evtchn_mask(cpu_evtchn[chn]));
+	__clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
 	__set_bit(chn, cpu_evtchn_mask(cpu));
 
-	cpu_evtchn[chn] = cpu;
+	irq_info[irq].cpu = cpu;
 }
 
 static void init_evtchn_cpu_bindings(void)
@@ -154,15 +248,9 @@ static void init_evtchn_cpu_bindings(void)
 	}
 #endif
 
-	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
 	memset(cpu_evtchn_mask(0), ~0, sizeof(cpu_evtchn_mask(0)));
 }
 
-static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
-{
-	return cpu_evtchn[evtchn];
-}
-
 static inline void clear_evtchn(int port)
 {
 	struct shared_info *s = HYPERVISOR_shared_info;
@@ -240,9 +328,8 @@ static int find_unbound_irq(void)
 	int irq;
 	struct irq_desc *desc;
 
-	/* Only allocate from dynirq range */
 	for (irq = 0; irq < nr_irqs; irq++)
-		if (irq_bindcount[irq] == 0)
+		if (irq_info[irq].type == IRQT_UNBOUND)
 			break;
 
 	if (irq == nr_irqs)
@@ -252,6 +339,8 @@
 	if (WARN_ON(desc == NULL))
 		return -1;
 
+	dynamic_irq_init(irq);
+
 	return irq;
 }
 
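With the refcount array gone, find_unbound_irq() identifies a free slot purely by scanning for IRQT_UNBOUND, and hoisting dynamic_irq_init() into it means every bind_* path below gets an initialized descriptor without repeating the call. A rough userspace model of that scan-and-claim pattern (illustrative names only, not kernel code):

#include <stdio.h>

#define NR_SLOTS 8
enum { SLOT_UNBOUND = 0, SLOT_BOUND };

static int slot_type[NR_SLOTS];		/* statics start out SLOT_UNBOUND */

/* First free slot or -1: mirrors the IRQT_UNBOUND scan above. */
static int find_unbound(void)
{
	int i;

	for (i = 0; i < NR_SLOTS; i++)
		if (slot_type[i] == SLOT_UNBOUND)
			return i;
	return -1;
}

int main(void)
{
	int a = find_unbound();

	if (a >= 0)
		slot_type[a] = SLOT_BOUND;	/* claim it, as the bind_* paths do */
	printf("first free %d, next free %d\n", a, find_unbound());
	return 0;
}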
@@ -266,16 +355,13 @@ int bind_evtchn_to_irq(unsigned int evtchn)
 	if (irq == -1) {
 		irq = find_unbound_irq();
 
-		dynamic_irq_init(irq);
 		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
 					      handle_level_irq, "event");
 
 		evtchn_to_irq[evtchn] = irq;
-		irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
+		irq_info[irq] = mk_evtchn_info(evtchn);
 	}
 
-	irq_bindcount[irq]++;
-
 	spin_unlock(&irq_mapping_update_lock);
 
 	return irq;
@@ -290,12 +376,12 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 	spin_lock(&irq_mapping_update_lock);
 
 	irq = per_cpu(ipi_to_irq, cpu)[ipi];
+
 	if (irq == -1) {
 		irq = find_unbound_irq();
 		if (irq < 0)
 			goto out;
 
-		dynamic_irq_init(irq);
 		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
 					      handle_level_irq, "ipi");
 
@@ -306,15 +392,12 @@
 		evtchn = bind_ipi.port;
 
 		evtchn_to_irq[evtchn] = irq;
-		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
-
+		irq_info[irq] = mk_ipi_info(evtchn, ipi);
 		per_cpu(ipi_to_irq, cpu)[ipi] = irq;
 
 		bind_evtchn_to_cpu(evtchn, cpu);
 	}
 
-	irq_bindcount[irq]++;
-
  out:
 	spin_unlock(&irq_mapping_update_lock);
 	return irq;
@@ -340,20 +423,17 @@ static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
 
 		irq = find_unbound_irq();
 
-		dynamic_irq_init(irq);
 		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
 					      handle_level_irq, "virq");
 
 		evtchn_to_irq[evtchn] = irq;
-		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
+		irq_info[irq] = mk_virq_info(evtchn, virq);
 
 		per_cpu(virq_to_irq, cpu)[virq] = irq;
 
 		bind_evtchn_to_cpu(evtchn, cpu);
 	}
 
-	irq_bindcount[irq]++;
-
 	spin_unlock(&irq_mapping_update_lock);
 
 	return irq;
@@ -366,7 +446,7 @@ static void unbind_from_irq(unsigned int irq)
 
 	spin_lock(&irq_mapping_update_lock);
 
-	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
+	if (VALID_EVTCHN(evtchn)) {
 		close.port = evtchn;
 		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
 			BUG();
@@ -374,11 +454,11 @@ static void unbind_from_irq(unsigned int irq)
 		switch (type_from_irq(irq)) {
 		case IRQT_VIRQ:
 			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
-				[index_from_irq(irq)] = -1;
+				[virq_from_irq(irq)] = -1;
 			break;
 		case IRQT_IPI:
 			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
-				[index_from_irq(irq)] = -1;
+				[ipi_from_irq(irq)] = -1;
 			break;
 		default:
 			break;
@@ -388,7 +468,7 @@ static void unbind_from_irq(unsigned int irq)
 	bind_evtchn_to_cpu(evtchn, 0);
 
 	evtchn_to_irq[evtchn] = -1;
-	irq_info[irq] = IRQ_UNBOUND;
+	irq_info[irq] = mk_unbound_info();
 
 	dynamic_irq_cleanup(irq);
 }
@@ -506,8 +586,8 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
 	for(i = 0; i < NR_EVENT_CHANNELS; i++) {
 		if (sync_test_bit(i, sh->evtchn_pending)) {
 			printk(" %d: event %d -> irq %d\n",
-			       cpu_evtchn[i], i,
+			       cpu_from_evtchn(i), i,
 			       evtchn_to_irq[i]);
 		}
 	}
 
@@ -516,7 +596,6 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-
 /*
  * Search the CPUs pending events bitmasks. For each one found, map
  * the event number to an irq, and feed it into do_IRQ() for
@@ -529,11 +608,15 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
 void xen_evtchn_do_upcall(struct pt_regs *regs)
 {
 	int cpu = get_cpu();
+	struct pt_regs *old_regs = set_irq_regs(regs);
 	struct shared_info *s = HYPERVISOR_shared_info;
 	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
 	static DEFINE_PER_CPU(unsigned, nesting_count);
 	unsigned count;
 
+	exit_idle();
+	irq_enter();
+
 	do {
 		unsigned long pending_words;
 
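The upcall now brackets event dispatch with exit_idle()/irq_enter() ... irq_exit() and saves and restores the per-CPU irq registers pointer, mirroring what native interrupt entry does; that is what lets events flow into the generic handle_irq() path below instead of a Xen-private xen_do_IRQ(). The save/restore shape, reduced to a standalone userspace analogue of set_irq_regs() (hypothetical names, not the kernel implementation):

#include <stdio.h>

static int *current_regs;	/* stand-in for the per-CPU irq regs pointer */

/* Install new "regs", returning the previous value for later restore. */
static int *set_regs(int *regs)
{
	int *old = current_regs;

	current_regs = regs;
	return old;
}

static void handler(void)
{
	printf("handler sees regs=%d\n", *current_regs);
}

int main(void)
{
	int outer = 1, nested = 2;
	int *old;

	set_regs(&outer);		/* some earlier context */
	old = set_regs(&nested);	/* entry: save old, install new */
	handler();
	set_regs(old);			/* exit: restore, so nesting is safe */
	printf("restored regs=%d\n", *current_regs);
	return 0;
}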
@@ -558,7 +641,7 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
 				int irq = evtchn_to_irq[port];
 
 				if (irq != -1)
-					xen_do_IRQ(irq, regs);
+					handle_irq(irq, regs);
 			}
 		}
 
@@ -569,12 +652,17 @@
 	} while(count != 1);
 
 out:
+	irq_exit();
+	set_irq_regs(old_regs);
+
 	put_cpu();
 }
 
 /* Rebind a new event channel to an existing irq. */
 void rebind_evtchn_irq(int evtchn, int irq)
 {
+	struct irq_info *info = info_for_irq(irq);
+
 	/* Make sure the irq is masked, since the new event channel
 	   will also be masked. */
 	disable_irq(irq);
@@ -584,11 +672,11 @@ void rebind_evtchn_irq(int evtchn, int irq)
 	/* After resume the irq<->evtchn mappings are all cleared out */
 	BUG_ON(evtchn_to_irq[evtchn] != -1);
 	/* Expect irq to have been bound before,
-	   so the bindcount should be non-0 */
-	BUG_ON(irq_bindcount[irq] == 0);
+	   so there should be a proper type */
+	BUG_ON(info->type == IRQT_UNBOUND);
 
 	evtchn_to_irq[evtchn] = irq;
-	irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
+	irq_info[irq] = mk_evtchn_info(evtchn);
 
 	spin_unlock(&irq_mapping_update_lock);
 
@@ -698,8 +786,7 @@ static void restore_cpu_virqs(unsigned int cpu)
 		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
 			continue;
 
-		BUG_ON(irq_info[irq].type != IRQT_VIRQ);
-		BUG_ON(irq_info[irq].index != virq);
+		BUG_ON(virq_from_irq(irq) != virq);
 
 		/* Get a new binding from Xen. */
 		bind_virq.virq = virq;
@@ -711,7 +798,7 @@
 
 		/* Record the new mapping. */
 		evtchn_to_irq[evtchn] = irq;
-		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
+		irq_info[irq] = mk_virq_info(evtchn, virq);
 		bind_evtchn_to_cpu(evtchn, cpu);
 
 		/* Ready for use. */
@@ -728,8 +815,7 @@ static void restore_cpu_ipis(unsigned int cpu)
 		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
 			continue;
 
-		BUG_ON(irq_info[irq].type != IRQT_IPI);
-		BUG_ON(irq_info[irq].index != ipi);
+		BUG_ON(ipi_from_irq(irq) != ipi);
 
 		/* Get a new binding from Xen. */
 		bind_ipi.vcpu = cpu;
@@ -740,7 +826,7 @@
 
 		/* Record the new mapping. */
 		evtchn_to_irq[evtchn] = irq;
-		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
+		irq_info[irq] = mk_ipi_info(evtchn, ipi);
 		bind_evtchn_to_cpu(evtchn, cpu);
 
 		/* Ready for use. */
@@ -820,8 +906,11 @@ void xen_irq_resume(void)
 
 static struct irq_chip xen_dynamic_chip __read_mostly = {
 	.name		= "xen-dyn",
+
+	.disable	= disable_dynirq,
 	.mask		= disable_dynirq,
 	.unmask		= enable_dynirq,
+
 	.ack		= ack_dynirq,
 	.set_affinity	= set_affinity_irq,
 	.retrigger	= retrigger_dynirq,
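Note that .disable is pointed at the same disable_dynirq handler already used for .mask, so for this chip disabling an irq and masking its event channel appear to be the same operation. The ops-table shape, as a standalone sketch of reusing one callback for two slots (illustrative names, not kernel code):

#include <stdio.h>

struct chip_ops {
	const char *name;
	void (*disable)(int irq);
	void (*mask)(int irq);
};

static void mask_op(int irq)
{
	printf("%d: masked\n", irq);
}

/* One handler can serve both slots, as xen-dyn does with disable_dynirq. */
static const struct chip_ops demo_chip = {
	.name    = "demo-dyn",
	.disable = mask_op,
	.mask    = mask_op,
};

int main(void)
{
	demo_chip.disable(5);
	demo_chip.mask(5);
	return 0;
}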
@@ -841,9 +930,5 @@ void __init xen_init_IRQ(void)
 	for (i = 0; i < NR_EVENT_CHANNELS; i++)
 		mask_evtchn(i);
 
-	/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
-	for (i = 0; i < nr_irqs; i++)
-		irq_bindcount[i] = 0;
-
 	irq_ctx_init(smp_processor_id());
 }