author    Jeremy Fitzhardinge <jeremy@goop.org>    2009-02-06 17:09:45 -0500
committer Ingo Molnar <mingo@elte.hu>              2009-02-09 06:18:00 -0500
commit    d77bbd4db475e2edc78edb7f94a258159c140b54 (patch)
tree      8dfa75977211479a73faac5371a548a3e521bd1e /drivers/xen/events.c
parent    ced40d0f3e8833bb8d7d8e2cbfac7da0bf7008c4 (diff)
xen: remove irq bindcount
There should be no need for us to maintain our own bind count for irqs,
since the surrounding irq system should keep track of shared irqs for us.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
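To illustrate the idea, here is a minimal standalone C sketch (not the kernel
code itself; the struct and helper below are simplified stand-ins for the real
drivers/xen/events.c definitions): because IRQT_UNBOUND is explicitly zero, a
zero-initialized irq_info array already marks every irq as free, so the type
field alone can replace the separate bindcount array when scanning for an
unbound irq.

/*
 * Simplified sketch of the post-patch allocation logic.
 * Assumed/simplified names: NR_IRQS size, struct irq_info with
 * only a type field, find_unbound_irq() returning -1 on failure.
 */
#include <stdio.h>

#define NR_IRQS 16

enum xen_irq_type {
	IRQT_UNBOUND = 0,	/* zero so a zeroed array starts out unbound */
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN,
};

struct irq_info {
	enum xen_irq_type type;
};

/* Zero-initialized, so every slot begins as IRQT_UNBOUND. */
static struct irq_info irq_info[NR_IRQS];

/* Before the patch this scanned a bindcount array for a zero entry;
 * now an irq whose info is still IRQT_UNBOUND is considered free. */
static int find_unbound_irq(void)
{
	int irq;

	for (irq = 0; irq < NR_IRQS; irq++)
		if (irq_info[irq].type == IRQT_UNBOUND)
			return irq;

	return -1;		/* no free irq available */
}

int main(void)
{
	int irq = find_unbound_irq();

	irq_info[irq].type = IRQT_EVTCHN;	/* bind the slot */
	printf("bound irq %d, next free is %d\n", irq, find_unbound_irq());
	return 0;
}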
Diffstat (limited to 'drivers/xen/events.c')
-rw-r--r--	drivers/xen/events.c	26
1 file changed, 7 insertions(+), 19 deletions(-)
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 0541e07d4f67..459121c53251 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -54,7 +54,7 @@ static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -
 
 /* Interrupt types. */
 enum xen_irq_type {
-	IRQT_UNBOUND,
+	IRQT_UNBOUND = 0,
 	IRQT_PIRQ,
 	IRQT_VIRQ,
 	IRQT_IPI,
@@ -102,9 +102,6 @@ static inline unsigned long *cpu_evtchn_mask(int cpu)
 	return cpu_evtchn_mask_p[cpu].bits;
 }
 
-/* Reference counts for bindings to IRQs. */
-static int irq_bindcount[NR_IRQS];
-
 /* Xen will never allocate port zero for any purpose. */
 #define VALID_EVTCHN(chn)	((chn) != 0)
 
@@ -330,9 +327,8 @@ static int find_unbound_irq(void)
 	int irq;
 	struct irq_desc *desc;
 
-	/* Only allocate from dynirq range */
 	for (irq = 0; irq < nr_irqs; irq++)
-		if (irq_bindcount[irq] == 0)
+		if (irq_info[irq].type == IRQT_UNBOUND)
 			break;
 
 	if (irq == nr_irqs)
@@ -365,8 +361,6 @@ int bind_evtchn_to_irq(unsigned int evtchn)
 		irq_info[irq] = mk_evtchn_info(evtchn);
 	}
 
-	irq_bindcount[irq]++;
-
 	spin_unlock(&irq_mapping_update_lock);
 
 	return irq;
@@ -403,8 +397,6 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 		bind_evtchn_to_cpu(evtchn, cpu);
 	}
 
-	irq_bindcount[irq]++;
-
  out:
 	spin_unlock(&irq_mapping_update_lock);
 	return irq;
@@ -441,8 +433,6 @@ static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
 		bind_evtchn_to_cpu(evtchn, cpu);
 	}
 
-	irq_bindcount[irq]++;
-
 	spin_unlock(&irq_mapping_update_lock);
 
 	return irq;
@@ -455,7 +445,7 @@ static void unbind_from_irq(unsigned int irq)
 
 	spin_lock(&irq_mapping_update_lock);
 
-	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
+	if (VALID_EVTCHN(evtchn)) {
 		close.port = evtchn;
 		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
 			BUG();
@@ -681,6 +671,8 @@ out:
 /* Rebind a new event channel to an existing irq. */
 void rebind_evtchn_irq(int evtchn, int irq)
 {
+	struct irq_info *info = info_for_irq(irq);
+
 	/* Make sure the irq is masked, since the new event channel
 	   will also be masked. */
 	disable_irq(irq);
@@ -690,8 +682,8 @@ void rebind_evtchn_irq(int evtchn, int irq)
 	/* After resume the irq<->evtchn mappings are all cleared out */
 	BUG_ON(evtchn_to_irq[evtchn] != -1);
 	/* Expect irq to have been bound before,
-	   so the bindcount should be non-0 */
-	BUG_ON(irq_bindcount[irq] == 0);
+	   so there should be a proper type */
+	BUG_ON(info->type == IRQT_UNBOUND);
 
 	evtchn_to_irq[evtchn] = irq;
 	irq_info[irq] = mk_evtchn_info(evtchn);
@@ -948,9 +940,5 @@ void __init xen_init_IRQ(void)
 	for (i = 0; i < NR_EVENT_CHANNELS; i++)
 		mask_evtchn(i);
 
-	/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
-	for (i = 0; i < nr_irqs; i++)
-		irq_bindcount[i] = 0;
-
 	irq_ctx_init(smp_processor_id());
 }