author      Ian Campbell <ian.campbell@citrix.com>             2011-03-10 11:08:12 -0500
committer   Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>     2011-03-10 14:48:42 -0500
commit      ca62ce8cde36cde5a31af8f9bb23e6430700e437 (patch)
tree        7009cf74736a3c97519707162d1ba119772ad6b3 /drivers/xen
parent      6cb6537d34a146c8f3e232da9ffd79ae1265fed3 (diff)
xen: events: dynamically allocate irq info structures
Removes the nr_irqs-sized irq_info array allocated at start of day; each irq_info is now allocated on demand (see the sketch after the sign-offs).
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
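
In short: rather than indexing a statically allocated, nr_irqs-sized irq_info array, each IRQ's info structure is kzalloc'd when the IRQ is initialised, attached to the IRQ with set_irq_data(), and looked up with get_irq_data(). A condensed sketch of the new pattern, paraphrased from the hunks below (not a verbatim copy of drivers/xen/events.c, which also sets up the irq_desc and CPU affinity here):

    /* Look up the Xen metadata for an IRQ (was: &irq_info[irq]). */
    static struct irq_info *info_for_irq(unsigned irq)
    {
        return get_irq_data(irq);
    }

    static void xen_irq_init(unsigned irq)
    {
        /* Allocate per-IRQ metadata instead of using a static array. */
        struct irq_info *info = kzalloc(sizeof(*info), GFP_KERNEL);

        if (info == NULL)
            panic("Unable to allocate metadata for IRQ%d\n", irq);

        info->type = IRQT_UNBOUND;
        set_irq_data(irq, info);        /* makes info_for_irq() work */
        list_add_tail(&info->list, &xen_irq_list_head);
    }

xen_free_irq() reverses this: list_del(&info->list), set_irq_data(irq, NULL), then kfree(info).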
Diffstat (limited to 'drivers/xen')
-rw-r--r--   drivers/xen/events.c   31
1 file changed, 16 insertions(+), 15 deletions(-)
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index e119989ec15..002283e0174 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -107,7 +107,6 @@ struct irq_info
 #define PIRQ_NEEDS_EOI	(1 << 0)
 #define PIRQ_SHAREABLE	(1 << 1)
 
-static struct irq_info *irq_info;
 static int *pirq_to_irq;
 
 static int *evtchn_to_irq;
@@ -125,7 +124,7 @@ static struct irq_chip xen_pirq_chip;
 /* Get info for IRQ */
 static struct irq_info *info_for_irq(unsigned irq)
 {
-	return &irq_info[irq];
+	return get_irq_data(irq);
 }
 
 /* Constructors for packed IRQ information. */
@@ -309,7 +308,7 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 	clear_bit(chn, per_cpu(cpu_evtchn_mask, cpu_from_irq(irq)));
 	set_bit(chn, per_cpu(cpu_evtchn_mask, cpu));
 
-	irq_info[irq].cpu = cpu;
+	info_for_irq(irq)->cpu = cpu;
 }
 
 static void init_evtchn_cpu_bindings(void)
@@ -328,7 +327,6 @@ static void init_evtchn_cpu_bindings(void)
 	for_each_possible_cpu(i)
 		memset(per_cpu(cpu_evtchn_mask, i),
 		       (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
-
 }
 
 static inline void clear_evtchn(int port)
@@ -411,10 +409,14 @@ static void xen_irq_init(unsigned irq)
 	/* By default all event channels notify CPU#0. */
 	cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
 
-	info = &irq_info[irq];
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (info == NULL)
+		panic("Unable to allocate metadata for IRQ%d\n", irq);
 
 	info->type = IRQT_UNBOUND;
 
+	set_irq_data(irq, info);
+
 	list_add_tail(&info->list, &xen_irq_list_head);
 }
 
@@ -481,12 +483,14 @@ static int xen_allocate_irq_gsi(unsigned gsi)
 
 static void xen_free_irq(unsigned irq)
 {
-	struct irq_info *info = &irq_info[irq];
-
-	info->type = IRQT_UNBOUND;
+	struct irq_info *info = get_irq_data(irq);
 
 	list_del(&info->list);
 
+	set_irq_data(irq, NULL);
+
+	kfree(info);
+
 	/* Legacy IRQ descriptors are managed by the arch. */
 	if (irq < NR_IRQS_LEGACY)
 		return;
@@ -649,10 +653,9 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
 
 	spin_lock(&irq_mapping_update_lock);
 
-	if ((pirq > nr_irqs) || (gsi > nr_irqs)) {
-		printk(KERN_WARNING "xen_map_pirq_gsi: %s %s is incorrect!\n",
-			pirq > nr_irqs ? "pirq" :"",
-			gsi > nr_irqs ? "gsi" : "");
+	if (pirq > nr_irqs) {
+		printk(KERN_WARNING "xen_map_pirq_gsi: pirq %d > nr_irqs %d!\n",
+			pirq, nr_irqs);
 		goto out;
 	}
 
@@ -889,7 +892,7 @@ static void unbind_from_irq(unsigned int irq)
 		evtchn_to_irq[evtchn] = -1;
 	}
 
-	BUG_ON(irq_info[irq].type == IRQT_UNBOUND);
+	BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);
 
 	xen_free_irq(irq);
 
@@ -1509,8 +1512,6 @@ void __init xen_init_IRQ(void)
 {
 	int i;
 
-	irq_info = kcalloc(nr_irqs, sizeof(*irq_info), GFP_KERNEL);
-
 	/* We are using nr_irqs as the maximum number of pirq available but
 	 * that number is actually chosen by Xen and we don't know exactly
 	 * what it is. Be careful choosing high pirq numbers. */