Diffstat (limited to 'kernel/irq/irqdesc.c')
-rw-r--r--	kernel/irq/irqdesc.c	36
1 file changed, 26 insertions, 10 deletions
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 00bb0aeea1d0..948b50e78549 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -54,14 +54,25 @@ static void __init init_irq_default_affinity(void)
 #endif
 
 #ifdef CONFIG_SMP
-static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
+static int alloc_masks(struct irq_desc *desc, int node)
 {
 	if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
-				     gfp, node))
+				     GFP_KERNEL, node))
 		return -ENOMEM;
 
+#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	if (!zalloc_cpumask_var_node(&desc->irq_common_data.effective_affinity,
+				     GFP_KERNEL, node)) {
+		free_cpumask_var(desc->irq_common_data.affinity);
+		return -ENOMEM;
+	}
+#endif
+
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
+	if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) {
+#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+		free_cpumask_var(desc->irq_common_data.effective_affinity);
+#endif
 		free_cpumask_var(desc->irq_common_data.affinity);
 		return -ENOMEM;
 	}
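For context, the new effective_affinity mask records the CPU(s) an interrupt is actually routed to, as opposed to the affinity that was requested. Below is a minimal sketch of how an irqchip's irq_set_affinity() callback might populate it when CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK is enabled; example_irq_set_affinity(), example_hw_route_irq() and the pick-first-CPU policy are placeholders for illustration, not code from this patch:

static int example_irq_set_affinity(struct irq_data *d,
				    const struct cpumask *mask_val, bool force)
{
	/* Placeholder policy: route to the first CPU in the requested mask */
	unsigned int cpu = cpumask_first(mask_val);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	example_hw_route_irq(d->hwirq, cpu);	/* hypothetical hardware hook */

	/* Record what was actually programmed into the hardware */
	irq_data_update_effective_affinity(d, cpumask_of(cpu));
	return IRQ_SET_MASK_OK_DONE;
}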
@@ -86,7 +97,7 @@ static void desc_smp_init(struct irq_desc *desc, int node,
 
 #else
 static inline int
-alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
+alloc_masks(struct irq_desc *desc, int node) { return 0; }
 static inline void
 desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
 #endif
@@ -105,6 +116,7 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
 	desc->irq_data.chip_data = NULL;
 	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
 	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
+	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
 	desc->handle_irq = handle_bad_irq;
 	desc->depth = 1;
 	desc->irq_count = 0;
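As a side note, the added IRQD_IRQ_MASKED default can be observed through the existing irqd_irq_masked()/irqd_irq_disabled() accessors. The snippet below is only an illustration of the state a freshly initialized descriptor is expected to report, not code from this patch:

static void example_check_new_desc(struct irq_desc *desc)
{
	/* A just-initialized descriptor starts out disabled and masked */
	WARN_ON(!irqd_irq_disabled(&desc->irq_data));
	WARN_ON(!irqd_irq_masked(&desc->irq_data));
}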
@@ -324,6 +336,9 @@ static void free_masks(struct irq_desc *desc)
 	free_cpumask_var(desc->pending_mask);
 #endif
 	free_cpumask_var(desc->irq_common_data.affinity);
+#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	free_cpumask_var(desc->irq_common_data.effective_affinity);
+#endif
 }
 #else
 static inline void free_masks(struct irq_desc *desc) { }
@@ -344,9 +359,8 @@ static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
 			       struct module *owner)
 {
 	struct irq_desc *desc;
-	gfp_t gfp = GFP_KERNEL;
 
-	desc = kzalloc_node(sizeof(*desc), gfp, node);
+	desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node);
 	if (!desc)
 		return NULL;
 	/* allocate based on nr_cpu_ids */
@@ -354,7 +368,7 @@ static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
 	if (!desc->kstat_irqs)
 		goto err_desc;
 
-	if (alloc_masks(desc, gfp, node))
+	if (alloc_masks(desc, node))
 		goto err_kstat;
 
 	raw_spin_lock_init(&desc->lock);
@@ -394,6 +408,7 @@ static void free_desc(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 
+	irq_remove_debugfs_entry(desc);
 	unregister_irq_proc(irq, desc);
 
 	/*
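The irq_remove_debugfs_entry() call is made unconditionally here; when the generic irq debugfs support is not configured, the usual kernel pattern is a no-op stub so the call compiles away. A sketch of that pattern, assuming the declarations sit alongside the other helpers in kernel/irq/internals.h:

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
/* real implementation removes the per-IRQ debugfs file */
void irq_remove_debugfs_entry(struct irq_desc *desc);
#else
static inline void irq_remove_debugfs_entry(struct irq_desc *desc) { }
#endif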
@@ -480,7 +495,8 @@ int __init early_irq_init(void)
 
 	/* Let arch update nr_irqs and return the nr of preallocated irqs */
 	initcnt = arch_probe_nr_irqs();
-	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);
+	printk(KERN_INFO "NR_IRQS: %d, nr_irqs: %d, preallocated irqs: %d\n",
+	       NR_IRQS, nr_irqs, initcnt);
 
 	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
 		nr_irqs = IRQ_BITMAP_BITS;
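For illustration, the reworked format string yields a boot log line along these lines (the numbers are made up and depend on the architecture and configuration):

	NR_IRQS: 64, nr_irqs: 64, preallocated irqs: 16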
@@ -516,14 +532,14 @@ int __init early_irq_init(void)
 
 	init_irq_default_affinity();
 
-	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
+	printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS);
 
 	desc = irq_desc;
 	count = ARRAY_SIZE(irq_desc);
 
 	for (i = 0; i < count; i++) {
 		desc[i].kstat_irqs = alloc_percpu(unsigned int);
-		alloc_masks(&desc[i], GFP_KERNEL, node);
+		alloc_masks(&desc[i], node);
 		raw_spin_lock_init(&desc[i].lock);
 		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
 		desc_set_defaults(i, &desc[i], node, NULL, NULL);