Diffstat (limited to 'kernel/irq')
-rw-r--r--	kernel/irq/chip.c	8
-rw-r--r--	kernel/irq/handle.c	16
-rw-r--r--	kernel/irq/manage.c	22
-rw-r--r--	kernel/irq/numa_migrate.c	7
-rw-r--r--	kernel/irq/spurious.c	14
5 files changed, 48 insertions, 19 deletions
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 1310856cb22b..03d0bed2b8d9 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -291,7 +291,8 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq)
 		desc->chip->mask_ack(irq);
 	else {
 		desc->chip->mask(irq);
-		desc->chip->ack(irq);
+		if (desc->chip->ack)
+			desc->chip->ack(irq);
 	}
 }
 
@@ -384,6 +385,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 out_unlock:
 	spin_unlock(&desc->lock);
 }
+EXPORT_SYMBOL_GPL(handle_level_irq);
 
 /**
  * handle_fasteoi_irq - irq handler for transparent controllers
@@ -476,7 +478,8 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 	kstat_incr_irqs_this_cpu(irq, desc);
 
 	/* Start handling the irq */
-	desc->chip->ack(irq);
+	if (desc->chip->ack)
+		desc->chip->ack(irq);
 	desc = irq_remap_to_desc(irq, desc);
 
 	/* Mark the IRQ currently in progress.*/
@@ -594,6 +597,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 	}
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
+EXPORT_SYMBOL_GPL(__set_irq_handler);
 
 void
 set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
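
Note on the chip.c hunks above: ->ack is now treated as optional in mask_ack_irq() and handle_edge_irq(), and handle_level_irq()/__set_irq_handler() gain GPL exports. A minimal sketch of what that permits; demo_chip, demo_mask, demo_unmask and demo_wire_irq are invented for illustration and are not part of the patch:

#include <linux/init.h>
#include <linux/irq.h>

static void demo_mask(unsigned int irq)
{
	/* silence the line in hardware (stub for illustration) */
}

static void demo_unmask(unsigned int irq)
{
	/* re-enable the line (stub for illustration) */
}

static struct irq_chip demo_chip = {
	.name	= "demo",
	.mask	= demo_mask,
	.unmask	= demo_unmask,
	/* .ack left NULL on purpose: the new checks make this safe */
};

static void __init demo_wire_irq(unsigned int irq)
{
	/* handle_level_irq() is now exported, so modular code can do this too */
	set_irq_chip_and_handler(irq, &demo_chip, handle_level_irq);
}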
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 49d642b62c64..759b8b04d294 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -39,6 +39,18 @@ void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
 	ack_bad_irq(irq);
 }
 
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
+static void __init init_irq_default_affinity(void)
+{
+	alloc_bootmem_cpumask_var(&irq_default_affinity);
+	cpumask_setall(irq_default_affinity);
+}
+#else
+static void __init init_irq_default_affinity(void)
+{
+}
+#endif
+
 /*
  * Linux has a controller-independent interrupt architecture.
  * Every controller has a 'controller-template', that is used
@@ -136,6 +148,8 @@ int __init early_irq_init(void)
 	int legacy_count;
 	int i;
 
+	init_irq_default_affinity();
+
 	desc = irq_desc_legacy;
 	legacy_count = ARRAY_SIZE(irq_desc_legacy);
 
@@ -222,6 +236,8 @@ int __init early_irq_init(void)
 	int count;
 	int i;
 
+	init_irq_default_affinity();
+
 	desc = irq_desc;
 	count = ARRAY_SIZE(irq_desc);
 
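
Note on the handle.c hunks above: the default-affinity mask is now set up inside early_irq_init() with the bootmem variant of the cpumask allocator, which works before the slab allocator is available. A minimal sketch of that allocation pattern; demo_mask and demo_early_setup are invented for illustration:

#include <linux/cpumask.h>
#include <linux/init.h>

static cpumask_var_t demo_mask;

static void __init demo_early_setup(void)
{
	/* bootmem variant: usable from early_irq_init(), before kmalloc works */
	alloc_bootmem_cpumask_var(&demo_mask);
	/* default to "all CPUs allowed", as the patch does for irq_default_affinity */
	cpumask_setall(demo_mask);
}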
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index cd0cd8dcb345..cbc3828faf5f 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -15,17 +15,9 @@
 
 #include "internals.h"
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
 cpumask_var_t irq_default_affinity;
 
-static int init_irq_default_affinity(void)
-{
-	alloc_cpumask_var(&irq_default_affinity, GFP_KERNEL);
-	cpumask_setall(irq_default_affinity);
-	return 0;
-}
-core_initcall(init_irq_default_affinity);
-
 /**
  * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  * @irq: interrupt number to wait for
@@ -117,7 +109,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 /*
  * Generic version of the affinity autoselector.
  */
-int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
+static int setup_affinity(unsigned int irq, struct irq_desc *desc)
 {
 	if (!irq_can_set_affinity(irq))
 		return 0;
@@ -141,7 +133,7 @@ set_affinity:
 	return 0;
 }
 #else
-static inline int do_irq_select_affinity(unsigned int irq, struct irq_desc *d)
+static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
 {
 	return irq_select_affinity(irq);
 }
@@ -157,14 +149,14 @@ int irq_select_affinity_usr(unsigned int irq)
 	int ret;
 
 	spin_lock_irqsave(&desc->lock, flags);
-	ret = do_irq_select_affinity(irq, desc);
+	ret = setup_affinity(irq, desc);
 	spin_unlock_irqrestore(&desc->lock, flags);
 
 	return ret;
 }
 
 #else
-static inline int do_irq_select_affinity(int irq, struct irq_desc *desc)
+static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
 {
 	return 0;
 }
@@ -496,7 +488,7 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 			desc->status |= IRQ_NO_BALANCING;
 
 		/* Set default affinity mask once everything is setup */
-		do_irq_select_affinity(irq, desc);
+		setup_affinity(irq, desc);
 
 	} else if ((new->flags & IRQF_TRIGGER_MASK)
 			&& (new->flags & IRQF_TRIGGER_MASK)
@@ -717,7 +709,7 @@ int request_irq(unsigned int irq, irq_handler_t handler,
 	if (!handler)
 		return -EINVAL;
 
-	action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
+	action = kmalloc(sizeof(struct irqaction), GFP_KERNEL);
 	if (!action)
 		return -ENOMEM;
 
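
Note on the manage.c hunks above: the affinity autoselector is renamed to setup_affinity() and made static, its initcall moves into early_irq_init() in handle.c, and request_irq() allocates its irqaction with GFP_KERNEL, matching the fact that request_irq() is called from sleepable process context. A hedged sketch of such a call site; the irq number, device name and handler are invented for illustration:

#include <linux/interrupt.h>

static irqreturn_t demo_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int demo_probe(void)
{
	int err;

	/* process context, may sleep: the GFP_KERNEL allocation inside
	 * request_irq() is the natural fit here */
	err = request_irq(16, demo_isr, IRQF_SHARED, "demo-dev", &demo_isr);
	if (err)
		return err;

	/* the matching free_irq(16, &demo_isr) belongs in the teardown path */
	return 0;
}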
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index c500cfe422b6..aef18ab6b75b 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -66,7 +66,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
 	desc = irq_desc_ptrs[irq];
 
 	if (desc && old_desc != desc)
-		goto out_unlock;
+			goto out_unlock;
 
 	node = cpu_to_node(cpu);
 	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
@@ -79,10 +79,15 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
 	init_copy_one_irq_desc(irq, old_desc, desc, cpu);
 
 	irq_desc_ptrs[irq] = desc;
+	spin_unlock_irqrestore(&sparse_irq_lock, flags);
 
 	/* free the old one */
 	free_one_irq_desc(old_desc, desc);
+	spin_unlock(&old_desc->lock);
 	kfree(old_desc);
+	spin_lock(&desc->lock);
+
+	return desc;
 
 out_unlock:
 	spin_unlock_irqrestore(&sparse_irq_lock, flags);
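
Note on the numa_migrate.c hunks above: sparse_irq_lock is dropped once the new descriptor is published, old_desc->lock (held by the caller) is released before kfree(), and the new descriptor's lock is taken before returning so the caller's unlock still pairs up. A generic, hedged sketch of that lock handoff with made-up types, not the kernel's actual helpers:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_obj {
	spinlock_t	lock;
	int		data;
};

/* Caller holds old->lock; on success the replacement is returned locked. */
static struct demo_obj *demo_replace(struct demo_obj *old)
{
	struct demo_obj *new;

	new = kzalloc(sizeof(*new), GFP_ATOMIC);
	if (!new)
		return old;			/* keep the old object on failure */

	spin_lock_init(&new->lock);
	new->data = old->data;			/* copy state across */

	spin_unlock(&old->lock);		/* done with the old object */
	kfree(old);
	spin_lock(&new->lock);			/* hand it back locked */
	return new;
}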
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index dd364c11e56e..4d568294de3e 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -104,7 +104,7 @@ static int misrouted_irq(int irq)
 	return ok;
 }
 
-static void poll_spurious_irqs(unsigned long dummy)
+static void poll_all_shared_irqs(void)
 {
 	struct irq_desc *desc;
 	int i;
@@ -123,11 +123,23 @@ static void poll_spurious_irqs(unsigned long dummy)
 
 		try_one_irq(i, desc);
 	}
+}
+
+static void poll_spurious_irqs(unsigned long dummy)
+{
+	poll_all_shared_irqs();
 
 	mod_timer(&poll_spurious_irq_timer,
 		  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
 }
 
+#ifdef CONFIG_DEBUG_SHIRQ
+void debug_poll_all_shared_irqs(void)
+{
+	poll_all_shared_irqs();
+}
+#endif
+
 /*
  * If 99,900 of the previous 100,000 interrupts have not been handled
  * then assume that the IRQ is stuck in some manner. Drop a diagnostic
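
Note on the spurious.c hunks above: the polling loop is split out into poll_all_shared_irqs() so that, under CONFIG_DEBUG_SHIRQ, debug_poll_all_shared_irqs() can trigger it on demand instead of waiting for the spurious-IRQ timer. A hedged sketch of a possible caller; the call site and the header carrying the declaration are assumptions, not part of this diff:

#include <linux/interrupt.h>

static void demo_kick_shared_irq_poll(void)
{
#ifdef CONFIG_DEBUG_SHIRQ
	/* walks every descriptor via try_one_irq(), same as the timer path */
	debug_poll_all_shared_irqs();
#endif
}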