Diffstat (limited to 'kernel/irq')
-rw-r--r--   kernel/irq/chip.c           12
-rw-r--r--   kernel/irq/handle.c         93
-rw-r--r--   kernel/irq/internals.h       8
-rw-r--r--   kernel/irq/manage.c        204
-rw-r--r--   kernel/irq/migration.c      12
-rw-r--r--   kernel/irq/numa_migrate.c   30
-rw-r--r--   kernel/irq/proc.c            4
-rw-r--r--   kernel/irq/spurious.c       14
8 files changed, 232 insertions(+), 145 deletions(-)
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 7de11bd64dfe..c687ba4363f2 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -46,7 +46,10 @@ void dynamic_irq_init(unsigned int irq)
 	desc->irq_count = 0;
 	desc->irqs_unhandled = 0;
 #ifdef CONFIG_SMP
-	cpumask_setall(&desc->affinity);
+	cpumask_setall(desc->affinity);
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	cpumask_clear(desc->pending_mask);
+#endif
 #endif
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
@@ -78,6 +81,7 @@ void dynamic_irq_cleanup(unsigned int irq)
 	desc->handle_irq = handle_bad_irq;
 	desc->chip = &no_irq_chip;
 	desc->name = NULL;
+	clear_kstat_irqs(desc);
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
 
@@ -290,7 +294,8 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq)
 		desc->chip->mask_ack(irq);
 	else {
 		desc->chip->mask(irq);
-		desc->chip->ack(irq);
+		if (desc->chip->ack)
+			desc->chip->ack(irq);
 	}
 }
 
@@ -476,7 +481,8 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 	kstat_incr_irqs_this_cpu(irq, desc);
 
 	/* Start handling the irq */
-	desc->chip->ack(irq);
+	if (desc->chip->ack)
+		desc->chip->ack(irq);
 	desc = irq_remap_to_desc(irq, desc);
 
 	/* Mark the IRQ currently in progress.*/
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 3aba8d12f328..9ebf77968871 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -17,6 +17,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/rculist.h>
 #include <linux/hash.h>
+#include <linux/bootmem.h>
 
 #include "internals.h"
 
@@ -69,6 +70,7 @@ int nr_irqs = NR_IRQS;
 EXPORT_SYMBOL_GPL(nr_irqs);
 
 #ifdef CONFIG_SPARSE_IRQ
+
 static struct irq_desc irq_desc_init = {
 	.irq	    = -1,
 	.status	    = IRQ_DISABLED,
@@ -76,26 +78,25 @@ static struct irq_desc irq_desc_init = {
 	.handle_irq = handle_bad_irq,
 	.depth      = 1,
 	.lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
-	.affinity   = CPU_MASK_ALL
-#endif
 };
 
 void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
 {
-	unsigned long bytes;
-	char *ptr;
 	int node;
-
-	/* Compute how many bytes we need per irq and allocate them */
-	bytes = nr * sizeof(unsigned int);
+	void *ptr;
 
 	node = cpu_to_node(cpu);
-	ptr = kzalloc_node(bytes, GFP_ATOMIC, node);
-	printk(KERN_DEBUG "  alloc kstat_irqs on cpu %d node %d\n", cpu, node);
+	ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), GFP_ATOMIC, node);
 
-	if (ptr)
-		desc->kstat_irqs = (unsigned int *)ptr;
+	/*
+	 * don't overwite if can not get new one
+	 * init_copy_kstat_irqs() could still use old one
+	 */
+	if (ptr) {
+		printk(KERN_DEBUG "  alloc kstat_irqs on cpu %d node %d\n",
+			 cpu, node);
+		desc->kstat_irqs = ptr;
+	}
 }
 
 static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
@@ -113,6 +114,10 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
 		printk(KERN_ERR "can not alloc kstat_irqs\n");
 		BUG_ON(1);
 	}
+	if (!init_alloc_desc_masks(desc, cpu, false)) {
+		printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
+		BUG_ON(1);
+	}
 	arch_init_chip_data(desc, cpu);
 }
 
@@ -121,7 +126,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
  */
 DEFINE_SPINLOCK(sparse_irq_lock);
 
-struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;
+struct irq_desc **irq_desc_ptrs __read_mostly;
 
 static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
 	[0 ... NR_IRQS_LEGACY-1] = {
@@ -131,14 +136,10 @@ static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_sm
 		.handle_irq = handle_bad_irq,
 		.depth	    = 1,
 		.lock	    = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
-		.affinity   = CPU_MASK_ALL
-#endif
 	}
 };
 
-/* FIXME: use bootmem alloc ...*/
-static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];
+static unsigned int *kstat_irqs_legacy;
 
 int __init early_irq_init(void)
 {
@@ -148,18 +149,30 @@ int __init early_irq_init(void)
 
 	init_irq_default_affinity();
 
+	/* initialize nr_irqs based on nr_cpu_ids */
+	arch_probe_nr_irqs();
+	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
+
 	desc = irq_desc_legacy;
 	legacy_count = ARRAY_SIZE(irq_desc_legacy);
 
+	/* allocate irq_desc_ptrs array based on nr_irqs */
+	irq_desc_ptrs = alloc_bootmem(nr_irqs * sizeof(void *));
+
+	/* allocate based on nr_cpu_ids */
+	/* FIXME: invert kstat_irgs, and it'd be a per_cpu_alloc'd thing */
+	kstat_irqs_legacy = alloc_bootmem(NR_IRQS_LEGACY * nr_cpu_ids *
+					  sizeof(int));
+
 	for (i = 0; i < legacy_count; i++) {
 		desc[i].irq = i;
-		desc[i].kstat_irqs = kstat_irqs_legacy[i];
+		desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
 		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
-
+		init_alloc_desc_masks(&desc[i], 0, true);
 		irq_desc_ptrs[i] = desc + i;
 	}
 
-	for (i = legacy_count; i < NR_IRQS; i++)
+	for (i = legacy_count; i < nr_irqs; i++)
 		irq_desc_ptrs[i] = NULL;
 
 	return arch_early_irq_init();
@@ -167,7 +180,10 @@ int __init early_irq_init(void)
 
 struct irq_desc *irq_to_desc(unsigned int irq)
 {
-	return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
+	if (irq_desc_ptrs && irq < nr_irqs)
+		return irq_desc_ptrs[irq];
+
+	return NULL;
 }
 
 struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
@@ -176,10 +192,9 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
 	unsigned long flags;
 	int node;
 
-	if (irq >= NR_IRQS) {
-		printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
-				irq, NR_IRQS);
-		WARN_ON(1);
+	if (irq >= nr_irqs) {
+		WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
+			irq, nr_irqs);
 		return NULL;
 	}
 
@@ -221,12 +236,10 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 		.handle_irq = handle_bad_irq,
 		.depth = 1,
 		.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
-#ifdef CONFIG_SMP
-		.affinity = CPU_MASK_ALL
-#endif
 	}
 };
 
+static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
 int __init early_irq_init(void)
 {
 	struct irq_desc *desc;
@@ -235,12 +248,16 @@ int __init early_irq_init(void)
 
 	init_irq_default_affinity();
 
+	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
+
 	desc = irq_desc;
 	count = ARRAY_SIZE(irq_desc);
 
-	for (i = 0; i < count; i++)
+	for (i = 0; i < count; i++) {
 		desc[i].irq = i;
-
+		init_alloc_desc_masks(&desc[i], 0, true);
+		desc[i].kstat_irqs = kstat_irqs_all[i];
+	}
 	return arch_early_irq_init();
 }
 
@@ -255,6 +272,11 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
 }
 #endif /* !CONFIG_SPARSE_IRQ */
 
+void clear_kstat_irqs(struct irq_desc *desc)
+{
+	memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
+}
+
 /*
  * What should we do if we get a hw irq event on an illegal vector?
  * Each architecture has to answer this themself.
@@ -328,6 +350,8 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
 	irqreturn_t ret, retval = IRQ_NONE;
 	unsigned int status = 0;
 
+	WARN_ONCE(!in_irq(), "BUG: IRQ handler called from non-hardirq context!");
+
 	if (!(action->flags & IRQF_DISABLED))
 		local_irq_enable_in_hardirq();
 
@@ -347,6 +371,11 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
 }
 
 #ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
+
+#ifdef CONFIG_ENABLE_WARN_DEPRECATED
+# warning __do_IRQ is deprecated. Please convert to proper flow handlers
+#endif
+
 /**
  * __do_IRQ - original all in one highlevel IRQ handler
  * @irq:	the interrupt number
@@ -467,12 +496,10 @@ void early_init_irq_lock_class(void)
 	}
 }
 
-#ifdef CONFIG_SPARSE_IRQ
 unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	return desc ? desc->kstat_irqs[cpu] : 0;
 }
-#endif
 EXPORT_SYMBOL(kstat_irqs_cpu);
 
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index e6d0a43cc125..ee1aa9f8e8b9 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -15,8 +15,16 @@ extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 
 extern struct lock_class_key irq_desc_lock_class;
 extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr);
+extern void clear_kstat_irqs(struct irq_desc *desc);
 extern spinlock_t sparse_irq_lock;
+
+#ifdef CONFIG_SPARSE_IRQ
+/* irq_desc_ptrs allocated at boot time */
+extern struct irq_desc **irq_desc_ptrs;
+#else
+/* irq_desc_ptrs is a fixed size array */
 extern struct irq_desc *irq_desc_ptrs[NR_IRQS];
+#endif
 
 #ifdef CONFIG_PROC_FS
 extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 291f03664552..6458e99984c0 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -90,14 +90,14 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-		cpumask_copy(&desc->affinity, cpumask);
+		cpumask_copy(desc->affinity, cpumask);
 		desc->chip->set_affinity(irq, cpumask);
 	} else {
 		desc->status |= IRQ_MOVE_PENDING;
-		cpumask_copy(&desc->pending_mask, cpumask);
+		cpumask_copy(desc->pending_mask, cpumask);
 	}
 #else
-	cpumask_copy(&desc->affinity, cpumask);
+	cpumask_copy(desc->affinity, cpumask);
 	desc->chip->set_affinity(irq, cpumask);
 #endif
 	desc->status |= IRQ_AFFINITY_SET;
@@ -109,7 +109,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 /*
  * Generic version of the affinity autoselector.
  */
-int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
+static int setup_affinity(unsigned int irq, struct irq_desc *desc)
 {
 	if (!irq_can_set_affinity(irq))
 		return 0;
@@ -119,21 +119,21 @@ int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
 	 * one of the targets is online.
 	 */
 	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
-		if (cpumask_any_and(&desc->affinity, cpu_online_mask)
+		if (cpumask_any_and(desc->affinity, cpu_online_mask)
 		    < nr_cpu_ids)
 			goto set_affinity;
 		else
 			desc->status &= ~IRQ_AFFINITY_SET;
 	}
 
-	cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity);
+	cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);
 set_affinity:
-	desc->chip->set_affinity(irq, &desc->affinity);
+	desc->chip->set_affinity(irq, desc->affinity);
 
 	return 0;
 }
 #else
-static inline int do_irq_select_affinity(unsigned int irq, struct irq_desc *d)
+static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
 {
 	return irq_select_affinity(irq);
 }
@@ -149,14 +149,14 @@ int irq_select_affinity_usr(unsigned int irq)
 	int ret;
 
 	spin_lock_irqsave(&desc->lock, flags);
-	ret = do_irq_select_affinity(irq, desc);
+	ret = setup_affinity(irq, desc);
 	spin_unlock_irqrestore(&desc->lock, flags);
 
 	return ret;
 }
 
 #else
-static inline int do_irq_select_affinity(int irq, struct irq_desc *desc)
+static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
 {
 	return 0;
 }
@@ -389,9 +389,9 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
  * allocate special interrupts that are part of the architecture.
  */
 static int
-__setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
+__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 {
-	struct irqaction *old, **p;
+	struct irqaction *old, **old_ptr;
 	const char *old_name = NULL;
 	unsigned long flags;
 	int shared = 0;
@@ -423,8 +423,8 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 	 * The following block of code has to be executed atomically
 	 */
 	spin_lock_irqsave(&desc->lock, flags);
-	p = &desc->action;
-	old = *p;
+	old_ptr = &desc->action;
+	old = *old_ptr;
 	if (old) {
 		/*
 		 * Can't share interrupts unless both agree to and are
@@ -447,8 +447,8 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 
 		/* add new interrupt at end of irq queue */
 		do {
-			p = &old->next;
-			old = *p;
+			old_ptr = &old->next;
+			old = *old_ptr;
 		} while (old);
 		shared = 1;
 	}
@@ -488,7 +488,7 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 			desc->status |= IRQ_NO_BALANCING;
 
 		/* Set default affinity mask once everything is setup */
-		do_irq_select_affinity(irq, desc);
+		setup_affinity(irq, desc);
 
 	} else if ((new->flags & IRQF_TRIGGER_MASK)
 			&& (new->flags & IRQF_TRIGGER_MASK)
@@ -499,7 +499,7 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 				(int)(new->flags & IRQF_TRIGGER_MASK));
 	}
 
-	*p = new;
+	*old_ptr = new;
 
 	/* Reset broken irq detection when installing new handler */
 	desc->irq_count = 0;
@@ -549,90 +549,117 @@ int setup_irq(unsigned int irq, struct irqaction *act)
 
 	return __setup_irq(irq, desc, act);
 }
+EXPORT_SYMBOL_GPL(setup_irq);
 
-/**
- *	free_irq - free an interrupt
- *	@irq: Interrupt line to free
- *	@dev_id: Device identity to free
- *
- *	Remove an interrupt handler. The handler is removed and if the
- *	interrupt line is no longer in use by any driver it is disabled.
- *	On a shared IRQ the caller must ensure the interrupt is disabled
- *	on the card it drives before calling this function. The function
- *	does not return until any executing interrupts for this IRQ
- *	have completed.
- *
- *	This function must not be called from interrupt context.
+ /*
+ * Internal function to unregister an irqaction - used to free
+ * regular and special interrupts that are part of the architecture.
  */
-void free_irq(unsigned int irq, void *dev_id)
+static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
-	struct irqaction **p;
+	struct irqaction *action, **action_ptr;
 	unsigned long flags;
 
-	WARN_ON(in_interrupt());
+	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
 
 	if (!desc)
-		return;
+		return NULL;
 
 	spin_lock_irqsave(&desc->lock, flags);
-	p = &desc->action;
+
+	/*
+	 * There can be multiple actions per IRQ descriptor, find the right
+	 * one based on the dev_id:
+	 */
+	action_ptr = &desc->action;
 	for (;;) {
-		struct irqaction *action = *p;
+		action = *action_ptr;
 
-		if (action) {
-			struct irqaction **pp = p;
+		if (!action) {
+			WARN(1, "Trying to free already-free IRQ %d\n", irq);
+			spin_unlock_irqrestore(&desc->lock, flags);
 
-			p = &action->next;
-			if (action->dev_id != dev_id)
-				continue;
+			return NULL;
+		}
 
-			/* Found it - now remove it from the list of entries */
-			*pp = action->next;
+		if (action->dev_id == dev_id)
+			break;
+		action_ptr = &action->next;
+	}
 
-			/* Currently used only by UML, might disappear one day.*/
+	/* Found it - now remove it from the list of entries: */
+	*action_ptr = action->next;
+
+	/* Currently used only by UML, might disappear one day: */
 #ifdef CONFIG_IRQ_RELEASE_METHOD
-			if (desc->chip->release)
-				desc->chip->release(irq, dev_id);
+	if (desc->chip->release)
+		desc->chip->release(irq, dev_id);
 #endif
 
-			if (!desc->action) {
-				desc->status |= IRQ_DISABLED;
-				if (desc->chip->shutdown)
-					desc->chip->shutdown(irq);
-				else
-					desc->chip->disable(irq);
-			}
-			spin_unlock_irqrestore(&desc->lock, flags);
-			unregister_handler_proc(irq, action);
+	/* If this was the last handler, shut down the IRQ line: */
+	if (!desc->action) {
+		desc->status |= IRQ_DISABLED;
+		if (desc->chip->shutdown)
+			desc->chip->shutdown(irq);
+		else
+			desc->chip->disable(irq);
+	}
+	spin_unlock_irqrestore(&desc->lock, flags);
+
+	unregister_handler_proc(irq, action);
+
+	/* Make sure it's not being used on another CPU: */
+	synchronize_irq(irq);
 
-			/* Make sure it's not being used on another CPU */
-			synchronize_irq(irq);
-#ifdef CONFIG_DEBUG_SHIRQ
-			/*
-			 * It's a shared IRQ -- the driver ought to be
-			 * prepared for it to happen even now it's
-			 * being freed, so let's make sure.... We do
-			 * this after actually deregistering it, to
-			 * make sure that a 'real' IRQ doesn't run in
-			 * parallel with our fake
-			 */
-			if (action->flags & IRQF_SHARED) {
-				local_irq_save(flags);
-				action->handler(irq, dev_id);
-				local_irq_restore(flags);
-			}
-#endif
-			kfree(action);
-			return;
-		}
-		printk(KERN_ERR "Trying to free already-free IRQ %d\n", irq);
 #ifdef CONFIG_DEBUG_SHIRQ
-		dump_stack();
-#endif
-		spin_unlock_irqrestore(&desc->lock, flags);
-		return;
+	/*
+	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
+	 * event to happen even now it's being freed, so let's make sure that
+	 * is so by doing an extra call to the handler ....
+	 *
+	 * ( We do this after actually deregistering it, to make sure that a
+	 *   'real' IRQ doesn't run in * parallel with our fake. )
+	 */
+	if (action->flags & IRQF_SHARED) {
+		local_irq_save(flags);
+		action->handler(irq, dev_id);
+		local_irq_restore(flags);
 	}
+#endif
+	return action;
+}
+
+/**
+ *	remove_irq - free an interrupt
+ *	@irq: Interrupt line to free
+ *	@act: irqaction for the interrupt
+ *
+ *	Used to remove interrupts statically setup by the early boot process.
+ */
+void remove_irq(unsigned int irq, struct irqaction *act)
+{
+	__free_irq(irq, act->dev_id);
+}
+EXPORT_SYMBOL_GPL(remove_irq);
+
+/**
+ *	free_irq - free an interrupt allocated with request_irq
+ *	@irq: Interrupt line to free
+ *	@dev_id: Device identity to free
+ *
+ *	Remove an interrupt handler. The handler is removed and if the
+ *	interrupt line is no longer in use by any driver it is disabled.
+ *	On a shared IRQ the caller must ensure the interrupt is disabled
+ *	on the card it drives before calling this function. The function
+ *	does not return until any executing interrupts for this IRQ
+ *	have completed.
+ *
+ *	This function must not be called from interrupt context.
+ */
+void free_irq(unsigned int irq, void *dev_id)
+{
+	kfree(__free_irq(irq, dev_id));
 }
 EXPORT_SYMBOL(free_irq);
 
@@ -679,11 +706,12 @@ int request_irq(unsigned int irq, irq_handler_t handler,
 	 * the behavior is classified as "will not fix" so we need to
 	 * start nudging drivers away from using that idiom.
 	 */
-	if ((irqflags & (IRQF_SHARED|IRQF_DISABLED))
-					== (IRQF_SHARED|IRQF_DISABLED))
-		pr_warning("IRQ %d/%s: IRQF_DISABLED is not "
-				"guaranteed on shared IRQs\n",
+	if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) ==
+					(IRQF_SHARED|IRQF_DISABLED)) {
+		pr_warning(
+		  "IRQ %d/%s: IRQF_DISABLED is not guaranteed on shared IRQs\n",
 			irq, devname);
+	}
 
 #ifdef CONFIG_LOCKDEP
 	/*
@@ -709,15 +737,13 @@ int request_irq(unsigned int irq, irq_handler_t handler,
 	if (!handler)
 		return -EINVAL;
 
-	action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
+	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
 	if (!action)
 		return -ENOMEM;
 
 	action->handler = handler;
 	action->flags = irqflags;
-	cpus_clear(action->mask);
 	action->name = devname;
-	action->next = NULL;
 	action->dev_id = dev_id;
 
 	retval = __setup_irq(irq, desc, action);
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index bd72329e630c..e05ad9be43b7 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -18,7 +18,7 @@ void move_masked_irq(int irq)
 
 	desc->status &= ~IRQ_MOVE_PENDING;
 
-	if (unlikely(cpumask_empty(&desc->pending_mask)))
+	if (unlikely(cpumask_empty(desc->pending_mask)))
 		return;
 
 	if (!desc->chip->set_affinity)
@@ -38,13 +38,13 @@ void move_masked_irq(int irq)
 	 * For correct operation this depends on the caller
 	 * masking the irqs.
 	 */
-	if (likely(cpumask_any_and(&desc->pending_mask, cpu_online_mask)
+	if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
 		   < nr_cpu_ids)) {
-		cpumask_and(&desc->affinity,
-			    &desc->pending_mask, cpu_online_mask);
-		desc->chip->set_affinity(irq, &desc->affinity);
+		cpumask_and(desc->affinity,
+			    desc->pending_mask, cpu_online_mask);
+		desc->chip->set_affinity(irq, desc->affinity);
 	}
-	cpumask_clear(&desc->pending_mask);
+	cpumask_clear(desc->pending_mask);
 }
 
 void move_native_irq(int irq)
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index acd88356ac76..243d6121e50e 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -17,16 +17,11 @@ static void init_copy_kstat_irqs(struct irq_desc *old_desc,
 				 struct irq_desc *desc,
 				 int cpu, int nr)
 {
-	unsigned long bytes;
-
 	init_kstat_irqs(desc, cpu, nr);
 
-	if (desc->kstat_irqs != old_desc->kstat_irqs) {
-		/* Compute how many bytes we need per irq and allocate them */
-		bytes = nr * sizeof(unsigned int);
-
-		memcpy(desc->kstat_irqs, old_desc->kstat_irqs, bytes);
-	}
+	if (desc->kstat_irqs != old_desc->kstat_irqs)
+		memcpy(desc->kstat_irqs, old_desc->kstat_irqs,
+			 nr * sizeof(*desc->kstat_irqs));
 }
 
 static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
@@ -38,15 +33,22 @@ static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
 	old_desc->kstat_irqs = NULL;
 }
 
-static void init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
+static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
 		 struct irq_desc *desc, int cpu)
 {
 	memcpy(desc, old_desc, sizeof(struct irq_desc));
+	if (!init_alloc_desc_masks(desc, cpu, false)) {
+		printk(KERN_ERR "irq %d: can not get new irq_desc cpumask "
+				"for migration.\n", irq);
+		return false;
+	}
 	spin_lock_init(&desc->lock);
 	desc->cpu = cpu;
 	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
 	init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids);
+	init_copy_desc_masks(old_desc, desc);
 	arch_init_copy_chip_data(old_desc, desc, cpu);
+	return true;
 }
 
 static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
@@ -76,12 +78,18 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
 	node = cpu_to_node(cpu);
 	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
 	if (!desc) {
-		printk(KERN_ERR "irq %d: can not get new irq_desc for migration.\n", irq);
+		printk(KERN_ERR "irq %d: can not get new irq_desc "
+				"for migration.\n", irq);
+		/* still use old one */
+		desc = old_desc;
+		goto out_unlock;
+	}
+	if (!init_copy_one_irq_desc(irq, old_desc, desc, cpu)) {
 		/* still use old one */
+		kfree(desc);
 		desc = old_desc;
 		goto out_unlock;
 	}
-	init_copy_one_irq_desc(irq, old_desc, desc, cpu);
 
 	irq_desc_ptrs[irq] = desc;
 	spin_unlock_irqrestore(&sparse_irq_lock, flags);
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index aae3f742bcec..692363dd591f 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -20,11 +20,11 @@ static struct proc_dir_entry *root_irq_dir;
 static int irq_affinity_proc_show(struct seq_file *m, void *v)
 {
 	struct irq_desc *desc = irq_to_desc((long)m->private);
-	const struct cpumask *mask = &desc->affinity;
+	const struct cpumask *mask = desc->affinity;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PENDING)
-		mask = &desc->pending_mask;
+		mask = desc->pending_mask;
 #endif
 	seq_cpumask(m, mask);
 	seq_putc(m, '\n');
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index dd364c11e56e..4d568294de3e 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -104,7 +104,7 @@ static int misrouted_irq(int irq)
 	return ok;
 }
 
-static void poll_spurious_irqs(unsigned long dummy)
+static void poll_all_shared_irqs(void)
 {
 	struct irq_desc *desc;
 	int i;
@@ -123,11 +123,23 @@ static void poll_spurious_irqs(unsigned long dummy)
 
 		try_one_irq(i, desc);
 	}
+}
+
+static void poll_spurious_irqs(unsigned long dummy)
+{
+	poll_all_shared_irqs();
 
 	mod_timer(&poll_spurious_irq_timer,
 		  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
 }
 
+#ifdef CONFIG_DEBUG_SHIRQ
+void debug_poll_all_shared_irqs(void)
+{
+	poll_all_shared_irqs();
+}
+#endif
+
 /*
  * If 99,900 of the previous 100,000 interrupts have not been handled
  * then assume that the IRQ is stuck in some manner. Drop a diagnostic