Diffstat (limited to 'kernel/irq/handle.c')
 kernel/irq/handle.c | 149 ++++++++++++++++++++++++++++++++++++----------
 1 file changed, 115 insertions(+), 34 deletions(-)
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 3aba8d12f328..d82142be8dd2 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -17,6 +17,8 @@
 #include <linux/kernel_stat.h>
 #include <linux/rculist.h>
 #include <linux/hash.h>
+#include <trace/irq.h>
+#include <linux/bootmem.h>
 
 #include "internals.h"
 
@@ -69,6 +71,7 @@ int nr_irqs = NR_IRQS;
 EXPORT_SYMBOL_GPL(nr_irqs);
 
 #ifdef CONFIG_SPARSE_IRQ
+
 static struct irq_desc irq_desc_init = {
 	.irq = -1,
 	.status = IRQ_DISABLED,
@@ -76,26 +79,25 @@ static struct irq_desc irq_desc_init = {
 	.handle_irq = handle_bad_irq,
 	.depth = 1,
 	.lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
-	.affinity = CPU_MASK_ALL
-#endif
 };
 
 void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
 {
-	unsigned long bytes;
-	char *ptr;
 	int node;
-
-	/* Compute how many bytes we need per irq and allocate them */
-	bytes = nr * sizeof(unsigned int);
+	void *ptr;
 
 	node = cpu_to_node(cpu);
-	ptr = kzalloc_node(bytes, GFP_ATOMIC, node);
-	printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n", cpu, node);
+	ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), GFP_ATOMIC, node);
 
-	if (ptr)
-		desc->kstat_irqs = (unsigned int *)ptr;
+	/*
+	 * don't overwrite if we cannot get a new one;
+	 * init_copy_kstat_irqs() could still use the old one
+	 */
+	if (ptr) {
+		printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n",
+			 cpu, node);
+		desc->kstat_irqs = ptr;
+	}
 }
 
 static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
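
A note on the allocation idiom above: sizing the buffer with sizeof(*desc->kstat_irqs) instead of a hard-coded element type keeps the byte count correct if the field's type ever changes, and skipping the assignment on failure lets init_copy_kstat_irqs() keep using the old array. A minimal userspace sketch of the same pattern; the struct and helper names here are hypothetical stand-ins, not kernel API, and calloc stands in for kzalloc_node():

#include <stdio.h>
#include <stdlib.h>

struct fake_desc {
	unsigned int *kstat_irqs;	/* per-CPU counters, like irq_desc */
};

static int alloc_kstat(struct fake_desc *desc, int nr_cpus)
{
	/* size by the dereferenced pointer, zeroed like kzalloc_node() */
	void *ptr = calloc(nr_cpus, sizeof(*desc->kstat_irqs));

	/* don't overwrite the old array if the new allocation failed */
	if (ptr)
		desc->kstat_irqs = ptr;
	return ptr ? 0 : -1;
}

int main(void)
{
	struct fake_desc d = { 0 };

	if (alloc_kstat(&d, 4) == 0)
		printf("allocated %zu bytes\n", 4 * sizeof(*d.kstat_irqs));
	free(d.kstat_irqs);
	return 0;
}
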
@@ -113,6 +115,10 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
 		printk(KERN_ERR "can not alloc kstat_irqs\n");
 		BUG_ON(1);
 	}
+	if (!init_alloc_desc_masks(desc, cpu, false)) {
+		printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
+		BUG_ON(1);
+	}
 	arch_init_chip_data(desc, cpu);
 }
 
@@ -121,7 +127,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
  */
 DEFINE_SPINLOCK(sparse_irq_lock);
 
-struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;
+struct irq_desc **irq_desc_ptrs __read_mostly;
 
 static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
 	[0 ... NR_IRQS_LEGACY-1] = {
@@ -131,14 +137,10 @@ static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
 		.handle_irq = handle_bad_irq,
 		.depth = 1,
 		.lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
-		.affinity = CPU_MASK_ALL
-#endif
 	}
 };
 
-/* FIXME: use bootmem alloc ...*/
-static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];
+static unsigned int *kstat_irqs_legacy;
 
 int __init early_irq_init(void)
 {
@@ -148,18 +150,30 @@ int __init early_irq_init(void)
 
 	init_irq_default_affinity();
 
+	/* initialize nr_irqs based on nr_cpu_ids */
+	arch_probe_nr_irqs();
+	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
+
 	desc = irq_desc_legacy;
 	legacy_count = ARRAY_SIZE(irq_desc_legacy);
 
+	/* allocate irq_desc_ptrs array based on nr_irqs */
+	irq_desc_ptrs = alloc_bootmem(nr_irqs * sizeof(void *));
+
+	/* allocate based on nr_cpu_ids */
+	/* FIXME: invert kstat_irqs, and it'd be a per_cpu_alloc'd thing */
+	kstat_irqs_legacy = alloc_bootmem(NR_IRQS_LEGACY * nr_cpu_ids *
+					  sizeof(int));
+
 	for (i = 0; i < legacy_count; i++) {
 		desc[i].irq = i;
-		desc[i].kstat_irqs = kstat_irqs_legacy[i];
+		desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
 		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
-
+		init_alloc_desc_masks(&desc[i], 0, true);
 		irq_desc_ptrs[i] = desc + i;
 	}
 
-	for (i = legacy_count; i < NR_IRQS; i++)
+	for (i = legacy_count; i < nr_irqs; i++)
 		irq_desc_ptrs[i] = NULL;
 
 	return arch_early_irq_init();
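
The legacy kstat table becomes a single flat bootmem block of NR_IRQS_LEGACY * nr_cpu_ids counters instead of a static [NR_IRQS_LEGACY][NR_CPUS] array, so its size tracks the CPUs actually present at boot; row i starts at kstat_irqs_legacy + i * nr_cpu_ids. A small standalone sketch of that flattened indexing, with calloc standing in for alloc_bootmem() and made-up sizes:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int legacy_count = 16;	/* stand-in for NR_IRQS_LEGACY */
	int nr_cpu_ids = 4;	/* stand-in for the boot-time CPU count */
	unsigned int *base = calloc(legacy_count * nr_cpu_ids, sizeof(*base));

	if (!base)
		return 1;

	/* row for irq i, column for cpu c */
	for (int i = 0; i < legacy_count; i++) {
		unsigned int *row = base + i * nr_cpu_ids;
		for (int c = 0; c < nr_cpu_ids; c++)
			row[c] = 0;	/* per-cpu counter for irq i */
	}

	printf("row 3 starts at offset %d\n", 3 * nr_cpu_ids);
	free(base);
	return 0;
}
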
@@ -167,7 +181,10 @@ int __init early_irq_init(void)
 
 struct irq_desc *irq_to_desc(unsigned int irq)
 {
-	return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
+	if (irq_desc_ptrs && irq < nr_irqs)
+		return irq_desc_ptrs[irq];
+
+	return NULL;
 }
 
 struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
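
irq_to_desc() now checks irq_desc_ptrs itself before indexing, since very early callers can run before early_irq_init() has bootmem-allocated the array. A compilable userspace sketch of that guarded lookup, with illustrative names:

#include <stdio.h>
#include <stdlib.h>

struct desc { int irq; };

static struct desc **table;	/* allocated late, like irq_desc_ptrs */
static unsigned int table_len;	/* like nr_irqs */

static struct desc *lookup(unsigned int i)
{
	/* the table pointer itself may still be NULL for early callers */
	if (table && i < table_len)
		return table[i];
	return NULL;
}

int main(void)
{
	printf("before init: %p\n", (void *)lookup(0));	/* NULL, no crash */

	table_len = 8;
	table = calloc(table_len, sizeof(*table));
	printf("after init:  %p\n", (void *)lookup(0));	/* still a NULL entry */
	free(table);
	return 0;
}
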
@@ -176,10 +193,9 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
 	unsigned long flags;
 	int node;
 
-	if (irq >= NR_IRQS) {
-		printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
-				irq, NR_IRQS);
-		WARN_ON(1);
+	if (irq >= nr_irqs) {
+		WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
+			irq, nr_irqs);
 		return NULL;
 	}
 
@@ -221,12 +237,10 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 		.handle_irq = handle_bad_irq,
 		.depth = 1,
 		.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
-#ifdef CONFIG_SMP
-		.affinity = CPU_MASK_ALL
-#endif
 	}
 };
 
+static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
 int __init early_irq_init(void)
 {
 	struct irq_desc *desc;
@@ -235,12 +249,16 @@ int __init early_irq_init(void)
 
 	init_irq_default_affinity();
 
+	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
+
 	desc = irq_desc;
 	count = ARRAY_SIZE(irq_desc);
 
-	for (i = 0; i < count; i++)
+	for (i = 0; i < count; i++) {
 		desc[i].irq = i;
-
+		init_alloc_desc_masks(&desc[i], 0, true);
+		desc[i].kstat_irqs = kstat_irqs_all[i];
+	}
 	return arch_early_irq_init();
 }
 
@@ -255,6 +273,11 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
 }
 #endif /* !CONFIG_SPARSE_IRQ */
 
+void clear_kstat_irqs(struct irq_desc *desc)
+{
+	memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
+}
+
 /*
  * What should we do if we get a hw irq event on an illegal vector?
  * Each architecture has to answer this themself.
@@ -316,6 +339,18 @@ irqreturn_t no_action(int cpl, void *dev_id)
 	return IRQ_NONE;
 }
 
+static void warn_no_thread(unsigned int irq, struct irqaction *action)
+{
+	if (test_and_set_bit(IRQTF_WARNED, &action->thread_flags))
+		return;
+
+	printk(KERN_WARNING "IRQ %d device %s returned IRQ_WAKE_THREAD "
+	       "but no thread function available.", irq, action->name);
+}
+
+DEFINE_TRACE(irq_handler_entry);
+DEFINE_TRACE(irq_handler_exit);
+
 /**
  * handle_IRQ_event - irq action chain handler
  * @irq: the interrupt number
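
warn_no_thread() relies on test_and_set_bit(IRQTF_WARNED, ...) so the complaint is printed exactly once per irqaction, no matter how often a driver returns IRQ_WAKE_THREAD without a thread function. A compilable userspace sketch of the same warn-once pattern, using C11 atomics in place of the kernel's bitops; the struct and names are invented for illustration:

#include <stdatomic.h>
#include <stdio.h>

struct action {
	atomic_flag warned;	/* plays the role of IRQTF_WARNED */
	const char *name;
};

static void warn_no_thread(unsigned int irq, struct action *a)
{
	/* returns true if the flag was already set: warn only once */
	if (atomic_flag_test_and_set(&a->warned))
		return;

	fprintf(stderr, "IRQ %u device %s returned WAKE_THREAD "
		"but no thread function available.\n", irq, a->name);
}

int main(void)
{
	struct action a = { ATOMIC_FLAG_INIT, "demo" };

	warn_no_thread(5, &a);	/* prints the warning */
	warn_no_thread(5, &a);	/* silent on the second call */
	return 0;
}
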
@@ -328,13 +363,56 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
 	irqreturn_t ret, retval = IRQ_NONE;
 	unsigned int status = 0;
 
+	WARN_ONCE(!in_irq(), "BUG: IRQ handler called from non-hardirq context!");
+
 	if (!(action->flags & IRQF_DISABLED))
 		local_irq_enable_in_hardirq();
 
 	do {
+		trace_irq_handler_entry(irq, action);
 		ret = action->handler(irq, action->dev_id);
-		if (ret == IRQ_HANDLED)
+		trace_irq_handler_exit(irq, action, ret);
+
+		switch (ret) {
+		case IRQ_WAKE_THREAD:
+			/*
+			 * Set result to handled so the spurious check
+			 * does not trigger.
+			 */
+			ret = IRQ_HANDLED;
+
+			/*
+			 * Catch drivers which return WAKE_THREAD but
+			 * did not set up a thread function
+			 */
+			if (unlikely(!action->thread_fn)) {
+				warn_no_thread(irq, action);
+				break;
+			}
+
+			/*
+			 * Wake up the handler thread for this
+			 * action. In case the thread crashed and was
+			 * killed we just pretend that we handled the
+			 * interrupt. The hardirq handler above has
+			 * disabled the device interrupt, so no irq
+			 * storm is lurking.
+			 */
+			if (likely(!test_bit(IRQTF_DIED,
+					     &action->thread_flags))) {
+				set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
+				wake_up_process(action->thread);
+			}
+
+			/* Fall through to add to randomness */
+		case IRQ_HANDLED:
 			status |= action->flags;
+			break;
+
+		default:
+			break;
+		}
+
 		retval |= ret;
 		action = action->next;
 	} while (action);
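
The switch above converts IRQ_WAKE_THREAD into IRQ_HANDLED (so the spurious-interrupt accounting stays quiet) and then wakes the handler thread via IRQTF_RUNTHREAD and wake_up_process(). A rough userspace analogue of that hard-handler-to-thread handoff, with pthreads standing in for the kernel machinery and all names invented (build with -pthread):

#include <pthread.h>
#include <stdio.h>

enum irqreturn { IRQ_NONE, IRQ_HANDLED, IRQ_WAKE_THREAD };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int run_thread;		/* plays the role of IRQTF_RUNTHREAD */

static enum irqreturn hard_handler(int irq)
{
	(void)irq;
	/* quick top half: just request the threaded bottom half */
	return IRQ_WAKE_THREAD;
}

static void *thread_fn(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!run_thread)
		pthread_cond_wait(&cond, &lock);
	run_thread = 0;
	pthread_mutex_unlock(&lock);

	printf("threaded handler ran for irq %d\n", *(int *)arg);
	return NULL;
}

int main(void)
{
	int irq = 9;
	pthread_t tid;

	pthread_create(&tid, NULL, thread_fn, &irq);

	enum irqreturn ret = hard_handler(irq);
	if (ret == IRQ_WAKE_THREAD) {
		ret = IRQ_HANDLED;		/* keep the spurious check happy */
		pthread_mutex_lock(&lock);
		run_thread = 1;			/* like IRQTF_RUNTHREAD */
		pthread_cond_signal(&cond);	/* like wake_up_process() */
		pthread_mutex_unlock(&lock);
	}

	pthread_join(tid, NULL);
	return ret == IRQ_HANDLED ? 0 : 1;
}
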
@@ -347,6 +425,11 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
 }
 
 #ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
+
+#ifdef CONFIG_ENABLE_WARN_DEPRECATED
+# warning __do_IRQ is deprecated. Please convert to proper flow handlers
+#endif
+
 /**
  * __do_IRQ - original all in one highlevel IRQ handler
  * @irq: the interrupt number
@@ -467,12 +550,10 @@ void early_init_irq_lock_class(void)
 	}
 }
 
-#ifdef CONFIG_SPARSE_IRQ
 unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	return desc ? desc->kstat_irqs[cpu] : 0;
 }
-#endif
 EXPORT_SYMBOL(kstat_irqs_cpu);
 