Diffstat (limited to 'kernel/irq/handle.c')
-rw-r--r--  kernel/irq/handle.c  93
1 file changed, 60 insertions(+), 33 deletions(-)
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 3aba8d12f328..9ebf77968871 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -17,6 +17,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/rculist.h>
 #include <linux/hash.h>
+#include <linux/bootmem.h>
 
 #include "internals.h"
 
@@ -69,6 +70,7 @@ int nr_irqs = NR_IRQS;
 EXPORT_SYMBOL_GPL(nr_irqs);
 
 #ifdef CONFIG_SPARSE_IRQ
+
 static struct irq_desc irq_desc_init = {
         .irq = -1,
         .status = IRQ_DISABLED,
@@ -76,26 +78,25 @@ static struct irq_desc irq_desc_init = {
         .handle_irq = handle_bad_irq,
         .depth = 1,
         .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
-        .affinity = CPU_MASK_ALL
-#endif
 };
 
 void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
 {
-        unsigned long bytes;
-        char *ptr;
         int node;
-
-        /* Compute how many bytes we need per irq and allocate them */
-        bytes = nr * sizeof(unsigned int);
+        void *ptr;
 
         node = cpu_to_node(cpu);
-        ptr = kzalloc_node(bytes, GFP_ATOMIC, node);
-        printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n", cpu, node);
+        ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), GFP_ATOMIC, node);
 
-        if (ptr)
-                desc->kstat_irqs = (unsigned int *)ptr;
+        /*
+         * don't overwite if can not get new one
+         * init_copy_kstat_irqs() could still use old one
+         */
+        if (ptr) {
+                printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n",
+                        cpu, node);
+                desc->kstat_irqs = ptr;
+        }
 }
 
 static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
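Aside (not part of the patch): the allocate-then-test pattern above means a failed GFP_ATOMIC allocation leaves desc->kstat_irqs pointing at the previous buffer, which init_copy_kstat_irqs() can still read. A minimal sketch of that pattern, with replace_buffer() as an illustrative name:

static void replace_buffer(unsigned int **slot, int nr, int node)
{
        void *ptr = kzalloc_node(nr * sizeof(**slot), GFP_ATOMIC, node);

        if (ptr)        /* on failure, keep the old buffer untouched */
                *slot = ptr;
}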
@@ -113,6 +114,10 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
                 printk(KERN_ERR "can not alloc kstat_irqs\n");
                 BUG_ON(1);
         }
+        if (!init_alloc_desc_masks(desc, cpu, false)) {
+                printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
+                BUG_ON(1);
+        }
         arch_init_chip_data(desc, cpu);
 }
 
@@ -121,7 +126,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
  */
 DEFINE_SPINLOCK(sparse_irq_lock);
 
-struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;
+struct irq_desc **irq_desc_ptrs __read_mostly;
 
 static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
         [0 ... NR_IRQS_LEGACY-1] = {
@@ -131,14 +136,10 @@ static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
                 .handle_irq = handle_bad_irq,
                 .depth = 1,
                 .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
-                .affinity = CPU_MASK_ALL
-#endif
         }
 };
 
-/* FIXME: use bootmem alloc ...*/
-static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];
+static unsigned int *kstat_irqs_legacy;
 
 int __init early_irq_init(void)
 {
@@ -148,18 +149,30 @@ int __init early_irq_init(void)
 
         init_irq_default_affinity();
 
+        /* initialize nr_irqs based on nr_cpu_ids */
+        arch_probe_nr_irqs();
+        printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
+
         desc = irq_desc_legacy;
         legacy_count = ARRAY_SIZE(irq_desc_legacy);
 
+        /* allocate irq_desc_ptrs array based on nr_irqs */
+        irq_desc_ptrs = alloc_bootmem(nr_irqs * sizeof(void *));
+
+        /* allocate based on nr_cpu_ids */
+        /* FIXME: invert kstat_irgs, and it'd be a per_cpu_alloc'd thing */
+        kstat_irqs_legacy = alloc_bootmem(NR_IRQS_LEGACY * nr_cpu_ids *
+                                          sizeof(int));
+
         for (i = 0; i < legacy_count; i++) {
                 desc[i].irq = i;
-                desc[i].kstat_irqs = kstat_irqs_legacy[i];
+                desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
                 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
-
+                init_alloc_desc_masks(&desc[i], 0, true);
                 irq_desc_ptrs[i] = desc + i;
         }
 
-        for (i = legacy_count; i < NR_IRQS; i++)
+        for (i = legacy_count; i < nr_irqs; i++)
                 irq_desc_ptrs[i] = NULL;
 
         return arch_early_irq_init();
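Aside (not part of the patch): the bootmem block above is one flat array of NR_IRQS_LEGACY * nr_cpu_ids counters, and the loop hands each legacy descriptor a row of nr_cpu_ids entries. The counter for legacy IRQ i on CPU cpu can therefore be read as in this sketch (legacy_kstat_irqs() is an illustrative name):

static inline unsigned int legacy_kstat_irqs(int i, int cpu)
{
        /* equivalent to desc[i].kstat_irqs[cpu] after the loop above */
        return kstat_irqs_legacy[i * nr_cpu_ids + cpu];
}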
@@ -167,7 +180,10 @@ int __init early_irq_init(void)
 
 struct irq_desc *irq_to_desc(unsigned int irq)
 {
-        return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
+        if (irq_desc_ptrs && irq < nr_irqs)
+                return irq_desc_ptrs[irq];
+
+        return NULL;
 }
 
 struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
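Aside (not part of the patch): irq_to_desc() can now return NULL both before early_irq_init() has allocated irq_desc_ptrs and for irq >= nr_irqs, so callers are expected to check the result. A minimal sketch of that caller pattern (poke_irq() is an illustrative name):

static void poke_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc)
                return;         /* too early in boot, or irq out of range */

        /* ... operate on desc ... */
}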
@@ -176,10 +192,9 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
         unsigned long flags;
         int node;
 
-        if (irq >= NR_IRQS) {
-                printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
-                        irq, NR_IRQS);
-                WARN_ON(1);
+        if (irq >= nr_irqs) {
+                WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
+                        irq, nr_irqs);
                 return NULL;
         }
 
@@ -221,12 +236,10 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
                 .handle_irq = handle_bad_irq,
                 .depth = 1,
                 .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
-#ifdef CONFIG_SMP
-                .affinity = CPU_MASK_ALL
-#endif
         }
 };
 
+static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
 int __init early_irq_init(void)
 {
         struct irq_desc *desc;
@@ -235,12 +248,16 @@ int __init early_irq_init(void)
 
         init_irq_default_affinity();
 
+        printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
+
         desc = irq_desc;
         count = ARRAY_SIZE(irq_desc);
 
-        for (i = 0; i < count; i++)
+        for (i = 0; i < count; i++) {
                 desc[i].irq = i;
-
+                init_alloc_desc_masks(&desc[i], 0, true);
+                desc[i].kstat_irqs = kstat_irqs_all[i];
+        }
         return arch_early_irq_init();
 }
 
@@ -255,6 +272,11 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
 }
 #endif /* !CONFIG_SPARSE_IRQ */
 
+void clear_kstat_irqs(struct irq_desc *desc)
+{
+        memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
+}
+
 /*
  * What should we do if we get a hw irq event on an illegal vector?
  * Each architecture has to answer this themself.
@@ -328,6 +350,8 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
         irqreturn_t ret, retval = IRQ_NONE;
         unsigned int status = 0;
 
+        WARN_ONCE(!in_irq(), "BUG: IRQ handler called from non-hardirq context!");
+
         if (!(action->flags & IRQF_DISABLED))
                 local_irq_enable_in_hardirq();
 
@@ -347,6 +371,11 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
 }
 
 #ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
+
+#ifdef CONFIG_ENABLE_WARN_DEPRECATED
+# warning __do_IRQ is deprecated. Please convert to proper flow handlers
+#endif
+
 /**
  * __do_IRQ - original all in one highlevel IRQ handler
  * @irq: the interrupt number
@@ -467,12 +496,10 @@ void early_init_irq_lock_class(void)
         }
 }
 
-#ifdef CONFIG_SPARSE_IRQ
 unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
 {
         struct irq_desc *desc = irq_to_desc(irq);
         return desc ? desc->kstat_irqs[cpu] : 0;
 }
-#endif
 EXPORT_SYMBOL(kstat_irqs_cpu);
 
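Aside (not part of the patch): with the #ifdef removed, kstat_irqs_cpu() is built whether or not CONFIG_SPARSE_IRQ is set, since desc->kstat_irqs now exists in both configurations. A sketch of a consumer summing the per-CPU counters, roughly what /proc/interrupts-style accounting does (sum_irqs() is an illustrative name):

static unsigned int sum_irqs(unsigned int irq)
{
        unsigned int sum = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                sum += kstat_irqs_cpu(irq, cpu);

        return sum;
}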