author     Thomas Gleixner <tglx@linutronix.de>  2010-09-22 11:09:43 -0400
committer  Thomas Gleixner <tglx@linutronix.de>  2010-10-12 10:39:05 -0400
commit     3795de236d67a05994a1a12759db9d4dd9ffc42c (patch)
tree       75e5a2a8922e114de60f468494c879ba4f65ebee /kernel/irq/handle.c
parent     f303a6dd127b5ec6de90d1cd79ed19820c7e9658 (diff)
genirq: Distangle kernel/irq/handle.c
kernel/irq/handle.c has become a dumpground for random code in
random order. Split out the irq descriptor management and the dummy
irq_chip implementation into separate files. Cleanup the include maze
while at it.

No code change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@elte.hu>
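As context, not part of this patch: the dummy_irq_chip moved out of this file is the chip a driver installs for demultiplexed or software-only interrupt sources that have nothing to ack or mask. Below is a minimal sketch against the set_irq_chip()/set_irq_handler() API of this kernel generation; the irq range and the init function are invented for illustration.

#include <linux/irq.h>

#define DEMUX_IRQ_BASE  64      /* hypothetical software irq range */
#define DEMUX_IRQ_NR    4

static void __init demux_irqs_init(void)
{
        unsigned int i;

        /* no hardware behind these irqs, so every chip op can be a noop */
        for (i = 0; i < DEMUX_IRQ_NR; i++) {
                set_irq_chip(DEMUX_IRQ_BASE + i, &dummy_irq_chip);
                set_irq_handler(DEMUX_IRQ_BASE + i, handle_simple_irq);
        }
}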
Diffstat (limited to 'kernel/irq/handle.c')
-rw-r--r--  kernel/irq/handle.c  333
1 file changed, 2 insertions(+), 331 deletions(-)
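Most of what is deleted below is the CONFIG_SPARSE_IRQ descriptor management, which moves to its own file. Its allocation path is a check, lock, re-check sequence over a GFP_ATOMIC radix tree. For orientation, here is a condensed skeleton of the moved irq_to_desc_alloc_node() (function renamed, bounds checking and error handling elided), not part of the patch itself:

static struct irq_desc *desc_alloc_skeleton(unsigned int irq, int node)
{
        struct irq_desc *desc;
        unsigned long flags;

        desc = irq_to_desc(irq);        /* lock-free radix-tree lookup */
        if (desc)
                return desc;            /* fast path: already allocated */

        raw_spin_lock_irqsave(&sparse_irq_lock, flags);
        desc = irq_to_desc(irq);        /* re-check: another CPU may have won the race */
        if (!desc) {
                desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
                init_one_irq_desc(irq, desc, node);
                set_irq_desc(irq, desc); /* radix_tree_insert() under sparse_irq_lock */
        }
        raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);

        return desc;
}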
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 3fcef37154a1..e2347eb63306 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -11,24 +11,15 @@
  */
 
 #include <linux/irq.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/module.h>
 #include <linux/random.h>
+#include <linux/sched.h>
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
-#include <linux/rculist.h>
-#include <linux/hash.h>
-#include <linux/radix-tree.h>
+
 #include <trace/events/irq.h>
 
 #include "internals.h"
 
-/*
- * lockdep: we want to handle all irq_desc locks as a single lock-class:
- */
-struct lock_class_key irq_desc_lock_class;
-
 /**
  * handle_bad_irq - handle spurious and unhandled irqs
  * @irq: the interrupt number
@@ -43,308 +34,6 @@ void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
         ack_bad_irq(irq);
 }
 
-#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
-static void __init init_irq_default_affinity(void)
-{
-        alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
-        cpumask_setall(irq_default_affinity);
-}
-#else
-static void __init init_irq_default_affinity(void)
-{
-}
-#endif
-
-/*
- * Linux has a controller-independent interrupt architecture.
- * Every controller has a 'controller-template', that is used
- * by the main code to do the right thing. Each driver-visible
- * interrupt source is transparently wired to the appropriate
- * controller. Thus drivers need not be aware of the
- * interrupt-controller.
- *
- * The code is designed to be easily extended with new/different
- * interrupt controllers, without having to do assembly magic or
- * having to touch the generic code.
- *
- * Controller mappings for all interrupt sources:
- */
-int nr_irqs = NR_IRQS;
-EXPORT_SYMBOL_GPL(nr_irqs);
-
-#ifdef CONFIG_SPARSE_IRQ
-
-static struct irq_desc irq_desc_init = {
-        .status         = IRQ_DISABLED,
-        .handle_irq     = handle_bad_irq,
-        .depth          = 1,
-        .lock           = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-};
-
-void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
-{
-        void *ptr;
-
-        ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
-                           GFP_ATOMIC, node);
-
-        /*
-         * don't overwite if can not get new one
-         * init_copy_kstat_irqs() could still use old one
-         */
-        if (ptr) {
-                printk(KERN_DEBUG " alloc kstat_irqs on node %d\n", node);
-                desc->kstat_irqs = ptr;
-        }
-}
-
-static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
-{
-        memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
-
-        raw_spin_lock_init(&desc->lock);
-        desc->irq_data.irq = irq;
-#ifdef CONFIG_SMP
-        desc->irq_data.node = node;
-#endif
-        lockdep_set_class(&desc->lock, &irq_desc_lock_class);
-        init_kstat_irqs(desc, node, nr_cpu_ids);
-        if (!desc->kstat_irqs) {
-                printk(KERN_ERR "can not alloc kstat_irqs\n");
-                BUG_ON(1);
-        }
-        if (!alloc_desc_masks(desc, node, false)) {
-                printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
-                BUG_ON(1);
-        }
-        init_desc_masks(desc);
-        arch_init_chip_data(desc, node);
-}
-
-/*
- * Protect the sparse_irqs:
- */
-DEFINE_RAW_SPINLOCK(sparse_irq_lock);
-
-static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);
-
-static void set_irq_desc(unsigned int irq, struct irq_desc *desc)
-{
-        radix_tree_insert(&irq_desc_tree, irq, desc);
-}
-
-struct irq_desc *irq_to_desc(unsigned int irq)
-{
-        return radix_tree_lookup(&irq_desc_tree, irq);
-}
-
-void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
-{
-        void **ptr;
-
-        ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
-        if (ptr)
-                radix_tree_replace_slot(ptr, desc);
-}
-
-static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
-        [0 ... NR_IRQS_LEGACY-1] = {
-                .status         = IRQ_DISABLED,
-                .handle_irq     = handle_bad_irq,
-                .depth          = 1,
-                .lock           = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-        }
-};
-
-static unsigned int *kstat_irqs_legacy;
-
-int __init early_irq_init(void)
-{
-        struct irq_desc *desc;
-        int legacy_count;
-        int node;
-        int i;
-
-        init_irq_default_affinity();
-
-        /* initialize nr_irqs based on nr_cpu_ids */
-        arch_probe_nr_irqs();
-        printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
-
-        desc = irq_desc_legacy;
-        legacy_count = ARRAY_SIZE(irq_desc_legacy);
-        node = first_online_node;
-
-        /* allocate based on nr_cpu_ids */
-        kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
-                                         sizeof(int), GFP_NOWAIT, node);
-
-        irq_desc_init.irq_data.chip = &no_irq_chip;
-
-        for (i = 0; i < legacy_count; i++) {
-                desc[i].irq_data.irq = i;
-                desc[i].irq_data.chip = &no_irq_chip;
-#ifdef CONFIG_SMP
-                desc[i].irq_data.node = node;
-#endif
-                desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
-                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
-                alloc_desc_masks(&desc[i], node, true);
-                init_desc_masks(&desc[i]);
-                set_irq_desc(i, &desc[i]);
-        }
-
-        return arch_early_irq_init();
-}
-
-struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
-{
-        struct irq_desc *desc;
-        unsigned long flags;
-
-        if (irq >= nr_irqs) {
-                WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
-                        irq, nr_irqs);
-                return NULL;
-        }
-
-        desc = irq_to_desc(irq);
-        if (desc)
-                return desc;
-
-        raw_spin_lock_irqsave(&sparse_irq_lock, flags);
-
-        /* We have to check it to avoid races with another CPU */
-        desc = irq_to_desc(irq);
-        if (desc)
-                goto out_unlock;
-
-        desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
-
-        printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node);
-        if (!desc) {
-                printk(KERN_ERR "can not alloc irq_desc\n");
-                BUG_ON(1);
-        }
-        init_one_irq_desc(irq, desc, node);
-
-        set_irq_desc(irq, desc);
-
-out_unlock:
-        raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
-
-        return desc;
-}
-
-#else /* !CONFIG_SPARSE_IRQ */
-
-struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
-        [0 ... NR_IRQS-1] = {
-                .status         = IRQ_DISABLED,
-                .handle_irq     = handle_bad_irq,
-                .depth          = 1,
-                .lock           = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
-        }
-};
-
-static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
-int __init early_irq_init(void)
-{
-        struct irq_desc *desc;
-        int count;
-        int i;
-
-        init_irq_default_affinity();
-
-        printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
-
-        desc = irq_desc;
-        count = ARRAY_SIZE(irq_desc);
-
-        for (i = 0; i < count; i++) {
-                desc[i].irq_data.irq = i;
-                desc[i].irq_data.chip = &no_irq_chip;
-                alloc_desc_masks(&desc[i], 0, true);
-                init_desc_masks(&desc[i]);
-                desc[i].kstat_irqs = kstat_irqs_all[i];
-        }
-        return arch_early_irq_init();
-}
-
-struct irq_desc *irq_to_desc(unsigned int irq)
-{
-        return (irq < NR_IRQS) ? irq_desc + irq : NULL;
-}
-
-struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
-{
-        return irq_to_desc(irq);
-}
-#endif /* !CONFIG_SPARSE_IRQ */
-
-void clear_kstat_irqs(struct irq_desc *desc)
-{
-        memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
-}
-
-/*
- * What should we do if we get a hw irq event on an illegal vector?
- * Each architecture has to answer this themself.
- */
-static void ack_bad(struct irq_data *data)
-{
-        struct irq_desc *desc = irq_data_to_desc(data);
-
-        print_irq_desc(data->irq, desc);
-        ack_bad_irq(data->irq);
-}
-
-/*
- * NOP functions
- */
-static void noop(struct irq_data *data) { }
-
-static unsigned int noop_ret(struct irq_data *data)
-{
-        return 0;
-}
-
-#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
-static void compat_noop(unsigned int irq) { }
-#define END_INIT .end = compat_noop
-#else
-#define END_INIT
-#endif
-
-/*
- * Generic no controller implementation
- */
-struct irq_chip no_irq_chip = {
-        .name           = "none",
-        .irq_startup    = noop_ret,
-        .irq_shutdown   = noop,
-        .irq_enable     = noop,
-        .irq_disable    = noop,
-        .irq_ack        = ack_bad,
-        END_INIT
-};
-
-/*
- * Generic dummy implementation which can be used for
- * real dumb interrupt sources
- */
-struct irq_chip dummy_irq_chip = {
-        .name           = "dummy",
-        .irq_startup    = noop_ret,
-        .irq_shutdown   = noop,
-        .irq_enable     = noop,
-        .irq_disable    = noop,
-        .irq_ack        = noop,
-        .irq_mask       = noop,
-        .irq_unmask     = noop,
-        END_INIT
-};
-
 /*
  * Special, empty irq handler:
  */
@@ -540,21 +229,3 @@ out:
         return 1;
 }
 #endif
-
-void early_init_irq_lock_class(void)
-{
-        struct irq_desc *desc;
-        int i;
-
-        for_each_irq_desc(i, desc) {
-                lockdep_set_class(&desc->lock, &irq_desc_lock_class);
-        }
-}
-
-unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
-{
-        struct irq_desc *desc = irq_to_desc(irq);
-        return desc ? desc->kstat_irqs[cpu] : 0;
-}
-EXPORT_SYMBOL(kstat_irqs_cpu);
-
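The "controller-independent interrupt architecture" comment removed above shows up concretely in the driver API: a handler is attached purely by irq number, and no irq_chip is ever named. A sketch, not part of this patch, with invented device, handler, and function names:

#include <linux/interrupt.h>

static irqreturn_t mydev_isr(int irq, void *dev_id)
{
        /* talk to the device; the irq_chip behind 'irq' is invisible here */
        return IRQ_HANDLED;
}

static int mydev_attach(unsigned int irq, void *dev)
{
        /*
         * the same call works whether 'irq' ends at an IOAPIC, a GPIO
         * controller, or the dummy chip removed above
         */
        return request_irq(irq, mydev_isr, 0, "mydev", dev);
}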