author     Thomas Gleixner <tglx@linutronix.de>   2010-09-22 11:09:43 -0400
committer  Thomas Gleixner <tglx@linutronix.de>   2010-10-12 10:39:05 -0400
commit     3795de236d67a05994a1a12759db9d4dd9ffc42c (patch)
tree       75e5a2a8922e114de60f468494c879ba4f65ebee /kernel/irq
parent     f303a6dd127b5ec6de90d1cd79ed19820c7e9658 (diff)
genirq: Distangle kernel/irq/handle.c
kernel/irq/handle.c has become a dumping ground for random code in
random order. Split out the irq descriptor management and the dummy
irq_chip implementation into separate files. Clean up the include maze
while at it.

No code change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@elte.hu>
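For context, a minimal sketch (not part of this patch) of how a board file
of this era could attach the dummy_irq_chip that this commit moves into its
own file, using the then-current set_irq_chip_and_handler() genirq API; the
interrupt number and the init function are hypothetical:

/*
 * Illustrative sketch only: EXAMPLE_DUMB_IRQ and example_init_dumb_irq()
 * are made up; dummy_irq_chip and handle_simple_irq() are real genirq
 * symbols of this era.
 */
#include <linux/irq.h>

#define EXAMPLE_DUMB_IRQ	42	/* hypothetical interrupt line */

static void __init example_init_dumb_irq(void)
{
	/*
	 * A source with no ack/mask hardware: route it through
	 * dummy_irq_chip, whose callbacks are all no-ops.
	 */
	set_irq_chip_and_handler(EXAMPLE_DUMB_IRQ, &dummy_irq_chip,
				 handle_simple_irq);
}

Routing such sources through the dummy chip lets the generic flow handlers
run unchanged while every chip callback is a no-op.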
Diffstat (limited to 'kernel/irq')
-rw-r--r--  kernel/irq/Makefile     |   2
-rw-r--r--  kernel/irq/dummychip.c  |  68
-rw-r--r--  kernel/irq/handle.c     | 333
-rw-r--r--  kernel/irq/irqdesc.c    | 269
4 files changed, 340 insertions(+), 332 deletions(-)
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index 7d047808419d..1eaab0da56db 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -1,5 +1,5 @@
 
-obj-y := handle.o manage.o spurious.o resend.o chip.o devres.o
+obj-y := irqdesc.o handle.o manage.o spurious.o resend.o chip.o dummychip.o devres.o
 obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
diff --git a/kernel/irq/dummychip.c b/kernel/irq/dummychip.c
new file mode 100644
index 000000000000..918dea9de9ea
--- /dev/null
+++ b/kernel/irq/dummychip.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
+ * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
+ *
+ * This file contains the dummy interrupt chip implementation
+ */
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include "internals.h"
+
+/*
+ * What should we do if we get a hw irq event on an illegal vector?
+ * Each architecture has to answer this themself.
+ */
+static void ack_bad(struct irq_data *data)
+{
+	struct irq_desc *desc = irq_data_to_desc(data);
+
+	print_irq_desc(data->irq, desc);
+	ack_bad_irq(data->irq);
+}
+
+/*
+ * NOP functions
+ */
+static void noop(struct irq_data *data) { }
+
+static unsigned int noop_ret(struct irq_data *data)
+{
+	return 0;
+}
+
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
+static void compat_noop(unsigned int irq) { }
+#define END_INIT .end = compat_noop
+#else
+#define END_INIT
+#endif
+
+/*
+ * Generic no controller implementation
+ */
+struct irq_chip no_irq_chip = {
+	.name		= "none",
+	.irq_startup	= noop_ret,
+	.irq_shutdown	= noop,
+	.irq_enable	= noop,
+	.irq_disable	= noop,
+	.irq_ack	= ack_bad,
+	END_INIT
+};
+
+/*
+ * Generic dummy implementation which can be used for
+ * real dumb interrupt sources
+ */
+struct irq_chip dummy_irq_chip = {
+	.name		= "dummy",
+	.irq_startup	= noop_ret,
+	.irq_shutdown	= noop,
+	.irq_enable	= noop,
+	.irq_disable	= noop,
+	.irq_ack	= noop,
+	.irq_mask	= noop,
+	.irq_unmask	= noop,
+	END_INIT
+};
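To make the END_INIT compat machinery above concrete: with
CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED unset (the deprecated callbacks still
compiled in), the no_irq_chip initializer effectively expands as in this
sketch, which is not a literal line in the tree:

/* Effective preprocessor expansion of the initializer above (sketch) */
struct irq_chip no_irq_chip = {
	.name		= "none",
	.irq_startup	= noop_ret,
	.irq_shutdown	= noop,
	.irq_enable	= noop,
	.irq_disable	= noop,
	.irq_ack	= ack_bad,
	.end		= compat_noop,	/* supplied by END_INIT */
};

When the option is set, END_INIT expands to nothing and the legacy .end
member is never referenced.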
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 3fcef37154a1..e2347eb63306 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -11,24 +11,15 @@
  */
 
 #include <linux/irq.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/module.h>
 #include <linux/random.h>
+#include <linux/sched.h>
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
-#include <linux/rculist.h>
-#include <linux/hash.h>
-#include <linux/radix-tree.h>
+
 #include <trace/events/irq.h>
 
 #include "internals.h"
 
-/*
- * lockdep: we want to handle all irq_desc locks as a single lock-class:
- */
-struct lock_class_key irq_desc_lock_class;
-
 /**
  * handle_bad_irq - handle spurious and unhandled irqs
  * @irq:       the interrupt number
@@ -43,308 +34,6 @@ void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
 	ack_bad_irq(irq);
 }
 
-#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
-static void __init init_irq_default_affinity(void)
-{
-	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
-	cpumask_setall(irq_default_affinity);
-}
-#else
-static void __init init_irq_default_affinity(void)
-{
-}
-#endif
-
-/*
- * Linux has a controller-independent interrupt architecture.
- * Every controller has a 'controller-template', that is used
- * by the main code to do the right thing. Each driver-visible
- * interrupt source is transparently wired to the appropriate
- * controller. Thus drivers need not be aware of the
- * interrupt-controller.
- *
- * The code is designed to be easily extended with new/different
- * interrupt controllers, without having to do assembly magic or
- * having to touch the generic code.
- *
- * Controller mappings for all interrupt sources:
- */
-int nr_irqs = NR_IRQS;
-EXPORT_SYMBOL_GPL(nr_irqs);
-
-#ifdef CONFIG_SPARSE_IRQ
-
-static struct irq_desc irq_desc_init = {
-	.status = IRQ_DISABLED,
-	.handle_irq = handle_bad_irq,
-	.depth = 1,
-	.lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-};
-
-void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
-{
-	void *ptr;
-
-	ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
-			   GFP_ATOMIC, node);
-
-	/*
-	 * don't overwite if can not get new one
-	 * init_copy_kstat_irqs() could still use old one
-	 */
-	if (ptr) {
-		printk(KERN_DEBUG " alloc kstat_irqs on node %d\n", node);
-		desc->kstat_irqs = ptr;
-	}
-}
-
-static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
-{
-	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
-
-	raw_spin_lock_init(&desc->lock);
-	desc->irq_data.irq = irq;
-#ifdef CONFIG_SMP
-	desc->irq_data.node = node;
-#endif
-	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
-	init_kstat_irqs(desc, node, nr_cpu_ids);
-	if (!desc->kstat_irqs) {
-		printk(KERN_ERR "can not alloc kstat_irqs\n");
-		BUG_ON(1);
-	}
-	if (!alloc_desc_masks(desc, node, false)) {
-		printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
-		BUG_ON(1);
-	}
-	init_desc_masks(desc);
-	arch_init_chip_data(desc, node);
-}
-
-/*
- * Protect the sparse_irqs:
- */
-DEFINE_RAW_SPINLOCK(sparse_irq_lock);
-
-static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);
-
-static void set_irq_desc(unsigned int irq, struct irq_desc *desc)
-{
-	radix_tree_insert(&irq_desc_tree, irq, desc);
-}
-
-struct irq_desc *irq_to_desc(unsigned int irq)
-{
-	return radix_tree_lookup(&irq_desc_tree, irq);
-}
-
-void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
-{
-	void **ptr;
-
-	ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
-	if (ptr)
-		radix_tree_replace_slot(ptr, desc);
-}
-
-static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
-	[0 ... NR_IRQS_LEGACY-1] = {
-		.status = IRQ_DISABLED,
-		.handle_irq = handle_bad_irq,
-		.depth = 1,
-		.lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-	}
-};
-
-static unsigned int *kstat_irqs_legacy;
-
-int __init early_irq_init(void)
-{
-	struct irq_desc *desc;
-	int legacy_count;
-	int node;
-	int i;
-
-	init_irq_default_affinity();
-
-	/* initialize nr_irqs based on nr_cpu_ids */
-	arch_probe_nr_irqs();
-	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
-
-	desc = irq_desc_legacy;
-	legacy_count = ARRAY_SIZE(irq_desc_legacy);
-	node = first_online_node;
-
-	/* allocate based on nr_cpu_ids */
-	kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
-					 sizeof(int), GFP_NOWAIT, node);
-
-	irq_desc_init.irq_data.chip = &no_irq_chip;
-
-	for (i = 0; i < legacy_count; i++) {
-		desc[i].irq_data.irq = i;
-		desc[i].irq_data.chip = &no_irq_chip;
-#ifdef CONFIG_SMP
-		desc[i].irq_data.node = node;
-#endif
-		desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
-		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
-		alloc_desc_masks(&desc[i], node, true);
-		init_desc_masks(&desc[i]);
-		set_irq_desc(i, &desc[i]);
-	}
-
-	return arch_early_irq_init();
-}
-
-struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
-{
-	struct irq_desc *desc;
-	unsigned long flags;
-
-	if (irq >= nr_irqs) {
-		WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
-			irq, nr_irqs);
-		return NULL;
-	}
-
-	desc = irq_to_desc(irq);
-	if (desc)
-		return desc;
-
-	raw_spin_lock_irqsave(&sparse_irq_lock, flags);
-
-	/* We have to check it to avoid races with another CPU */
-	desc = irq_to_desc(irq);
-	if (desc)
-		goto out_unlock;
-
-	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
-
-	printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node);
-	if (!desc) {
-		printk(KERN_ERR "can not alloc irq_desc\n");
-		BUG_ON(1);
-	}
-	init_one_irq_desc(irq, desc, node);
-
-	set_irq_desc(irq, desc);
-
-out_unlock:
-	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
-
-	return desc;
-}
-
-#else /* !CONFIG_SPARSE_IRQ */
-
-struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
-	[0 ... NR_IRQS-1] = {
-		.status = IRQ_DISABLED,
-		.handle_irq = handle_bad_irq,
-		.depth = 1,
-		.lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
-	}
-};
-
-static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
-int __init early_irq_init(void)
-{
-	struct irq_desc *desc;
-	int count;
-	int i;
-
-	init_irq_default_affinity();
-
-	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
-
-	desc = irq_desc;
-	count = ARRAY_SIZE(irq_desc);
-
-	for (i = 0; i < count; i++) {
-		desc[i].irq_data.irq = i;
-		desc[i].irq_data.chip = &no_irq_chip;
-		alloc_desc_masks(&desc[i], 0, true);
-		init_desc_masks(&desc[i]);
-		desc[i].kstat_irqs = kstat_irqs_all[i];
-	}
-	return arch_early_irq_init();
-}
-
-struct irq_desc *irq_to_desc(unsigned int irq)
-{
-	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
-}
-
-struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
-{
-	return irq_to_desc(irq);
-}
-#endif /* !CONFIG_SPARSE_IRQ */
-
-void clear_kstat_irqs(struct irq_desc *desc)
-{
-	memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
-}
-
-/*
- * What should we do if we get a hw irq event on an illegal vector?
- * Each architecture has to answer this themself.
- */
-static void ack_bad(struct irq_data *data)
-{
-	struct irq_desc *desc = irq_data_to_desc(data);
-
-	print_irq_desc(data->irq, desc);
-	ack_bad_irq(data->irq);
-}
-
-/*
- * NOP functions
- */
-static void noop(struct irq_data *data) { }
-
-static unsigned int noop_ret(struct irq_data *data)
-{
-	return 0;
-}
-
-#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
-static void compat_noop(unsigned int irq) { }
-#define END_INIT .end = compat_noop
-#else
-#define END_INIT
-#endif
-
-/*
- * Generic no controller implementation
- */
-struct irq_chip no_irq_chip = {
-	.name		= "none",
-	.irq_startup	= noop_ret,
-	.irq_shutdown	= noop,
-	.irq_enable	= noop,
-	.irq_disable	= noop,
-	.irq_ack	= ack_bad,
-	END_INIT
-};
-
-/*
- * Generic dummy implementation which can be used for
- * real dumb interrupt sources
- */
-struct irq_chip dummy_irq_chip = {
-	.name		= "dummy",
-	.irq_startup	= noop_ret,
-	.irq_shutdown	= noop,
-	.irq_enable	= noop,
-	.irq_disable	= noop,
-	.irq_ack	= noop,
-	.irq_mask	= noop,
-	.irq_unmask	= noop,
-	END_INIT
-};
-
 /*
  * Special, empty irq handler:
  */
@@ -540,21 +229,3 @@ out:
 	return 1;
 }
 #endif
-
-void early_init_irq_lock_class(void)
-{
-	struct irq_desc *desc;
-	int i;
-
-	for_each_irq_desc(i, desc) {
-		lockdep_set_class(&desc->lock, &irq_desc_lock_class);
-	}
-}
-
-unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
-{
-	struct irq_desc *desc = irq_to_desc(irq);
-	return desc ? desc->kstat_irqs[cpu] : 0;
-}
-EXPORT_SYMBOL(kstat_irqs_cpu);
-
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
new file mode 100644
index 000000000000..fbf8cfa00510
--- /dev/null
+++ b/kernel/irq/irqdesc.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
+ * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
+ *
+ * This file contains the interrupt descriptor management code
+ *
+ * Detailed information is available in Documentation/DocBook/genericirq
+ *
+ */
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/radix-tree.h>
+
+#include "internals.h"
+
+/*
+ * lockdep: we want to handle all irq_desc locks as a single lock-class:
+ */
+struct lock_class_key irq_desc_lock_class;
+
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
+static void __init init_irq_default_affinity(void)
+{
+	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
+	cpumask_setall(irq_default_affinity);
+}
+#else
+static void __init init_irq_default_affinity(void)
+{
+}
+#endif
+
+int nr_irqs = NR_IRQS;
+EXPORT_SYMBOL_GPL(nr_irqs);
+
+#ifdef CONFIG_SPARSE_IRQ
+
+static struct irq_desc irq_desc_init = {
+	.status = IRQ_DISABLED,
+	.handle_irq = handle_bad_irq,
+	.depth = 1,
+	.lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+};
+
+void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
+{
+	void *ptr;
+
+	ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
+			   GFP_ATOMIC, node);
+
+	/*
+	 * don't overwite if can not get new one
+	 * init_copy_kstat_irqs() could still use old one
+	 */
+	if (ptr) {
+		printk(KERN_DEBUG " alloc kstat_irqs on node %d\n", node);
+		desc->kstat_irqs = ptr;
+	}
+}
+
+static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
+{
+	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
+
+	raw_spin_lock_init(&desc->lock);
+	desc->irq_data.irq = irq;
+#ifdef CONFIG_SMP
+	desc->irq_data.node = node;
+#endif
+	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
+	init_kstat_irqs(desc, node, nr_cpu_ids);
+	if (!desc->kstat_irqs) {
+		printk(KERN_ERR "can not alloc kstat_irqs\n");
+		BUG_ON(1);
+	}
+	if (!alloc_desc_masks(desc, node, false)) {
+		printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
+		BUG_ON(1);
+	}
+	init_desc_masks(desc);
+	arch_init_chip_data(desc, node);
+}
+
+/*
+ * Protect the sparse_irqs:
+ */
+DEFINE_RAW_SPINLOCK(sparse_irq_lock);
+
+static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);
+
+static void set_irq_desc(unsigned int irq, struct irq_desc *desc)
+{
+	radix_tree_insert(&irq_desc_tree, irq, desc);
+}
+
+struct irq_desc *irq_to_desc(unsigned int irq)
+{
+	return radix_tree_lookup(&irq_desc_tree, irq);
+}
+
+void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
+{
+	void **ptr;
+
+	ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
+	if (ptr)
+		radix_tree_replace_slot(ptr, desc);
+}
+
+static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
+	[0 ... NR_IRQS_LEGACY-1] = {
+		.status = IRQ_DISABLED,
+		.handle_irq = handle_bad_irq,
+		.depth = 1,
+		.lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+	}
+};
+
+static unsigned int *kstat_irqs_legacy;
+
+int __init early_irq_init(void)
+{
+	struct irq_desc *desc;
+	int legacy_count;
+	int node;
+	int i;
+
+	init_irq_default_affinity();
+
+	/* initialize nr_irqs based on nr_cpu_ids */
+	arch_probe_nr_irqs();
+	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
+
+	desc = irq_desc_legacy;
+	legacy_count = ARRAY_SIZE(irq_desc_legacy);
+	node = first_online_node;
+
+	/* allocate based on nr_cpu_ids */
+	kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
+					 sizeof(int), GFP_NOWAIT, node);
+
+	irq_desc_init.irq_data.chip = &no_irq_chip;
+
+	for (i = 0; i < legacy_count; i++) {
+		desc[i].irq_data.irq = i;
+		desc[i].irq_data.chip = &no_irq_chip;
+#ifdef CONFIG_SMP
+		desc[i].irq_data.node = node;
+#endif
+		desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
+		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
+		alloc_desc_masks(&desc[i], node, true);
+		init_desc_masks(&desc[i]);
+		set_irq_desc(i, &desc[i]);
+	}
+
+	return arch_early_irq_init();
+}
+
+struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
+{
+	struct irq_desc *desc;
+	unsigned long flags;
+
+	if (irq >= nr_irqs) {
+		WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
+			irq, nr_irqs);
+		return NULL;
+	}
+
+	desc = irq_to_desc(irq);
+	if (desc)
+		return desc;
+
+	raw_spin_lock_irqsave(&sparse_irq_lock, flags);
+
+	/* We have to check it to avoid races with another CPU */
+	desc = irq_to_desc(irq);
+	if (desc)
+		goto out_unlock;
+
+	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
+
+	printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node);
+	if (!desc) {
+		printk(KERN_ERR "can not alloc irq_desc\n");
+		BUG_ON(1);
+	}
+	init_one_irq_desc(irq, desc, node);
+
+	set_irq_desc(irq, desc);
+
+out_unlock:
+	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
+
+	return desc;
+}
+
+#else /* !CONFIG_SPARSE_IRQ */
+
+struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
+	[0 ... NR_IRQS-1] = {
+		.status = IRQ_DISABLED,
+		.handle_irq = handle_bad_irq,
+		.depth = 1,
+		.lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
+	}
+};
+
+static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
+int __init early_irq_init(void)
+{
+	struct irq_desc *desc;
+	int count;
+	int i;
+
+	init_irq_default_affinity();
+
+	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
+
+	desc = irq_desc;
+	count = ARRAY_SIZE(irq_desc);
+
+	for (i = 0; i < count; i++) {
+		desc[i].irq_data.irq = i;
+		desc[i].irq_data.chip = &no_irq_chip;
+		alloc_desc_masks(&desc[i], 0, true);
+		init_desc_masks(&desc[i]);
+		desc[i].kstat_irqs = kstat_irqs_all[i];
+	}
+	return arch_early_irq_init();
+}
+
+struct irq_desc *irq_to_desc(unsigned int irq)
+{
+	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
+}
+
+struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
+{
+	return irq_to_desc(irq);
+}
+#endif /* !CONFIG_SPARSE_IRQ */
+
+void clear_kstat_irqs(struct irq_desc *desc)
+{
+	memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
+}
+
+void early_init_irq_lock_class(void)
+{
+	struct irq_desc *desc;
+	int i;
+
+	for_each_irq_desc(i, desc) {
+		lockdep_set_class(&desc->lock, &irq_desc_lock_class);
+	}
+}
+
+unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	return desc ? desc->kstat_irqs[cpu] : 0;
+}
+EXPORT_SYMBOL(kstat_irqs_cpu);
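Finally, an illustrative caller for the descriptor API collected in this new
file; the function and its arguments are hypothetical, but
irq_to_desc_alloc_node() and kstat_irqs_cpu() are the entry points defined
above:

/* Hypothetical sketch of a 2.6.36-era user of this API; not from the tree */
#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>

static int example_probe_irq(unsigned int irq, int node)
{
	/* Allocates the descriptor on demand under CONFIG_SPARSE_IRQ */
	struct irq_desc *desc = irq_to_desc_alloc_node(irq, node);

	if (!desc)
		return -EINVAL;		/* irq >= nr_irqs */

	/* Per-CPU count of handled interrupts; 0 if it never fired */
	return kstat_irqs_cpu(irq, 0);
}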