Diffstat (limited to 'kernel/irq/handle.c')
-rw-r--r-- | kernel/irq/handle.c | 559
1 file changed, 92 insertions(+), 467 deletions(-)
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 27e5c6911223..470d08c82bbe 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -11,24 +11,15 @@
  */
 
 #include <linux/irq.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/module.h>
 #include <linux/random.h>
+#include <linux/sched.h>
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
-#include <linux/rculist.h>
-#include <linux/hash.h>
-#include <linux/radix-tree.h>
+
 #include <trace/events/irq.h>
 
 #include "internals.h"
 
-/*
- * lockdep: we want to handle all irq_desc locks as a single lock-class:
- */
-struct lock_class_key irq_desc_lock_class;
-
 /**
  * handle_bad_irq - handle spurious and unhandled irqs
  * @irq:       the interrupt number
@@ -43,304 +34,6 @@ void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
         ack_bad_irq(irq);
 }
 
-#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
-static void __init init_irq_default_affinity(void)
-{
-        alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
-        cpumask_setall(irq_default_affinity);
-}
-#else
-static void __init init_irq_default_affinity(void)
-{
-}
-#endif
-
-/*
- * Linux has a controller-independent interrupt architecture.
- * Every controller has a 'controller-template', that is used
- * by the main code to do the right thing. Each driver-visible
- * interrupt source is transparently wired to the appropriate
- * controller. Thus drivers need not be aware of the
- * interrupt-controller.
- *
- * The code is designed to be easily extended with new/different
- * interrupt controllers, without having to do assembly magic or
- * having to touch the generic code.
- *
- * Controller mappings for all interrupt sources:
- */
-int nr_irqs = NR_IRQS;
-EXPORT_SYMBOL_GPL(nr_irqs);
-
-#ifdef CONFIG_SPARSE_IRQ
-
-static struct irq_desc irq_desc_init = {
-        .irq        = -1,
-        .status     = IRQ_DISABLED,
-        .chip       = &no_irq_chip,
-        .handle_irq = handle_bad_irq,
-        .depth      = 1,
-        .lock       = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-};
-
-void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
-{
-        void *ptr;
-
-        ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
-                           GFP_ATOMIC, node);
-
-        /*
-         * don't overwite if can not get new one
-         * init_copy_kstat_irqs() could still use old one
-         */
-        if (ptr) {
-                printk(KERN_DEBUG "  alloc kstat_irqs on node %d\n", node);
-                desc->kstat_irqs = ptr;
-        }
-}
-
-static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
-{
-        memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
-
-        raw_spin_lock_init(&desc->lock);
-        desc->irq = irq;
-#ifdef CONFIG_SMP
-        desc->node = node;
-#endif
-        lockdep_set_class(&desc->lock, &irq_desc_lock_class);
-        init_kstat_irqs(desc, node, nr_cpu_ids);
-        if (!desc->kstat_irqs) {
-                printk(KERN_ERR "can not alloc kstat_irqs\n");
-                BUG_ON(1);
-        }
-        if (!alloc_desc_masks(desc, node, false)) {
-                printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
-                BUG_ON(1);
-        }
-        init_desc_masks(desc);
-        arch_init_chip_data(desc, node);
-}
-
-/*
- * Protect the sparse_irqs:
- */
-DEFINE_RAW_SPINLOCK(sparse_irq_lock);
-
-static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);
-
-static void set_irq_desc(unsigned int irq, struct irq_desc *desc)
-{
-        radix_tree_insert(&irq_desc_tree, irq, desc);
-}
-
-struct irq_desc *irq_to_desc(unsigned int irq)
-{
-        return radix_tree_lookup(&irq_desc_tree, irq);
-}
-
-void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
-{
-        void **ptr;
-
-        ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
-        if (ptr)
-                radix_tree_replace_slot(ptr, desc);
-}
-
-static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
-        [0 ... NR_IRQS_LEGACY-1] = {
-                .irq        = -1,
-                .status     = IRQ_DISABLED,
-                .chip       = &no_irq_chip,
-                .handle_irq = handle_bad_irq,
-                .depth      = 1,
-                .lock       = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-        }
-};
-
-static unsigned int *kstat_irqs_legacy;
-
-int __init early_irq_init(void)
-{
-        struct irq_desc *desc;
-        int legacy_count;
-        int node;
-        int i;
-
-        init_irq_default_affinity();
-
-        /* initialize nr_irqs based on nr_cpu_ids */
-        arch_probe_nr_irqs();
-        printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
-
-        desc = irq_desc_legacy;
-        legacy_count = ARRAY_SIZE(irq_desc_legacy);
-        node = first_online_node;
-
-        /* allocate based on nr_cpu_ids */
-        kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
-                                         sizeof(int), GFP_NOWAIT, node);
-
-        for (i = 0; i < legacy_count; i++) {
-                desc[i].irq = i;
-#ifdef CONFIG_SMP
-                desc[i].node = node;
-#endif
-                desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
-                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
-                alloc_desc_masks(&desc[i], node, true);
-                init_desc_masks(&desc[i]);
-                set_irq_desc(i, &desc[i]);
-        }
-
-        return arch_early_irq_init();
-}
-
-struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
-{
-        struct irq_desc *desc;
-        unsigned long flags;
-
-        if (irq >= nr_irqs) {
-                WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
-                        irq, nr_irqs);
-                return NULL;
-        }
-
-        desc = irq_to_desc(irq);
-        if (desc)
-                return desc;
-
-        raw_spin_lock_irqsave(&sparse_irq_lock, flags);
-
-        /* We have to check it to avoid races with another CPU */
-        desc = irq_to_desc(irq);
-        if (desc)
-                goto out_unlock;
-
-        desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
-
-        printk(KERN_DEBUG "  alloc irq_desc for %d on node %d\n", irq, node);
-        if (!desc) {
-                printk(KERN_ERR "can not alloc irq_desc\n");
-                BUG_ON(1);
-        }
-        init_one_irq_desc(irq, desc, node);
-
-        set_irq_desc(irq, desc);
-
-out_unlock:
-        raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
-
-        return desc;
-}
-
-#else /* !CONFIG_SPARSE_IRQ */
-
-struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
-        [0 ... NR_IRQS-1] = {
-                .status = IRQ_DISABLED,
-                .chip = &no_irq_chip,
-                .handle_irq = handle_bad_irq,
-                .depth = 1,
-                .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
-        }
-};
-
-static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
-int __init early_irq_init(void)
-{
-        struct irq_desc *desc;
-        int count;
-        int i;
-
-        init_irq_default_affinity();
-
-        printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
-
-        desc = irq_desc;
-        count = ARRAY_SIZE(irq_desc);
-
-        for (i = 0; i < count; i++) {
-                desc[i].irq = i;
-                alloc_desc_masks(&desc[i], 0, true);
-                init_desc_masks(&desc[i]);
-                desc[i].kstat_irqs = kstat_irqs_all[i];
-        }
-        return arch_early_irq_init();
-}
-
-struct irq_desc *irq_to_desc(unsigned int irq)
-{
-        return (irq < NR_IRQS) ? irq_desc + irq : NULL;
-}
-
-struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
-{
-        return irq_to_desc(irq);
-}
-#endif /* !CONFIG_SPARSE_IRQ */
-
-void clear_kstat_irqs(struct irq_desc *desc)
-{
-        memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
-}
-
-/*
- * What should we do if we get a hw irq event on an illegal vector?
- * Each architecture has to answer this themself.
- */
-static void ack_bad(unsigned int irq)
-{
-        struct irq_desc *desc = irq_to_desc(irq);
-
-        print_irq_desc(irq, desc);
-        ack_bad_irq(irq);
-}
-
-/*
- * NOP functions
- */
-static void noop(unsigned int irq)
-{
-}
-
-static unsigned int noop_ret(unsigned int irq)
-{
-        return 0;
-}
-
-/*
- * Generic no controller implementation
- */
-struct irq_chip no_irq_chip = {
-        .name      = "none",
-        .startup   = noop_ret,
-        .shutdown  = noop,
-        .enable    = noop,
-        .disable   = noop,
-        .ack       = ack_bad,
-        .end       = noop,
-};
-
-/*
- * Generic dummy implementation which can be used for
- * real dumb interrupt sources
- */
-struct irq_chip dummy_irq_chip = {
-        .name      = "dummy",
-        .startup   = noop_ret,
-        .shutdown  = noop,
-        .enable    = noop,
-        .disable   = noop,
-        .ack       = noop,
-        .mask      = noop,
-        .unmask    = noop,
-        .end       = noop,
-};
-
 /*
  * Special, empty irq handler:
  */
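
Note: the ~300 lines removed above are not simply deleted; in this reorganization the sparse/legacy irq_desc allocation lives on in kernel/irq/irqdesc.c and the no_irq_chip/dummy_irq_chip stubs in kernel/irq/dummychip.c. What callers keep relying on is the lookup-then-lock pattern around the descriptor. A minimal sketch of that pattern, assuming the post-move API; the helper name example_irq_has_action is invented for illustration and is not kernel API:

    #include <linux/irq.h>
    #include <linux/irqdesc.h>

    /* Illustrative helper, not kernel API: look up a descriptor and
     * inspect it under desc->lock, the same lock the flow handlers take. */
    static bool example_irq_has_action(unsigned int irq)
    {
            struct irq_desc *desc = irq_to_desc(irq); /* NULL if never allocated */
            unsigned long flags;
            bool ret;

            if (!desc)
                    return false;

            raw_spin_lock_irqsave(&desc->lock, flags);
            ret = desc->action != NULL;
            raw_spin_unlock_irqrestore(&desc->lock, flags);
            return ret;
    }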
@@ -358,31 +51,87 @@ static void warn_no_thread(unsigned int irq, struct irqaction *action)
                "but no thread function available.", irq, action->name);
 }
 
-/**
- * handle_IRQ_event - irq action chain handler
- * @irq:       the interrupt number
- * @action:    the interrupt action chain for this irq
- *
- * Handles the action chain of an irq event
- */
-irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
+static void irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
+{
+        /*
+         * Wake up the handler thread for this action. In case the
+         * thread crashed and was killed we just pretend that we
+         * handled the interrupt. The hardirq handler has disabled the
+         * device interrupt, so no irq storm is lurking. If the
+         * RUNTHREAD bit is already set, nothing to do.
+         */
+        if (test_bit(IRQTF_DIED, &action->thread_flags) ||
+            test_and_set_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+                return;
+
+        /*
+         * It's safe to OR the mask lockless here. We have only two
+         * places which write to threads_oneshot: This code and the
+         * irq thread.
+         *
+         * This code is the hard irq context and can never run on two
+         * cpus in parallel. If it ever does we have more serious
+         * problems than this bitmask.
+         *
+         * The irq threads of this irq which clear their "running" bit
+         * in threads_oneshot are serialized via desc->lock against
+         * each other and they are serialized against this code by
+         * IRQS_INPROGRESS.
+         *
+         * Hard irq handler:
+         *
+         *      spin_lock(desc->lock);
+         *      desc->state |= IRQS_INPROGRESS;
+         *      spin_unlock(desc->lock);
+         *      set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
+         *      desc->threads_oneshot |= mask;
+         *      spin_lock(desc->lock);
+         *      desc->state &= ~IRQS_INPROGRESS;
+         *      spin_unlock(desc->lock);
+         *
+         * irq thread:
+         *
+         * again:
+         *      spin_lock(desc->lock);
+         *      if (desc->state & IRQS_INPROGRESS) {
+         *              spin_unlock(desc->lock);
+         *              while(desc->state & IRQS_INPROGRESS)
+         *                      cpu_relax();
+         *              goto again;
+         *      }
+         *      if (!test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+         *              desc->threads_oneshot &= ~mask;
+         *      spin_unlock(desc->lock);
+         *
+         * So either the thread waits for us to clear IRQS_INPROGRESS
+         * or we are waiting in the flow handler for desc->lock to be
+         * released before we reach this point. The thread also checks
+         * IRQTF_RUNTHREAD under desc->lock. If set it leaves
+         * threads_oneshot untouched and runs the thread another time.
+         */
+        desc->threads_oneshot |= action->thread_mask;
+        wake_up_process(action->thread);
+}
+
+irqreturn_t
+handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
 {
-        irqreturn_t ret, retval = IRQ_NONE;
-        unsigned int status = 0;
+        irqreturn_t retval = IRQ_NONE;
+        unsigned int random = 0, irq = desc->irq_data.irq;
 
         do {
+                irqreturn_t res;
+
                 trace_irq_handler_entry(irq, action);
-                ret = action->handler(irq, action->dev_id);
-                trace_irq_handler_exit(irq, action, ret);
+                res = action->handler(irq, action->dev_id);
+                trace_irq_handler_exit(irq, action, res);
 
-                switch (ret) {
-                case IRQ_WAKE_THREAD:
-                        /*
-                         * Set result to handled so the spurious check
-                         * does not trigger.
-                         */
-                        ret = IRQ_HANDLED;
+                if (WARN_ONCE(!irqs_disabled(),"irq %u handler %pF enabled interrupts\n",
+                              irq, action->handler))
+                        local_irq_disable();
 
+                switch (res) {
+                case IRQ_WAKE_THREAD:
                         /*
                          * Catch drivers which return WAKE_THREAD but
                          * did not set up a thread function
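
Note: irq_wake_thread() and handle_irq_event_percpu() above are the core half of the threaded-interrupt contract that drivers enter through request_threaded_irq(). The primary handler runs where handle_irq_event_percpu() invokes action->handler(), and returning IRQ_WAKE_THREAD is what routes into irq_wake_thread(). A sketch of the driver side; the device (struct foo_device), its register layout, and the "foo" names are all hypothetical:

    #include <linux/interrupt.h>
    #include <linux/io.h>

    struct foo_device {
            void __iomem *regs;
    };

    /* Primary handler: hard irq context, interrupts disabled (the new
     * WARN_ONCE above fires if a handler re-enables them). */
    static irqreturn_t foo_hardirq(int irq, void *dev_id)
    {
            struct foo_device *foo = dev_id;

            if (!(readl(foo->regs) & 0x1))  /* hypothetical "pending" bit */
                    return IRQ_NONE;        /* shared line: not ours */

            return IRQ_WAKE_THREAD;         /* core calls irq_wake_thread() */
    }

    /* Threaded handler: process context, may sleep. */
    static irqreturn_t foo_thread_fn(int irq, void *dev_id)
    {
            /* heavy lifting here */
            return IRQ_HANDLED;
    }

    static int foo_setup_irq(struct foo_device *foo, unsigned int irq)
    {
            /* IRQF_ONESHOT keeps the line masked until the thread finishes,
             * which is what the threads_oneshot bookkeeping above tracks. */
            return request_threaded_irq(irq, foo_hardirq, foo_thread_fn,
                                        IRQF_ONESHOT, "foo", foo);
    }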
@@ -392,165 +141,41 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
                         break;
                 }
 
-                /*
-                 * Wake up the handler thread for this
-                 * action. In case the thread crashed and was
-                 * killed we just pretend that we handled the
-                 * interrupt. The hardirq handler above has
-                 * disabled the device interrupt, so no irq
-                 * storm is lurking.
-                 */
-                if (likely(!test_bit(IRQTF_DIED,
-                                     &action->thread_flags))) {
-                        set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
-                        wake_up_process(action->thread);
-                }
+                irq_wake_thread(desc, action);
 
                 /* Fall through to add to randomness */
                 case IRQ_HANDLED:
-                        status |= action->flags;
+                        random |= action->flags;
                         break;
 
                 default:
                         break;
                 }
 
-                retval |= ret;
+                retval |= res;
                 action = action->next;
         } while (action);
 
-        if (status & IRQF_SAMPLE_RANDOM)
+        if (random & IRQF_SAMPLE_RANDOM)
                 add_interrupt_randomness(irq);
-        local_irq_disable();
 
+        if (!noirqdebug)
+                note_interrupt(irq, desc, retval);
         return retval;
 }
 
-#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
-
-#ifdef CONFIG_ENABLE_WARN_DEPRECATED
-# warning __do_IRQ is deprecated. Please convert to proper flow handlers
-#endif
-
-/**
- * __do_IRQ - original all in one highlevel IRQ handler
- * @irq:       the interrupt number
- *
- * __do_IRQ handles all normal device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
- *
- * This is the original x86 implementation which is used for every
- * interrupt type.
- */
-unsigned int __do_IRQ(unsigned int irq)
+irqreturn_t handle_irq_event(struct irq_desc *desc)
 {
-        struct irq_desc *desc = irq_to_desc(irq);
-        struct irqaction *action;
-        unsigned int status;
-
-        kstat_incr_irqs_this_cpu(irq, desc);
-
-        if (CHECK_IRQ_PER_CPU(desc->status)) {
-                irqreturn_t action_ret;
-
-                /*
-                 * No locking required for CPU-local interrupts:
-                 */
-                if (desc->chip->ack)
-                        desc->chip->ack(irq);
-                if (likely(!(desc->status & IRQ_DISABLED))) {
-                        action_ret = handle_IRQ_event(irq, desc->action);
-                        if (!noirqdebug)
-                                note_interrupt(irq, desc, action_ret);
-                }
-                desc->chip->end(irq);
-                return 1;
-        }
-
-        raw_spin_lock(&desc->lock);
-        if (desc->chip->ack)
-                desc->chip->ack(irq);
-        /*
-         * REPLAY is when Linux resends an IRQ that was dropped earlier
-         * WAITING is used by probe to mark irqs that are being tested
-         */
-        status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
-        status |= IRQ_PENDING; /* we _want_ to handle it */
+        struct irqaction *action = desc->action;
+        irqreturn_t ret;
 
-        /*
-         * If the IRQ is disabled for whatever reason, we cannot
-         * use the action we have.
-         */
-        action = NULL;
-        if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
-                action = desc->action;
-                status &= ~IRQ_PENDING; /* we commit to handling */
-                status |= IRQ_INPROGRESS; /* we are handling it */
-        }
-        desc->status = status;
-
-        /*
-         * If there is no IRQ handler or it was disabled, exit early.
-         * Since we set PENDING, if another processor is handling
-         * a different instance of this same irq, the other processor
-         * will take care of it.
-         */
-        if (unlikely(!action))
-                goto out;
-
-        /*
-         * Edge triggered interrupts need to remember
-         * pending events.
-         * This applies to any hw interrupts that allow a second
-         * instance of the same irq to arrive while we are in do_IRQ
-         * or in the handler. But the code here only handles the _second_
-         * instance of the irq, not the third or fourth. So it is mostly
-         * useful for irq hardware that does not mask cleanly in an
-         * SMP environment.
-         */
-        for (;;) {
-                irqreturn_t action_ret;
-
-                raw_spin_unlock(&desc->lock);
-
-                action_ret = handle_IRQ_event(irq, action);
-                if (!noirqdebug)
-                        note_interrupt(irq, desc, action_ret);
-
-                raw_spin_lock(&desc->lock);
-                if (likely(!(desc->status & IRQ_PENDING)))
-                        break;
-                desc->status &= ~IRQ_PENDING;
-        }
-        desc->status &= ~IRQ_INPROGRESS;
-
-out:
-        /*
-         * The ->end() handler has to deal with interrupts which got
-         * disabled while the handler was running.
-         */
-        desc->chip->end(irq);
+        desc->istate &= ~IRQS_PENDING;
+        irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
         raw_spin_unlock(&desc->lock);
 
-        return 1;
-}
-#endif
-
-void early_init_irq_lock_class(void)
-{
-        struct irq_desc *desc;
-        int i;
-
-        for_each_irq_desc(i, desc) {
-                lockdep_set_class(&desc->lock, &irq_desc_lock_class);
-        }
-}
+        ret = handle_irq_event_percpu(desc, action);
 
-unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
-{
-        struct irq_desc *desc = irq_to_desc(irq);
-        return desc ? desc->kstat_irqs[cpu] : 0;
+        raw_spin_lock(&desc->lock);
+        irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
+        return ret;
 }
-EXPORT_SYMBOL(kstat_irqs_cpu);
-
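
Note: handle_irq_event() is now the entry point the flow handlers in kernel/irq/chip.c use. It is called with desc->lock held, drops the lock while handle_irq_event_percpu() runs the action chain, and retakes it before the caller clears up. A sketch of that calling convention, loosely modeled on a level-type flow handler; this is illustrative, not the verbatim chip.c code, and the mask/unmask steps are simplified:

    /* Illustrative flow handler built on handle_irq_event(). */
    static void example_level_flow_handler(unsigned int irq, struct irq_desc *desc)
    {
            raw_spin_lock(&desc->lock);
            /* assumes the chip provides irq_mask_ack; real code falls back
             * to separate mask + ack callbacks when it does not */
            desc->irq_data.chip->irq_mask_ack(&desc->irq_data);

            if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
                    desc->istate |= IRQS_PENDING;   /* may be replayed later */
                    goto out_unlock;
            }

            kstat_incr_irqs_this_cpu(irq, desc);
            handle_irq_event(desc);                 /* drops and retakes desc->lock */

            if (!irqd_irq_disabled(&desc->irq_data))
                    desc->irq_data.chip->irq_unmask(&desc->irq_data);
    out_unlock:
            raw_spin_unlock(&desc->lock);
    }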