author    Thomas Gleixner <tglx@linutronix.de>	2011-05-14 06:06:36 -0400
committer Thomas Gleixner <tglx@linutronix.de>	2011-05-14 06:06:36 -0400
commit    a18f22a968de17b29f2310cdb7ba69163e65ec15 (patch)
tree      a7d56d88fad5e444d7661484109758a2f436129e /kernel/irq/chip.c
parent    a1c57e0fec53defe745e64417eacdbd3618c3e66 (diff)
parent    798778b8653f64b7b2162ac70eca10367cff6ce8 (diff)
Merge branch 'consolidate-clksrc-i8253' of master.kernel.org:~rmk/linux-2.6-arm into timers/clocksource
Conflicts:
	arch/ia64/kernel/cyclone.c
	arch/mips/kernel/i8253.c
	arch/x86/kernel/i8253.c

Reason: Resolve conflicts so further cleanups do not conflict further

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/irq/chip.c')
-rw-r--r--	kernel/irq/chip.c	696
1 file changed, 297 insertions, 399 deletions
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index baa5c4acad83..4af1e2b244cb 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -19,140 +19,115 @@
19#include "internals.h" 19#include "internals.h"
20 20
21/** 21/**
22 * set_irq_chip - set the irq chip for an irq 22 * irq_set_chip - set the irq chip for an irq
23 * @irq: irq number 23 * @irq: irq number
24 * @chip: pointer to irq chip description structure 24 * @chip: pointer to irq chip description structure
25 */ 25 */
26int set_irq_chip(unsigned int irq, struct irq_chip *chip) 26int irq_set_chip(unsigned int irq, struct irq_chip *chip)
27{ 27{
28 struct irq_desc *desc = irq_to_desc(irq);
29 unsigned long flags; 28 unsigned long flags;
29 struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
30 30
31 if (!desc) { 31 if (!desc)
32 WARN(1, KERN_ERR "Trying to install chip for IRQ%d\n", irq);
33 return -EINVAL; 32 return -EINVAL;
34 }
35 33
36 if (!chip) 34 if (!chip)
37 chip = &no_irq_chip; 35 chip = &no_irq_chip;
38 36
39 raw_spin_lock_irqsave(&desc->lock, flags);
40 irq_chip_set_defaults(chip);
41 desc->irq_data.chip = chip; 37 desc->irq_data.chip = chip;
42 raw_spin_unlock_irqrestore(&desc->lock, flags); 38 irq_put_desc_unlock(desc, flags);
43 39 /*
40 * For !CONFIG_SPARSE_IRQ make the irq show up in
41 * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is
42 * already marked, and this call is harmless.
43 */
44 irq_reserve_irq(irq);
44 return 0; 45 return 0;
45} 46}
46EXPORT_SYMBOL(set_irq_chip); 47EXPORT_SYMBOL(irq_set_chip);
47 48
48/** 49/**
49 * set_irq_type - set the irq trigger type for an irq 50 * irq_set_type - set the irq trigger type for an irq
50 * @irq: irq number 51 * @irq: irq number
51 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h 52 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
52 */ 53 */
53int set_irq_type(unsigned int irq, unsigned int type) 54int irq_set_irq_type(unsigned int irq, unsigned int type)
54{ 55{
55 struct irq_desc *desc = irq_to_desc(irq);
56 unsigned long flags; 56 unsigned long flags;
57 int ret = -ENXIO; 57 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
58 int ret = 0;
58 59
59 if (!desc) { 60 if (!desc)
60 printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq); 61 return -EINVAL;
61 return -ENODEV;
62 }
63 62
64 type &= IRQ_TYPE_SENSE_MASK; 63 type &= IRQ_TYPE_SENSE_MASK;
65 if (type == IRQ_TYPE_NONE) 64 if (type != IRQ_TYPE_NONE)
66 return 0; 65 ret = __irq_set_trigger(desc, irq, type);
67 66 irq_put_desc_busunlock(desc, flags);
68 raw_spin_lock_irqsave(&desc->lock, flags);
69 ret = __irq_set_trigger(desc, irq, type);
70 raw_spin_unlock_irqrestore(&desc->lock, flags);
71 return ret; 67 return ret;
72} 68}
73EXPORT_SYMBOL(set_irq_type); 69EXPORT_SYMBOL(irq_set_irq_type);
74 70
75/** 71/**
76 * set_irq_data - set irq type data for an irq 72 * irq_set_handler_data - set irq handler data for an irq
77 * @irq: Interrupt number 73 * @irq: Interrupt number
78 * @data: Pointer to interrupt specific data 74 * @data: Pointer to interrupt specific data
79 * 75 *
80 * Set the hardware irq controller data for an irq 76 * Set the hardware irq controller data for an irq
81 */ 77 */
82int set_irq_data(unsigned int irq, void *data) 78int irq_set_handler_data(unsigned int irq, void *data)
83{ 79{
84 struct irq_desc *desc = irq_to_desc(irq);
85 unsigned long flags; 80 unsigned long flags;
81 struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
86 82
87 if (!desc) { 83 if (!desc)
88 printk(KERN_ERR
89 "Trying to install controller data for IRQ%d\n", irq);
90 return -EINVAL; 84 return -EINVAL;
91 }
92
93 raw_spin_lock_irqsave(&desc->lock, flags);
94 desc->irq_data.handler_data = data; 85 desc->irq_data.handler_data = data;
95 raw_spin_unlock_irqrestore(&desc->lock, flags); 86 irq_put_desc_unlock(desc, flags);
96 return 0; 87 return 0;
97} 88}
98EXPORT_SYMBOL(set_irq_data); 89EXPORT_SYMBOL(irq_set_handler_data);
99 90
100/** 91/**
101 * set_irq_msi - set MSI descriptor data for an irq 92 * irq_set_msi_desc - set MSI descriptor data for an irq
102 * @irq: Interrupt number 93 * @irq: Interrupt number
103 * @entry: Pointer to MSI descriptor data 94 * @entry: Pointer to MSI descriptor data
104 * 95 *
105 * Set the MSI descriptor entry for an irq 96 * Set the MSI descriptor entry for an irq
106 */ 97 */
107int set_irq_msi(unsigned int irq, struct msi_desc *entry) 98int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
108{ 99{
109 struct irq_desc *desc = irq_to_desc(irq);
110 unsigned long flags; 100 unsigned long flags;
101 struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
111 102
112 if (!desc) { 103 if (!desc)
113 printk(KERN_ERR
114 "Trying to install msi data for IRQ%d\n", irq);
115 return -EINVAL; 104 return -EINVAL;
116 }
117
118 raw_spin_lock_irqsave(&desc->lock, flags);
119 desc->irq_data.msi_desc = entry; 105 desc->irq_data.msi_desc = entry;
120 if (entry) 106 if (entry)
121 entry->irq = irq; 107 entry->irq = irq;
122 raw_spin_unlock_irqrestore(&desc->lock, flags); 108 irq_put_desc_unlock(desc, flags);
123 return 0; 109 return 0;
124} 110}
125 111
126/** 112/**
127 * set_irq_chip_data - set irq chip data for an irq 113 * irq_set_chip_data - set irq chip data for an irq
128 * @irq: Interrupt number 114 * @irq: Interrupt number
129 * @data: Pointer to chip specific data 115 * @data: Pointer to chip specific data
130 * 116 *
131 * Set the hardware irq chip data for an irq 117 * Set the hardware irq chip data for an irq
132 */ 118 */
133int set_irq_chip_data(unsigned int irq, void *data) 119int irq_set_chip_data(unsigned int irq, void *data)
134{ 120{
135 struct irq_desc *desc = irq_to_desc(irq);
136 unsigned long flags; 121 unsigned long flags;
122 struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
137 123
138 if (!desc) { 124 if (!desc)
139 printk(KERN_ERR
140 "Trying to install chip data for IRQ%d\n", irq);
141 return -EINVAL;
142 }
143
144 if (!desc->irq_data.chip) {
145 printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq);
146 return -EINVAL; 125 return -EINVAL;
147 }
148
149 raw_spin_lock_irqsave(&desc->lock, flags);
150 desc->irq_data.chip_data = data; 126 desc->irq_data.chip_data = data;
151 raw_spin_unlock_irqrestore(&desc->lock, flags); 127 irq_put_desc_unlock(desc, flags);
152
153 return 0; 128 return 0;
154} 129}
155EXPORT_SYMBOL(set_irq_chip_data); 130EXPORT_SYMBOL(irq_set_chip_data);
156 131
157struct irq_data *irq_get_irq_data(unsigned int irq) 132struct irq_data *irq_get_irq_data(unsigned int irq)
158{ 133{
@@ -162,221 +137,71 @@ struct irq_data *irq_get_irq_data(unsigned int irq)
162} 137}
163EXPORT_SYMBOL_GPL(irq_get_irq_data); 138EXPORT_SYMBOL_GPL(irq_get_irq_data);
164 139
165/** 140static void irq_state_clr_disabled(struct irq_desc *desc)
166 * set_irq_nested_thread - Set/Reset the IRQ_NESTED_THREAD flag of an irq
167 *
168 * @irq: Interrupt number
169 * @nest: 0 to clear / 1 to set the IRQ_NESTED_THREAD flag
170 *
171 * The IRQ_NESTED_THREAD flag indicates that on
172 * request_threaded_irq() no separate interrupt thread should be
173 * created for the irq as the handler are called nested in the
174 * context of a demultiplexing interrupt handler thread.
175 */
176void set_irq_nested_thread(unsigned int irq, int nest)
177{
178 struct irq_desc *desc = irq_to_desc(irq);
179 unsigned long flags;
180
181 if (!desc)
182 return;
183
184 raw_spin_lock_irqsave(&desc->lock, flags);
185 if (nest)
186 desc->status |= IRQ_NESTED_THREAD;
187 else
188 desc->status &= ~IRQ_NESTED_THREAD;
189 raw_spin_unlock_irqrestore(&desc->lock, flags);
190}
191EXPORT_SYMBOL_GPL(set_irq_nested_thread);
192
193/*
194 * default enable function
195 */
196static void default_enable(struct irq_data *data)
197{ 141{
198 struct irq_desc *desc = irq_data_to_desc(data); 142 irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
199
200 desc->irq_data.chip->irq_unmask(&desc->irq_data);
201 desc->status &= ~IRQ_MASKED;
202} 143}
203 144
204/* 145static void irq_state_set_disabled(struct irq_desc *desc)
205 * default disable function
206 */
207static void default_disable(struct irq_data *data)
208{
209}
210
211/*
212 * default startup function
213 */
214static unsigned int default_startup(struct irq_data *data)
215{ 146{
216 struct irq_desc *desc = irq_data_to_desc(data); 147 irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
217
218 desc->irq_data.chip->irq_enable(data);
219 return 0;
220} 148}
221 149
222/* 150static void irq_state_clr_masked(struct irq_desc *desc)
223 * default shutdown function
224 */
225static void default_shutdown(struct irq_data *data)
226{ 151{
227 struct irq_desc *desc = irq_data_to_desc(data); 152 irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
228
229 desc->irq_data.chip->irq_mask(&desc->irq_data);
230 desc->status |= IRQ_MASKED;
231} 153}
232 154
233#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED 155static void irq_state_set_masked(struct irq_desc *desc)
234/* Temporary migration helpers */
235static void compat_irq_mask(struct irq_data *data)
236{ 156{
237 data->chip->mask(data->irq); 157 irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
238} 158}
239 159
240static void compat_irq_unmask(struct irq_data *data) 160int irq_startup(struct irq_desc *desc)
241{ 161{
242 data->chip->unmask(data->irq); 162 irq_state_clr_disabled(desc);
243} 163 desc->depth = 0;
244 164
245static void compat_irq_ack(struct irq_data *data) 165 if (desc->irq_data.chip->irq_startup) {
246{ 166 int ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
247 data->chip->ack(data->irq); 167 irq_state_clr_masked(desc);
248} 168 return ret;
249 169 }
250static void compat_irq_mask_ack(struct irq_data *data)
251{
252 data->chip->mask_ack(data->irq);
253}
254
255static void compat_irq_eoi(struct irq_data *data)
256{
257 data->chip->eoi(data->irq);
258}
259
260static void compat_irq_enable(struct irq_data *data)
261{
262 data->chip->enable(data->irq);
263}
264
265static void compat_irq_disable(struct irq_data *data)
266{
267 data->chip->disable(data->irq);
268}
269
270static void compat_irq_shutdown(struct irq_data *data)
271{
272 data->chip->shutdown(data->irq);
273}
274
275static unsigned int compat_irq_startup(struct irq_data *data)
276{
277 return data->chip->startup(data->irq);
278}
279
280static int compat_irq_set_affinity(struct irq_data *data,
281 const struct cpumask *dest, bool force)
282{
283 return data->chip->set_affinity(data->irq, dest);
284}
285
286static int compat_irq_set_type(struct irq_data *data, unsigned int type)
287{
288 return data->chip->set_type(data->irq, type);
289}
290
291static int compat_irq_set_wake(struct irq_data *data, unsigned int on)
292{
293 return data->chip->set_wake(data->irq, on);
294}
295 170
296static int compat_irq_retrigger(struct irq_data *data) 171 irq_enable(desc);
297{ 172 return 0;
298 return data->chip->retrigger(data->irq);
299} 173}
300 174
301static void compat_bus_lock(struct irq_data *data) 175void irq_shutdown(struct irq_desc *desc)
302{ 176{
303 data->chip->bus_lock(data->irq); 177 irq_state_set_disabled(desc);
178 desc->depth = 1;
179 if (desc->irq_data.chip->irq_shutdown)
180 desc->irq_data.chip->irq_shutdown(&desc->irq_data);
181 if (desc->irq_data.chip->irq_disable)
182 desc->irq_data.chip->irq_disable(&desc->irq_data);
183 else
184 desc->irq_data.chip->irq_mask(&desc->irq_data);
185 irq_state_set_masked(desc);
304} 186}
305 187
306static void compat_bus_sync_unlock(struct irq_data *data) 188void irq_enable(struct irq_desc *desc)
307{ 189{
308 data->chip->bus_sync_unlock(data->irq); 190 irq_state_clr_disabled(desc);
191 if (desc->irq_data.chip->irq_enable)
192 desc->irq_data.chip->irq_enable(&desc->irq_data);
193 else
194 desc->irq_data.chip->irq_unmask(&desc->irq_data);
195 irq_state_clr_masked(desc);
309} 196}
310#endif
311 197
312/* 198void irq_disable(struct irq_desc *desc)
313 * Fixup enable/disable function pointers
314 */
315void irq_chip_set_defaults(struct irq_chip *chip)
316{ 199{
317#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED 200 irq_state_set_disabled(desc);
318 /* 201 if (desc->irq_data.chip->irq_disable) {
319 * Compat fixup functions need to be before we set the 202 desc->irq_data.chip->irq_disable(&desc->irq_data);
320 * defaults for enable/disable/startup/shutdown 203 irq_state_set_masked(desc);
321 */ 204 }
322 if (chip->enable)
323 chip->irq_enable = compat_irq_enable;
324 if (chip->disable)
325 chip->irq_disable = compat_irq_disable;
326 if (chip->shutdown)
327 chip->irq_shutdown = compat_irq_shutdown;
328 if (chip->startup)
329 chip->irq_startup = compat_irq_startup;
330#endif
331 /*
332 * The real defaults
333 */
334 if (!chip->irq_enable)
335 chip->irq_enable = default_enable;
336 if (!chip->irq_disable)
337 chip->irq_disable = default_disable;
338 if (!chip->irq_startup)
339 chip->irq_startup = default_startup;
340 /*
341 * We use chip->irq_disable, when the user provided its own. When
342 * we have default_disable set for chip->irq_disable, then we need
343 * to use default_shutdown, otherwise the irq line is not
344 * disabled on free_irq():
345 */
346 if (!chip->irq_shutdown)
347 chip->irq_shutdown = chip->irq_disable != default_disable ?
348 chip->irq_disable : default_shutdown;
349
350#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
351 if (!chip->end)
352 chip->end = dummy_irq_chip.end;
353
354 /*
355 * Now fix up the remaining compat handlers
356 */
357 if (chip->bus_lock)
358 chip->irq_bus_lock = compat_bus_lock;
359 if (chip->bus_sync_unlock)
360 chip->irq_bus_sync_unlock = compat_bus_sync_unlock;
361 if (chip->mask)
362 chip->irq_mask = compat_irq_mask;
363 if (chip->unmask)
364 chip->irq_unmask = compat_irq_unmask;
365 if (chip->ack)
366 chip->irq_ack = compat_irq_ack;
367 if (chip->mask_ack)
368 chip->irq_mask_ack = compat_irq_mask_ack;
369 if (chip->eoi)
370 chip->irq_eoi = compat_irq_eoi;
371 if (chip->set_affinity)
372 chip->irq_set_affinity = compat_irq_set_affinity;
373 if (chip->set_type)
374 chip->irq_set_type = compat_irq_set_type;
375 if (chip->set_wake)
376 chip->irq_set_wake = compat_irq_set_wake;
377 if (chip->retrigger)
378 chip->irq_retrigger = compat_irq_retrigger;
379#endif
380} 205}
381 206
382static inline void mask_ack_irq(struct irq_desc *desc) 207static inline void mask_ack_irq(struct irq_desc *desc)
@@ -388,22 +213,22 @@ static inline void mask_ack_irq(struct irq_desc *desc)
388 if (desc->irq_data.chip->irq_ack) 213 if (desc->irq_data.chip->irq_ack)
389 desc->irq_data.chip->irq_ack(&desc->irq_data); 214 desc->irq_data.chip->irq_ack(&desc->irq_data);
390 } 215 }
391 desc->status |= IRQ_MASKED; 216 irq_state_set_masked(desc);
392} 217}
393 218
394static inline void mask_irq(struct irq_desc *desc) 219void mask_irq(struct irq_desc *desc)
395{ 220{
396 if (desc->irq_data.chip->irq_mask) { 221 if (desc->irq_data.chip->irq_mask) {
397 desc->irq_data.chip->irq_mask(&desc->irq_data); 222 desc->irq_data.chip->irq_mask(&desc->irq_data);
398 desc->status |= IRQ_MASKED; 223 irq_state_set_masked(desc);
399 } 224 }
400} 225}
401 226
402static inline void unmask_irq(struct irq_desc *desc) 227void unmask_irq(struct irq_desc *desc)
403{ 228{
404 if (desc->irq_data.chip->irq_unmask) { 229 if (desc->irq_data.chip->irq_unmask) {
405 desc->irq_data.chip->irq_unmask(&desc->irq_data); 230 desc->irq_data.chip->irq_unmask(&desc->irq_data);
406 desc->status &= ~IRQ_MASKED; 231 irq_state_clr_masked(desc);
407 } 232 }
408} 233}
409 234
@@ -428,10 +253,10 @@ void handle_nested_irq(unsigned int irq)
428 kstat_incr_irqs_this_cpu(irq, desc); 253 kstat_incr_irqs_this_cpu(irq, desc);
429 254
430 action = desc->action; 255 action = desc->action;
431 if (unlikely(!action || (desc->status & IRQ_DISABLED))) 256 if (unlikely(!action || irqd_irq_disabled(&desc->irq_data)))
432 goto out_unlock; 257 goto out_unlock;
433 258
434 desc->status |= IRQ_INPROGRESS; 259 irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
435 raw_spin_unlock_irq(&desc->lock); 260 raw_spin_unlock_irq(&desc->lock);
436 261
437 action_ret = action->thread_fn(action->irq, action->dev_id); 262 action_ret = action->thread_fn(action->irq, action->dev_id);
@@ -439,13 +264,20 @@ void handle_nested_irq(unsigned int irq)
439 note_interrupt(irq, desc, action_ret); 264 note_interrupt(irq, desc, action_ret);
440 265
441 raw_spin_lock_irq(&desc->lock); 266 raw_spin_lock_irq(&desc->lock);
442 desc->status &= ~IRQ_INPROGRESS; 267 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
443 268
444out_unlock: 269out_unlock:
445 raw_spin_unlock_irq(&desc->lock); 270 raw_spin_unlock_irq(&desc->lock);
446} 271}
447EXPORT_SYMBOL_GPL(handle_nested_irq); 272EXPORT_SYMBOL_GPL(handle_nested_irq);
448 273
274static bool irq_check_poll(struct irq_desc *desc)
275{
276 if (!(desc->istate & IRQS_POLL_INPROGRESS))
277 return false;
278 return irq_wait_for_poll(desc);
279}
280
449/** 281/**
450 * handle_simple_irq - Simple and software-decoded IRQs. 282 * handle_simple_irq - Simple and software-decoded IRQs.
451 * @irq: the interrupt number 283 * @irq: the interrupt number
@@ -461,29 +293,20 @@ EXPORT_SYMBOL_GPL(handle_nested_irq);
461void 293void
462handle_simple_irq(unsigned int irq, struct irq_desc *desc) 294handle_simple_irq(unsigned int irq, struct irq_desc *desc)
463{ 295{
464 struct irqaction *action;
465 irqreturn_t action_ret;
466
467 raw_spin_lock(&desc->lock); 296 raw_spin_lock(&desc->lock);
468 297
469 if (unlikely(desc->status & IRQ_INPROGRESS)) 298 if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
470 goto out_unlock; 299 if (!irq_check_poll(desc))
471 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); 300 goto out_unlock;
301
302 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
472 kstat_incr_irqs_this_cpu(irq, desc); 303 kstat_incr_irqs_this_cpu(irq, desc);
473 304
474 action = desc->action; 305 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
475 if (unlikely(!action || (desc->status & IRQ_DISABLED)))
476 goto out_unlock; 306 goto out_unlock;
477 307
478 desc->status |= IRQ_INPROGRESS; 308 handle_irq_event(desc);
479 raw_spin_unlock(&desc->lock);
480 309
481 action_ret = handle_IRQ_event(irq, action);
482 if (!noirqdebug)
483 note_interrupt(irq, desc, action_ret);
484
485 raw_spin_lock(&desc->lock);
486 desc->status &= ~IRQ_INPROGRESS;
487out_unlock: 310out_unlock:
488 raw_spin_unlock(&desc->lock); 311 raw_spin_unlock(&desc->lock);
489} 312}
@@ -501,42 +324,42 @@ out_unlock:
501void 324void
502handle_level_irq(unsigned int irq, struct irq_desc *desc) 325handle_level_irq(unsigned int irq, struct irq_desc *desc)
503{ 326{
504 struct irqaction *action;
505 irqreturn_t action_ret;
506
507 raw_spin_lock(&desc->lock); 327 raw_spin_lock(&desc->lock);
508 mask_ack_irq(desc); 328 mask_ack_irq(desc);
509 329
510 if (unlikely(desc->status & IRQ_INPROGRESS)) 330 if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
511 goto out_unlock; 331 if (!irq_check_poll(desc))
512 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); 332 goto out_unlock;
333
334 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
513 kstat_incr_irqs_this_cpu(irq, desc); 335 kstat_incr_irqs_this_cpu(irq, desc);
514 336
515 /* 337 /*
516 * If its disabled or no action available 338 * If its disabled or no action available
517 * keep it masked and get out of here 339 * keep it masked and get out of here
518 */ 340 */
519 action = desc->action; 341 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
520 if (unlikely(!action || (desc->status & IRQ_DISABLED)))
521 goto out_unlock; 342 goto out_unlock;
522 343
523 desc->status |= IRQ_INPROGRESS; 344 handle_irq_event(desc);
524 raw_spin_unlock(&desc->lock);
525
526 action_ret = handle_IRQ_event(irq, action);
527 if (!noirqdebug)
528 note_interrupt(irq, desc, action_ret);
529
530 raw_spin_lock(&desc->lock);
531 desc->status &= ~IRQ_INPROGRESS;
532 345
533 if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT))) 346 if (!irqd_irq_disabled(&desc->irq_data) && !(desc->istate & IRQS_ONESHOT))
534 unmask_irq(desc); 347 unmask_irq(desc);
535out_unlock: 348out_unlock:
536 raw_spin_unlock(&desc->lock); 349 raw_spin_unlock(&desc->lock);
537} 350}
538EXPORT_SYMBOL_GPL(handle_level_irq); 351EXPORT_SYMBOL_GPL(handle_level_irq);
539 352
353#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
354static inline void preflow_handler(struct irq_desc *desc)
355{
356 if (desc->preflow_handler)
357 desc->preflow_handler(&desc->irq_data);
358}
359#else
360static inline void preflow_handler(struct irq_desc *desc) { }
361#endif
362
540/** 363/**
541 * handle_fasteoi_irq - irq handler for transparent controllers 364 * handle_fasteoi_irq - irq handler for transparent controllers
542 * @irq: the interrupt number 365 * @irq: the interrupt number
@@ -550,42 +373,40 @@ EXPORT_SYMBOL_GPL(handle_level_irq);
550void 373void
551handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) 374handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
552{ 375{
553 struct irqaction *action;
554 irqreturn_t action_ret;
555
556 raw_spin_lock(&desc->lock); 376 raw_spin_lock(&desc->lock);
557 377
558 if (unlikely(desc->status & IRQ_INPROGRESS)) 378 if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
559 goto out; 379 if (!irq_check_poll(desc))
380 goto out;
560 381
561 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); 382 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
562 kstat_incr_irqs_this_cpu(irq, desc); 383 kstat_incr_irqs_this_cpu(irq, desc);
563 384
564 /* 385 /*
565 * If its disabled or no action available 386 * If its disabled or no action available
566 * then mask it and get out of here: 387 * then mask it and get out of here:
567 */ 388 */
568 action = desc->action; 389 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
569 if (unlikely(!action || (desc->status & IRQ_DISABLED))) { 390 desc->istate |= IRQS_PENDING;
570 desc->status |= IRQ_PENDING;
571 mask_irq(desc); 391 mask_irq(desc);
572 goto out; 392 goto out;
573 } 393 }
574 394
575 desc->status |= IRQ_INPROGRESS; 395 if (desc->istate & IRQS_ONESHOT)
576 desc->status &= ~IRQ_PENDING; 396 mask_irq(desc);
577 raw_spin_unlock(&desc->lock);
578 397
579 action_ret = handle_IRQ_event(irq, action); 398 preflow_handler(desc);
580 if (!noirqdebug) 399 handle_irq_event(desc);
581 note_interrupt(irq, desc, action_ret);
582 400
583 raw_spin_lock(&desc->lock); 401out_eoi:
584 desc->status &= ~IRQ_INPROGRESS;
585out:
586 desc->irq_data.chip->irq_eoi(&desc->irq_data); 402 desc->irq_data.chip->irq_eoi(&desc->irq_data);
587 403out_unlock:
588 raw_spin_unlock(&desc->lock); 404 raw_spin_unlock(&desc->lock);
405 return;
406out:
407 if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED))
408 goto out_eoi;
409 goto out_unlock;
589} 410}
590 411
591/** 412/**
@@ -594,7 +415,7 @@ out:
594 * @desc: the interrupt description structure for this irq 415 * @desc: the interrupt description structure for this irq
595 * 416 *
596 * Interrupt occures on the falling and/or rising edge of a hardware 417 * Interrupt occures on the falling and/or rising edge of a hardware
597 * signal. The occurence is latched into the irq controller hardware 418 * signal. The occurrence is latched into the irq controller hardware
598 * and must be acked in order to be reenabled. After the ack another 419 * and must be acked in order to be reenabled. After the ack another
599 * interrupt can happen on the same source even before the first one 420 * interrupt can happen on the same source even before the first one
600 * is handled by the associated event handler. If this happens it 421 * is handled by the associated event handler. If this happens it
@@ -609,32 +430,27 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
609{ 430{
610 raw_spin_lock(&desc->lock); 431 raw_spin_lock(&desc->lock);
611 432
612 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); 433 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
613
614 /* 434 /*
615 * If we're currently running this IRQ, or its disabled, 435 * If we're currently running this IRQ, or its disabled,
616 * we shouldn't process the IRQ. Mark it pending, handle 436 * we shouldn't process the IRQ. Mark it pending, handle
617 * the necessary masking and go out 437 * the necessary masking and go out
618 */ 438 */
619 if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) || 439 if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
620 !desc->action)) { 440 irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
621 desc->status |= (IRQ_PENDING | IRQ_MASKED); 441 if (!irq_check_poll(desc)) {
622 mask_ack_irq(desc); 442 desc->istate |= IRQS_PENDING;
623 goto out_unlock; 443 mask_ack_irq(desc);
444 goto out_unlock;
445 }
624 } 446 }
625 kstat_incr_irqs_this_cpu(irq, desc); 447 kstat_incr_irqs_this_cpu(irq, desc);
626 448
627 /* Start handling the irq */ 449 /* Start handling the irq */
628 desc->irq_data.chip->irq_ack(&desc->irq_data); 450 desc->irq_data.chip->irq_ack(&desc->irq_data);
629 451
630 /* Mark the IRQ currently in progress.*/
631 desc->status |= IRQ_INPROGRESS;
632
633 do { 452 do {
634 struct irqaction *action = desc->action; 453 if (unlikely(!desc->action)) {
635 irqreturn_t action_ret;
636
637 if (unlikely(!action)) {
638 mask_irq(desc); 454 mask_irq(desc);
639 goto out_unlock; 455 goto out_unlock;
640 } 456 }
@@ -644,26 +460,66 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
644 * one, we could have masked the irq. 460 * one, we could have masked the irq.
645 * Renable it, if it was not disabled in meantime. 461 * Renable it, if it was not disabled in meantime.
646 */ 462 */
647 if (unlikely((desc->status & 463 if (unlikely(desc->istate & IRQS_PENDING)) {
648 (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) == 464 if (!irqd_irq_disabled(&desc->irq_data) &&
649 (IRQ_PENDING | IRQ_MASKED))) { 465 irqd_irq_masked(&desc->irq_data))
650 unmask_irq(desc); 466 unmask_irq(desc);
651 } 467 }
652 468
653 desc->status &= ~IRQ_PENDING; 469 handle_irq_event(desc);
654 raw_spin_unlock(&desc->lock);
655 action_ret = handle_IRQ_event(irq, action);
656 if (!noirqdebug)
657 note_interrupt(irq, desc, action_ret);
658 raw_spin_lock(&desc->lock);
659 470
660 } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING); 471 } while ((desc->istate & IRQS_PENDING) &&
472 !irqd_irq_disabled(&desc->irq_data));
661 473
662 desc->status &= ~IRQ_INPROGRESS;
663out_unlock: 474out_unlock:
664 raw_spin_unlock(&desc->lock); 475 raw_spin_unlock(&desc->lock);
665} 476}
666 477
478#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
479/**
480 * handle_edge_eoi_irq - edge eoi type IRQ handler
481 * @irq: the interrupt number
482 * @desc: the interrupt description structure for this irq
483 *
484 * Similar as the above handle_edge_irq, but using eoi and w/o the
485 * mask/unmask logic.
486 */
487void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
488{
489 struct irq_chip *chip = irq_desc_get_chip(desc);
490
491 raw_spin_lock(&desc->lock);
492
493 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
494 /*
495 * If we're currently running this IRQ, or its disabled,
496 * we shouldn't process the IRQ. Mark it pending, handle
497 * the necessary masking and go out
498 */
499 if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
500 irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
501 if (!irq_check_poll(desc)) {
502 desc->istate |= IRQS_PENDING;
503 goto out_eoi;
504 }
505 }
506 kstat_incr_irqs_this_cpu(irq, desc);
507
508 do {
509 if (unlikely(!desc->action))
510 goto out_eoi;
511
512 handle_irq_event(desc);
513
514 } while ((desc->istate & IRQS_PENDING) &&
515 !irqd_irq_disabled(&desc->irq_data));
516
517out_eoi:
518 chip->irq_eoi(&desc->irq_data);
519 raw_spin_unlock(&desc->lock);
520}
521#endif
522
667/** 523/**
668 * handle_percpu_irq - Per CPU local irq handler 524 * handle_percpu_irq - Per CPU local irq handler
669 * @irq: the interrupt number 525 * @irq: the interrupt number
@@ -674,103 +530,145 @@ out_unlock:
674void 530void
675handle_percpu_irq(unsigned int irq, struct irq_desc *desc) 531handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
676{ 532{
677 irqreturn_t action_ret; 533 struct irq_chip *chip = irq_desc_get_chip(desc);
678 534
679 kstat_incr_irqs_this_cpu(irq, desc); 535 kstat_incr_irqs_this_cpu(irq, desc);
680 536
681 if (desc->irq_data.chip->irq_ack) 537 if (chip->irq_ack)
682 desc->irq_data.chip->irq_ack(&desc->irq_data); 538 chip->irq_ack(&desc->irq_data);
683 539
684 action_ret = handle_IRQ_event(irq, desc->action); 540 handle_irq_event_percpu(desc, desc->action);
685 if (!noirqdebug)
686 note_interrupt(irq, desc, action_ret);
687 541
688 if (desc->irq_data.chip->irq_eoi) 542 if (chip->irq_eoi)
689 desc->irq_data.chip->irq_eoi(&desc->irq_data); 543 chip->irq_eoi(&desc->irq_data);
690} 544}
691 545
692void 546void
693__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, 547__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
694 const char *name) 548 const char *name)
695{ 549{
696 struct irq_desc *desc = irq_to_desc(irq);
697 unsigned long flags; 550 unsigned long flags;
551 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
698 552
699 if (!desc) { 553 if (!desc)
700 printk(KERN_ERR
701 "Trying to install type control for IRQ%d\n", irq);
702 return; 554 return;
703 }
704 555
705 if (!handle) 556 if (!handle) {
706 handle = handle_bad_irq; 557 handle = handle_bad_irq;
707 else if (desc->irq_data.chip == &no_irq_chip) { 558 } else {
708 printk(KERN_WARNING "Trying to install %sinterrupt handler " 559 if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
709 "for IRQ%d\n", is_chained ? "chained " : "", irq); 560 goto out;
710 /*
711 * Some ARM implementations install a handler for really dumb
712 * interrupt hardware without setting an irq_chip. This worked
713 * with the ARM no_irq_chip but the check in setup_irq would
714 * prevent us to setup the interrupt at all. Switch it to
715 * dummy_irq_chip for easy transition.
716 */
717 desc->irq_data.chip = &dummy_irq_chip;
718 } 561 }
719 562
720 chip_bus_lock(desc);
721 raw_spin_lock_irqsave(&desc->lock, flags);
722
723 /* Uninstall? */ 563 /* Uninstall? */
724 if (handle == handle_bad_irq) { 564 if (handle == handle_bad_irq) {
725 if (desc->irq_data.chip != &no_irq_chip) 565 if (desc->irq_data.chip != &no_irq_chip)
726 mask_ack_irq(desc); 566 mask_ack_irq(desc);
727 desc->status |= IRQ_DISABLED; 567 irq_state_set_disabled(desc);
728 desc->depth = 1; 568 desc->depth = 1;
729 } 569 }
730 desc->handle_irq = handle; 570 desc->handle_irq = handle;
731 desc->name = name; 571 desc->name = name;
732 572
733 if (handle != handle_bad_irq && is_chained) { 573 if (handle != handle_bad_irq && is_chained) {
734 desc->status &= ~IRQ_DISABLED; 574 irq_settings_set_noprobe(desc);
735 desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE; 575 irq_settings_set_norequest(desc);
736 desc->depth = 0; 576 irq_startup(desc);
737 desc->irq_data.chip->irq_startup(&desc->irq_data);
738 } 577 }
739 raw_spin_unlock_irqrestore(&desc->lock, flags); 578out:
740 chip_bus_sync_unlock(desc); 579 irq_put_desc_busunlock(desc, flags);
741}
742EXPORT_SYMBOL_GPL(__set_irq_handler);
743
744void
745set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
746 irq_flow_handler_t handle)
747{
748 set_irq_chip(irq, chip);
749 __set_irq_handler(irq, handle, 0, NULL);
750} 580}
581EXPORT_SYMBOL_GPL(__irq_set_handler);
751 582
752void 583void
753set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, 584irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
754 irq_flow_handler_t handle, const char *name) 585 irq_flow_handler_t handle, const char *name)
755{ 586{
756 set_irq_chip(irq, chip); 587 irq_set_chip(irq, chip);
757 __set_irq_handler(irq, handle, 0, name); 588 __irq_set_handler(irq, handle, 0, name);
758} 589}
759 590
760void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set) 591void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
761{ 592{
762 struct irq_desc *desc = irq_to_desc(irq);
763 unsigned long flags; 593 unsigned long flags;
594 struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
764 595
765 if (!desc) 596 if (!desc)
766 return; 597 return;
598 irq_settings_clr_and_set(desc, clr, set);
599
600 irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
601 IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
602 if (irq_settings_has_no_balance_set(desc))
603 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
604 if (irq_settings_is_per_cpu(desc))
605 irqd_set(&desc->irq_data, IRQD_PER_CPU);
606 if (irq_settings_can_move_pcntxt(desc))
607 irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
608 if (irq_settings_is_level(desc))
609 irqd_set(&desc->irq_data, IRQD_LEVEL);
610
611 irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));
612
613 irq_put_desc_unlock(desc, flags);
614}
615
616/**
617 * irq_cpu_online - Invoke all irq_cpu_online functions.
618 *
619 * Iterate through all irqs and invoke the chip.irq_cpu_online()
620 * for each.
621 */
622void irq_cpu_online(void)
623{
624 struct irq_desc *desc;
625 struct irq_chip *chip;
626 unsigned long flags;
627 unsigned int irq;
628
629 for_each_active_irq(irq) {
630 desc = irq_to_desc(irq);
631 if (!desc)
632 continue;
767 633
768 /* Sanitize flags */ 634 raw_spin_lock_irqsave(&desc->lock, flags);
769 set &= IRQF_MODIFY_MASK;
770 clr &= IRQF_MODIFY_MASK;
771 635
772 raw_spin_lock_irqsave(&desc->lock, flags); 636 chip = irq_data_get_irq_chip(&desc->irq_data);
773 desc->status &= ~clr; 637 if (chip && chip->irq_cpu_online &&
774 desc->status |= set; 638 (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
775 raw_spin_unlock_irqrestore(&desc->lock, flags); 639 !irqd_irq_disabled(&desc->irq_data)))
640 chip->irq_cpu_online(&desc->irq_data);
641
642 raw_spin_unlock_irqrestore(&desc->lock, flags);
643 }
644}
645
646/**
647 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
648 *
649 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
650 * for each.
651 */
652void irq_cpu_offline(void)
653{
654 struct irq_desc *desc;
655 struct irq_chip *chip;
656 unsigned long flags;
657 unsigned int irq;
658
659 for_each_active_irq(irq) {
660 desc = irq_to_desc(irq);
661 if (!desc)
662 continue;
663
664 raw_spin_lock_irqsave(&desc->lock, flags);
665
666 chip = irq_data_get_irq_chip(&desc->irq_data);
667 if (chip && chip->irq_cpu_offline &&
668 (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
669 !irqd_irq_disabled(&desc->irq_data)))
670 chip->irq_cpu_offline(&desc->irq_data);
671
672 raw_spin_unlock_irqrestore(&desc->lock, flags);
673 }
776} 674}
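
For readers coming from the old API, here is a minimal, hypothetical sketch (not part of this commit) of how an irqchip driver would wire a line up through the renamed setters this file now provides; the foo_* names, registers, and offsets are invented for illustration:

/*
 * Illustrative sketch only -- the foo_* controller, its MMIO layout and
 * register offsets are made up for the example.
 */
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>

struct foo_chip_data {
	void __iomem *base;		/* MMIO window of the fictional controller */
};

static void foo_irq_mask(struct irq_data *d)
{
	struct foo_chip_data *cd = irq_data_get_irq_chip_data(d);

	/* Made-up MASK-SET register: one bit per interrupt line */
	writel(BIT(d->irq & 31), cd->base + 0x10);
}

static void foo_irq_unmask(struct irq_data *d)
{
	struct foo_chip_data *cd = irq_data_get_irq_chip_data(d);

	/* Made-up MASK-CLEAR register */
	writel(BIT(d->irq & 31), cd->base + 0x14);
}

static struct irq_chip foo_irq_chip = {
	.name		= "FOO",
	.irq_mask	= foo_irq_mask,
	.irq_unmask	= foo_irq_unmask,
};

static void foo_setup_irq(unsigned int irq, struct foo_chip_data *cd)
{
	/* New-style calls; the old set_irq_* spellings removed above are gone. */
	irq_set_chip_data(irq, cd);
	irq_set_chip_and_handler_name(irq, &foo_irq_chip,
				      handle_level_irq, "level");
}

irq_set_chip_and_handler_name() is simply the irq_set_chip() + __irq_set_handler() pair shown in the patch above.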