Diffstat (limited to 'kernel/irq/manage.c')
 -rw-r--r--   kernel/irq/manage.c  |  156
 1 file changed, 104 insertions, 52 deletions
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 1279e3499534..9eb1d518ee1c 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1,7 +1,8 @@
 /*
  * linux/kernel/irq/manage.c
  *
- * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
+ * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
+ * Copyright (C) 2005-2006 Thomas Gleixner
  *
  * This file contains driver APIs to the irq subsystem.
  */
@@ -16,12 +17,6 @@
 
 #ifdef CONFIG_SMP
 
-cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
-
-#if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE)
-cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];
-#endif
-
 /**
  * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  * @irq: interrupt number to wait for
@@ -42,7 +37,6 @@ void synchronize_irq(unsigned int irq)
         while (desc->status & IRQ_INPROGRESS)
                 cpu_relax();
 }
-
 EXPORT_SYMBOL(synchronize_irq);
 
 #endif
@@ -60,7 +54,7 @@ EXPORT_SYMBOL(synchronize_irq);
  */
 void disable_irq_nosync(unsigned int irq)
 {
-        irq_desc_t *desc = irq_desc + irq;
+        struct irq_desc *desc = irq_desc + irq;
         unsigned long flags;
 
         if (irq >= NR_IRQS)
@@ -69,11 +63,10 @@ void disable_irq_nosync(unsigned int irq)
         spin_lock_irqsave(&desc->lock, flags);
         if (!desc->depth++) {
                 desc->status |= IRQ_DISABLED;
-                desc->handler->disable(irq);
+                desc->chip->disable(irq);
         }
         spin_unlock_irqrestore(&desc->lock, flags);
 }
-
 EXPORT_SYMBOL(disable_irq_nosync);
 
 /**
@@ -90,7 +83,7 @@ EXPORT_SYMBOL(disable_irq_nosync);
  */
 void disable_irq(unsigned int irq)
 {
-        irq_desc_t *desc = irq_desc + irq;
+        struct irq_desc *desc = irq_desc + irq;
 
         if (irq >= NR_IRQS)
                 return;
@@ -99,7 +92,6 @@ void disable_irq(unsigned int irq)
         if (desc->action)
                 synchronize_irq(irq);
 }
-
 EXPORT_SYMBOL(disable_irq);
 
 /**
@@ -114,7 +106,7 @@ EXPORT_SYMBOL(disable_irq);
  */
 void enable_irq(unsigned int irq)
 {
-        irq_desc_t *desc = irq_desc + irq;
+        struct irq_desc *desc = irq_desc + irq;
         unsigned long flags;
 
         if (irq >= NR_IRQS)
@@ -123,17 +115,15 @@ void enable_irq(unsigned int irq)
         spin_lock_irqsave(&desc->lock, flags);
         switch (desc->depth) {
         case 0:
+                printk(KERN_WARNING "Unbalanced enable_irq(%d)\n", irq);
                 WARN_ON(1);
                 break;
         case 1: {
                 unsigned int status = desc->status & ~IRQ_DISABLED;
 
-                desc->status = status;
-                if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
-                        desc->status = status | IRQ_REPLAY;
-                        hw_resend_irq(desc->handler,irq);
-                }
-                desc->handler->enable(irq);
+                /* Prevent probing on this irq: */
+                desc->status = status | IRQ_NOPROBE;
+                check_irq_resend(desc, irq);
                 /* fall-through */
         }
         default:
@@ -141,9 +131,29 @@ void enable_irq(unsigned int irq)
         }
         spin_unlock_irqrestore(&desc->lock, flags);
 }
-
 EXPORT_SYMBOL(enable_irq);
 
+/**
+ * set_irq_wake - control irq power management wakeup
+ * @irq: interrupt to control
+ * @on: enable/disable power management wakeup
+ *
+ * Enable/disable power management wakeup mode
+ */
+int set_irq_wake(unsigned int irq, unsigned int on)
+{
+        struct irq_desc *desc = irq_desc + irq;
+        unsigned long flags;
+        int ret = -ENXIO;
+
+        spin_lock_irqsave(&desc->lock, flags);
+        if (desc->chip->set_wake)
+                ret = desc->chip->set_wake(irq, on);
+        spin_unlock_irqrestore(&desc->lock, flags);
+        return ret;
+}
+EXPORT_SYMBOL(set_irq_wake);
+
 /*
  * Internal function that tells the architecture code whether a
  * particular irq has been exclusively allocated or is available
@@ -153,7 +163,7 @@ int can_request_irq(unsigned int irq, unsigned long irqflags)
 {
         struct irqaction *action;
 
-        if (irq >= NR_IRQS)
+        if (irq >= NR_IRQS || irq_desc[irq].status & IRQ_NOREQUEST)
                 return 0;
 
         action = irq_desc[irq].action;
@@ -164,11 +174,22 @@ int can_request_irq(unsigned int irq, unsigned long irqflags)
         return !action;
 }
 
+void compat_irq_chip_set_default_handler(struct irq_desc *desc)
+{
+        /*
+         * If the architecture still has not overriden
+         * the flow handler then zap the default. This
+         * should catch incorrect flow-type setting.
+         */
+        if (desc->handle_irq == &handle_bad_irq)
+                desc->handle_irq = NULL;
+}
+
 /*
  * Internal function to register an irqaction - typically used to
  * allocate special interrupts that are part of the architecture.
  */
-int setup_irq(unsigned int irq, struct irqaction * new)
+int setup_irq(unsigned int irq, struct irqaction *new)
 {
         struct irq_desc *desc = irq_desc + irq;
         struct irqaction *old, **p;
@@ -178,7 +199,7 @@ int setup_irq(unsigned int irq, struct irqaction * new)
         if (irq >= NR_IRQS)
                 return -EINVAL;
 
-        if (desc->handler == &no_irq_type)
+        if (desc->chip == &no_irq_chip)
                 return -ENOSYS;
         /*
          * Some drivers like serial.c use request_irq() heavily,
@@ -200,14 +221,21 @@ int setup_irq(unsigned int irq, struct irqaction * new)
         /*
          * The following block of code has to be executed atomically
          */
-        spin_lock_irqsave(&desc->lock,flags);
+        spin_lock_irqsave(&desc->lock, flags);
         p = &desc->action;
-        if ((old = *p) != NULL) {
-                /* Can't share interrupts unless both agree to */
-                if (!(old->flags & new->flags & SA_SHIRQ))
+        old = *p;
+        if (old) {
+                /*
+                 * Can't share interrupts unless both agree to and are
+                 * the same type (level, edge, polarity). So both flag
+                 * fields must have SA_SHIRQ set and the bits which
+                 * set the trigger type must match.
+                 */
+                if (!((old->flags & new->flags) & SA_SHIRQ) ||
+                    ((old->flags ^ new->flags) & SA_TRIGGER_MASK))
                         goto mismatch;
 
-#if defined(ARCH_HAS_IRQ_PER_CPU) && defined(SA_PERCPU_IRQ)
+#if defined(CONFIG_IRQ_PER_CPU) && defined(SA_PERCPU_IRQ)
                 /* All handlers must agree on per-cpuness */
                 if ((old->flags & IRQ_PER_CPU) != (new->flags & IRQ_PER_CPU))
                         goto mismatch;
@@ -222,20 +250,44 @@ int setup_irq(unsigned int irq, struct irqaction * new)
         }
 
         *p = new;
-#if defined(ARCH_HAS_IRQ_PER_CPU) && defined(SA_PERCPU_IRQ)
+#if defined(CONFIG_IRQ_PER_CPU) && defined(SA_PERCPU_IRQ)
         if (new->flags & SA_PERCPU_IRQ)
                 desc->status |= IRQ_PER_CPU;
 #endif
         if (!shared) {
-                desc->depth = 0;
-                desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT |
-                                  IRQ_WAITING | IRQ_INPROGRESS);
-                if (desc->handler->startup)
-                        desc->handler->startup(irq);
-                else
-                        desc->handler->enable(irq);
+                irq_chip_set_defaults(desc->chip);
+
+                /* Setup the type (level, edge polarity) if configured: */
+                if (new->flags & SA_TRIGGER_MASK) {
+                        if (desc->chip && desc->chip->set_type)
+                                desc->chip->set_type(irq,
+                                                new->flags & SA_TRIGGER_MASK);
+                        else
+                                /*
+                                 * SA_TRIGGER_* but the PIC does not support
+                                 * multiple flow-types?
+                                 */
+                                printk(KERN_WARNING "setup_irq(%d) SA_TRIGGER"
+                                       "set. No set_type function available\n",
+                                       irq);
+                } else
+                        compat_irq_chip_set_default_handler(desc);
+
+                desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING |
+                                  IRQ_INPROGRESS);
+
+                if (!(desc->status & IRQ_NOAUTOEN)) {
+                        desc->depth = 0;
+                        desc->status &= ~IRQ_DISABLED;
+                        if (desc->chip->startup)
+                                desc->chip->startup(irq);
+                        else
+                                desc->chip->enable(irq);
+                } else
+                        /* Undo nested disables: */
+                        desc->depth = 1;
         }
-        spin_unlock_irqrestore(&desc->lock,flags);
+        spin_unlock_irqrestore(&desc->lock, flags);
 
         new->irq = irq;
         register_irq_proc(irq);
@@ -278,10 +330,10 @@ void free_irq(unsigned int irq, void *dev_id)
                 return;
 
         desc = irq_desc + irq;
-        spin_lock_irqsave(&desc->lock,flags);
+        spin_lock_irqsave(&desc->lock, flags);
         p = &desc->action;
         for (;;) {
-                struct irqaction * action = *p;
+                struct irqaction *action = *p;
 
                 if (action) {
                         struct irqaction **pp = p;
@@ -295,18 +347,18 @@ void free_irq(unsigned int irq, void *dev_id)
 
                         /* Currently used only by UML, might disappear one day.*/
 #ifdef CONFIG_IRQ_RELEASE_METHOD
-                        if (desc->handler->release)
-                                desc->handler->release(irq, dev_id);
+                        if (desc->chip->release)
+                                desc->chip->release(irq, dev_id);
 #endif
 
                         if (!desc->action) {
                                 desc->status |= IRQ_DISABLED;
-                                if (desc->handler->shutdown)
-                                        desc->handler->shutdown(irq);
+                                if (desc->chip->shutdown)
+                                        desc->chip->shutdown(irq);
                                 else
-                                        desc->handler->disable(irq);
+                                        desc->chip->disable(irq);
                         }
-                        spin_unlock_irqrestore(&desc->lock,flags);
+                        spin_unlock_irqrestore(&desc->lock, flags);
                         unregister_handler_proc(irq, action);
 
                         /* Make sure it's not being used on another CPU */
@@ -314,12 +366,11 @@ void free_irq(unsigned int irq, void *dev_id)
                         kfree(action);
                         return;
                 }
-                printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
-                spin_unlock_irqrestore(&desc->lock,flags);
+                printk(KERN_ERR "Trying to free free IRQ%d\n", irq);
+                spin_unlock_irqrestore(&desc->lock, flags);
                 return;
         }
 }
-
 EXPORT_SYMBOL(free_irq);
 
 /**
@@ -353,9 +404,9 @@ EXPORT_SYMBOL(free_irq);
  */
 int request_irq(unsigned int irq,
                 irqreturn_t (*handler)(int, void *, struct pt_regs *),
-                unsigned long irqflags, const char * devname, void *dev_id)
+                unsigned long irqflags, const char *devname, void *dev_id)
 {
-        struct irqaction * action;
+        struct irqaction *action;
         int retval;
 
         /*
@@ -368,6 +419,8 @@ int request_irq(unsigned int irq,
                 return -EINVAL;
         if (irq >= NR_IRQS)
                 return -EINVAL;
+        if (irq_desc[irq].status & IRQ_NOREQUEST)
+                return -EINVAL;
         if (!handler)
                 return -EINVAL;
 
@@ -390,6 +443,5 @@ int request_irq(unsigned int irq,
 
         return retval;
 }
-
 EXPORT_SYMBOL(request_irq);
 
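For driver writers, here is a minimal usage sketch of the interfaces this diff touches: request_irq() with SA_SHIRQ plus an explicit SA_TRIGGER_* type (which setup_irq() above now checks against any already-installed action and forwards to desc->chip->set_type()), and the new set_irq_wake() call that lands in desc->chip->set_wake(). This is an illustration, not code from the patch: the IRQ number, names and dev cookie are invented, and it assumes the 2.6.17-era prototypes shown above (three-argument handler, SA_* flags) with <linux/kernel.h>, <linux/interrupt.h> and <linux/irq.h> available.

/* Hypothetical driver fragment; only the calls mirror the APIs in this patch. */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>                  /* set_irq_wake() */

#define EXAMPLE_IRQ     7               /* made-up interrupt line */

static irqreturn_t example_isr(int irq, void *dev_id, struct pt_regs *regs)
{
        /* dev_id identifies this action on a shared line */
        return IRQ_HANDLED;
}

static int example_attach(void *dev)
{
        int err;

        /*
         * Shared, falling-edge request: with this patch it only succeeds if
         * any already-installed action also has SA_SHIRQ and identical
         * SA_TRIGGER_* bits, and it fails with -EINVAL on lines marked
         * IRQ_NOREQUEST.  dev must be non-NULL when SA_SHIRQ is used.
         */
        err = request_irq(EXAMPLE_IRQ, example_isr,
                          SA_SHIRQ | SA_TRIGGER_FALLING, "example", dev);
        if (err)
                return err;

        /* Routed to desc->chip->set_wake(); returns -ENXIO if the chip has none. */
        err = set_irq_wake(EXAMPLE_IRQ, 1);
        if (err && err != -ENXIO)
                printk(KERN_WARNING "example: IRQ wake failed: %d\n", err);

        return 0;
}

static void example_detach(void *dev)
{
        set_irq_wake(EXAMPLE_IRQ, 0);
        free_irq(EXAMPLE_IRQ, dev);
}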
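A second small sketch, reusing EXAMPLE_IRQ and the includes from the fragment above, of the disable_irq()/enable_irq() nesting contract that the desc->depth bookkeeping in this file enforces; the function is again hypothetical.

/* Hypothetical illustration of nested disable/enable on one line. */
static void example_pause_and_resume(void)
{
        disable_irq(EXAMPLE_IRQ);       /* depth 0 -> 1: line masked, in-flight handlers waited for */
        disable_irq(EXAMPLE_IRQ);       /* depth 1 -> 2: still masked */

        enable_irq(EXAMPLE_IRQ);        /* depth 2 -> 1: still masked */
        enable_irq(EXAMPLE_IRQ);        /* depth 1 -> 0: unmasked, a pending edge is resent */

        /* One more enable_irq() here would trigger the "Unbalanced enable_irq" warning above. */
}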