Diffstat (limited to 'kernel/irq/manage.c')
 kernel/irq/manage.c | 68 +++++++++++++++++++++++++++++++++++++++++++++-------------------------
 1 file changed, 43 insertions(+), 25 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 3cfc0fefb5ee..f8914b92b664 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -260,9 +260,7 @@ int set_irq_wake(unsigned int irq, unsigned int on)
 		}
 	} else {
 		if (desc->wake_depth == 0) {
-			printk(KERN_WARNING "Unbalanced IRQ %d "
-				"wake disable\n", irq);
-			WARN_ON(1);
+			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
 		} else if (--desc->wake_depth == 0) {
 			ret = set_irq_wake_real(irq, on);
 			if (ret)
@@ -308,6 +306,30 @@ void compat_irq_chip_set_default_handler(struct irq_desc *desc)
 		desc->handle_irq = NULL;
 }
 
+static int __irq_set_trigger(struct irq_chip *chip, unsigned int irq,
+		unsigned long flags)
+{
+	int ret;
+
+	if (!chip || !chip->set_type) {
+		/*
+		 * IRQF_TRIGGER_* but the PIC does not support multiple
+		 * flow-types?
+		 */
+		pr_warning("No set_type function for IRQ %d (%s)\n", irq,
+				chip ? (chip->name ? : "unknown") : "unknown");
+		return 0;
+	}
+
+	ret = chip->set_type(irq, flags & IRQF_TRIGGER_MASK);
+
+	if (ret)
+		pr_err("setting flow type for irq %u failed (%pF)\n",
+				irq, chip->set_type);
+
+	return ret;
+}
+
 /*
  * Internal function to register an irqaction - typically used to
  * allocate special interrupts that are part of the architecture.
@@ -319,6 +341,7 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 	const char *old_name = NULL;
 	unsigned long flags;
 	int shared = 0;
+	int ret;
 
 	if (irq >= NR_IRQS)
 		return -EINVAL;
@@ -376,35 +399,23 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 		shared = 1;
 	}
 
-	*p = new;
-
-	/* Exclude IRQ from balancing */
-	if (new->flags & IRQF_NOBALANCING)
-		desc->status |= IRQ_NO_BALANCING;
-
 	if (!shared) {
 		irq_chip_set_defaults(desc->chip);
 
-#if defined(CONFIG_IRQ_PER_CPU)
-		if (new->flags & IRQF_PERCPU)
-			desc->status |= IRQ_PER_CPU;
-#endif
-
 		/* Setup the type (level, edge polarity) if configured: */
 		if (new->flags & IRQF_TRIGGER_MASK) {
-			if (desc->chip->set_type)
-				desc->chip->set_type(irq,
-					new->flags & IRQF_TRIGGER_MASK);
-			else
-				/*
-				 * IRQF_TRIGGER_* but the PIC does not support
-				 * multiple flow-types?
-				 */
-				printk(KERN_WARNING "No IRQF_TRIGGER set_type "
-					"function for IRQ %d (%s)\n", irq,
-					desc->chip->name);
+			ret = __irq_set_trigger(desc->chip, irq, new->flags);
+
+			if (ret) {
+				spin_unlock_irqrestore(&desc->lock, flags);
+				return ret;
+			}
 		} else
 			compat_irq_chip_set_default_handler(desc);
+#if defined(CONFIG_IRQ_PER_CPU)
+		if (new->flags & IRQF_PERCPU)
+			desc->status |= IRQ_PER_CPU;
+#endif
 
 		desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING |
 				IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);
@@ -423,6 +434,13 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 		/* Set default affinity mask once everything is setup */
 		irq_select_affinity(irq);
 	}
+
+	*p = new;
+
+	/* Exclude IRQ from balancing */
+	if (new->flags & IRQF_NOBALANCING)
+		desc->status |= IRQ_NO_BALANCING;
+
 	/* Reset broken irq detection when installing new handler */
 	desc->irq_count = 0;
 	desc->irqs_unhandled = 0;
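
Note: for reference, a minimal, hypothetical driver-side sketch (not part of this patch) of how the new error path surfaces. A driver that passes an IRQF_TRIGGER_* flag to request_irq() now sees a failure from the irq_chip's set_type() hook propagated back through setup_irq()/request_irq(); before this change the return value of set_type() was ignored. The IRQ number, device name, and handler below are made up for illustration.

/*
 * Hypothetical example: requesting an edge-triggered interrupt.
 * With __irq_set_trigger() checking the set_type() return value,
 * request_irq() fails instead of silently continuing.
 */
#include <linux/interrupt.h>

#define FOO_IRQ	42			/* hypothetical interrupt line */

static irqreturn_t foo_isr(int irq, void *dev_id)
{
	/* acknowledge and handle the (hypothetical) device here */
	return IRQ_HANDLED;
}

static int foo_setup(void)
{
	int err;

	/* IRQF_TRIGGER_RISING is passed through to desc->chip->set_type() */
	err = request_irq(FOO_IRQ, foo_isr, IRQF_TRIGGER_RISING, "foo", NULL);
	if (err)	/* now also reports a set_type() failure */
		return err;

	return 0;
}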