Diffstat (limited to 'kernel/irq')
 kernel/irq/handle.c |  31 +++++++-
 kernel/irq/manage.c | 192 ++++++++++++++++++++++++++++++++++++++++++++++---
 2 files changed, 209 insertions(+), 14 deletions(-)
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 9ebf77968871..fe8f45374e86 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -357,8 +357,37 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
 
 	do {
 		ret = action->handler(irq, action->dev_id);
-		if (ret == IRQ_HANDLED)
+
+		switch (ret) {
+		case IRQ_WAKE_THREAD:
+			/*
+			 * Wake up the handler thread for this
+			 * action. In case the thread crashed and was
+			 * killed we just pretend that we handled the
+			 * interrupt. The hardirq handler above has
+			 * disabled the device interrupt, so no irq
+			 * storm is lurking.
+			 */
+			if (likely(!test_bit(IRQTF_DIED,
+					     &action->thread_flags))) {
+				set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
+				wake_up_process(action->thread);
+			}
+
+			/*
+			 * Set it to handled so the spurious check
+			 * does not trigger.
+			 */
+			ret = IRQ_HANDLED;
+			/* Fall through to add to randomness */
+		case IRQ_HANDLED:
 			status |= action->flags;
+			break;
+
+		default:
+			break;
+		}
+
 		retval |= ret;
 		action = action->next;
 	} while (action);
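
[Note: the hunk above dispatches on the primary handler's return value. For illustration only — not part of this patch — a driver-side primary handler that feeds this new IRQ_WAKE_THREAD path could look like the sketch below; the device type and helpers (struct my_dev, my_dev_irq_pending(), my_dev_mask_irq()) are hypothetical names.]

/* Hypothetical sketch of a primary (hardirq) handler for a shared line */
static irqreturn_t my_primary_handler(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	/* Shared interrupt: bail out if our device did not raise it */
	if (!my_dev_irq_pending(dev))
		return IRQ_NONE;

	/* Mask at device level so no irq storm occurs meanwhile */
	my_dev_mask_irq(dev);

	/* handle_IRQ_event() sets IRQTF_RUNTHREAD and wakes the thread */
	return IRQ_WAKE_THREAD;
}
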
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 6458e99984c0..a4c1ab86cd25 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -8,16 +8,15 @@
  */
 
 #include <linux/irq.h>
+#include <linux/kthread.h>
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/sched.h>
 
 #include "internals.h"
 
-#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
-cpumask_var_t irq_default_affinity;
-
 /**
  * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  * @irq: interrupt number to wait for
@@ -53,9 +52,18 @@ void synchronize_irq(unsigned int irq)
 
 		/* Oops, that failed? */
 	} while (status & IRQ_INPROGRESS);
+
+	/*
+	 * We made sure that no hardirq handler is running. Now verify
+	 * that no threaded handlers are active.
+	 */
+	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
 }
 EXPORT_SYMBOL(synchronize_irq);
 
+#ifdef CONFIG_SMP
+cpumask_var_t irq_default_affinity;
+
 /**
  * irq_can_set_affinity - Check if the affinity of a given irq can be set
  * @irq: Interrupt to check
@@ -72,6 +80,18 @@ int irq_can_set_affinity(unsigned int irq)
 	return 1;
 }
 
+static void
+irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask)
+{
+	struct irqaction *action = desc->action;
+
+	while (action) {
+		if (action->thread)
+			set_cpus_allowed_ptr(action->thread, cpumask);
+		action = action->next;
+	}
+}
+
 /**
  * irq_set_affinity - Set the irq affinity of a given irq
  * @irq: Interrupt to set affinity
@@ -100,6 +120,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 	cpumask_copy(desc->affinity, cpumask);
 	desc->chip->set_affinity(irq, cpumask);
 #endif
+	irq_set_thread_affinity(desc, cpumask);
 	desc->status |= IRQ_AFFINITY_SET;
 	spin_unlock_irqrestore(&desc->lock, flags);
 	return 0;
@@ -150,6 +171,8 @@ int irq_select_affinity_usr(unsigned int irq)
 
 	spin_lock_irqsave(&desc->lock, flags);
 	ret = setup_affinity(irq, desc);
+	if (!ret)
+		irq_set_thread_affinity(desc, desc->affinity);
 	spin_unlock_irqrestore(&desc->lock, flags);
 
 	return ret;
@@ -384,6 +407,93 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 	return ret;
 }
 
+static inline int irq_thread_should_run(struct irqaction *action)
+{
+	return test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags);
+}
+
+static int irq_wait_for_interrupt(struct irqaction *action)
+{
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (irq_thread_should_run(action)) {
+			__set_current_state(TASK_RUNNING);
+			return 0;
+		} else
+			schedule();
+	}
+	return -1;
+}
+
+/*
+ * Interrupt handler thread
+ */
+static int irq_thread(void *data)
+{
+	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, };
+	struct irqaction *action = data;
+	struct irq_desc *desc = irq_to_desc(action->irq);
+	int wake;
+
+	sched_setscheduler(current, SCHED_FIFO, &param);
+	current->irqaction = action;
+
+	while (!irq_wait_for_interrupt(action)) {
+
+		atomic_inc(&desc->threads_active);
+
+		spin_lock_irq(&desc->lock);
+		if (unlikely(desc->status & IRQ_DISABLED)) {
+			/*
+			 * CHECKME: We might need a dedicated
+			 * IRQ_THREAD_PENDING flag here, which
+			 * retriggers the thread in check_irq_resend()
+			 * but AFAICT IRQ_PENDING should be fine as it
+			 * retriggers the interrupt itself --- tglx
+			 */
+			desc->status |= IRQ_PENDING;
+			spin_unlock_irq(&desc->lock);
+		} else {
+			spin_unlock_irq(&desc->lock);
+
+			action->thread_fn(action->irq, action->dev_id);
+		}
+
+		wake = atomic_dec_and_test(&desc->threads_active);
+
+		if (wake && waitqueue_active(&desc->wait_for_threads))
+			wake_up(&desc->wait_for_threads);
+	}
+
+	/*
+	 * Clear irqaction. Otherwise exit_irq_thread() would make
+	 * fuzz about an active irq thread going into nirvana.
+	 */
+	current->irqaction = NULL;
+	return 0;
+}
+
+/*
+ * Called from do_exit()
+ */
+void exit_irq_thread(void)
+{
+	struct task_struct *tsk = current;
+
+	if (!tsk->irqaction)
+		return;
+
+	printk(KERN_ERR
+	       "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
+	       tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);
+
+	/*
+	 * Set the THREAD DIED flag to prevent further wakeups of the
+	 * soon to be gone threaded handler.
+	 */
+	set_bit(IRQTF_DIED, &tsk->irqaction->thread_flags);
+}
+
 /*
  * Internal function to register an irqaction - typically used to
  * allocate special interrupts that are part of the architecture.
@@ -420,6 +530,26 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	}
 
 	/*
+	 * Threaded handler ?
+	 */
+	if (new->thread_fn) {
+		struct task_struct *t;
+
+		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
+				   new->name);
+		if (IS_ERR(t))
+			return PTR_ERR(t);
+		/*
+		 * We keep the reference to the task struct even if
+		 * the thread dies to avoid that the interrupt code
+		 * references an already freed task_struct.
+		 */
+		get_task_struct(t);
+		new->thread = t;
+		wake_up_process(t);
+	}
+
+	/*
 	 * The following block of code has to be executed atomically
 	 */
 	spin_lock_irqsave(&desc->lock, flags);
@@ -456,15 +586,15 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	if (!shared) {
 		irq_chip_set_defaults(desc->chip);
 
+		init_waitqueue_head(&desc->wait_for_threads);
+
 		/* Setup the type (level, edge polarity) if configured: */
 		if (new->flags & IRQF_TRIGGER_MASK) {
 			ret = __irq_set_trigger(desc, irq,
 					new->flags & IRQF_TRIGGER_MASK);
 
-			if (ret) {
-				spin_unlock_irqrestore(&desc->lock, flags);
-				return ret;
-			}
+			if (ret)
+				goto out_thread;
 		} else
 			compat_irq_chip_set_default_handler(desc);
 #if defined(CONFIG_IRQ_PER_CPU)
@@ -532,8 +662,19 @@ mismatch:
 		dump_stack();
 	}
 #endif
+	ret = -EBUSY;
+
+out_thread:
 	spin_unlock_irqrestore(&desc->lock, flags);
-	return -EBUSY;
+	if (new->thread) {
+		struct task_struct *t = new->thread;
+
+		new->thread = NULL;
+		if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
+			kthread_stop(t);
+		put_task_struct(t);
+	}
+	return ret;
 }
 
 /**
@@ -559,6 +700,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irqaction *action, **action_ptr;
+	struct task_struct *irqthread;
 	unsigned long flags;
 
 	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
@@ -605,6 +747,10 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 		else
 			desc->chip->disable(irq);
 	}
+
+	irqthread = action->thread;
+	action->thread = NULL;
+
 	spin_unlock_irqrestore(&desc->lock, flags);
 
 	unregister_handler_proc(irq, action);
@@ -612,6 +758,12 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 	/* Make sure it's not being used on another CPU: */
 	synchronize_irq(irq);
 
+	if (irqthread) {
+		if (!test_bit(IRQTF_DIED, &action->thread_flags))
+			kthread_stop(irqthread);
+		put_task_struct(irqthread);
+	}
+
 #ifdef CONFIG_DEBUG_SHIRQ
 	/*
 	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
@@ -664,9 +816,12 @@ void free_irq(unsigned int irq, void *dev_id)
 EXPORT_SYMBOL(free_irq);
 
 /**
- *	request_irq - allocate an interrupt line
+ *	request_threaded_irq - allocate an interrupt line
  *	@irq: Interrupt line to allocate
- *	@handler: Function to be called when the IRQ occurs
+ *	@handler: Function to be called when the IRQ occurs.
+ *		  Primary handler for threaded interrupts
+ *	@thread_fn: Function called from the irq handler thread
+ *		    If NULL, no irq thread is created
  *	@irqflags: Interrupt type flags
  *	@devname: An ascii name for the claiming device
  *	@dev_id: A cookie passed back to the handler function
@@ -678,6 +833,15 @@ EXPORT_SYMBOL(free_irq);
  *	raises, you must take care both to initialise your hardware
  *	and to set up the interrupt handler in the right order.
  *
+ *	If you want to set up a threaded irq handler for your device
+ *	then you need to supply @handler and @thread_fn. @handler is
+ *	still called in hard interrupt context and has to check
+ *	whether the interrupt originates from the device. If yes it
+ *	needs to disable the interrupt on the device and return
+ *	IRQ_WAKE_THREAD which will wake up the handler thread and run
+ *	@thread_fn. This split handler design is necessary to support
+ *	shared interrupts.
+ *
  *	Dev_id must be globally unique. Normally the address of the
  *	device data structure is used as the cookie. Since the handler
  *	receives this value it makes sense to use it.
@@ -693,8 +857,9 @@ EXPORT_SYMBOL(free_irq);
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 *
 */
-int request_irq(unsigned int irq, irq_handler_t handler,
-		unsigned long irqflags, const char *devname, void *dev_id)
+int request_threaded_irq(unsigned int irq, irq_handler_t handler,
+			 irq_handler_t thread_fn, unsigned long irqflags,
+			 const char *devname, void *dev_id)
 {
 	struct irqaction *action;
 	struct irq_desc *desc;
@@ -742,6 +907,7 @@ int request_irq(unsigned int irq, irq_handler_t handler,
 		return -ENOMEM;
 
 	action->handler = handler;
+	action->thread_fn = thread_fn;
 	action->flags = irqflags;
 	action->name = devname;
 	action->dev_id = dev_id;
@@ -771,4 +937,4 @@ int request_irq(unsigned int irq, irq_handler_t handler,
 #endif
 	return retval;
 }
-EXPORT_SYMBOL(request_irq);
+EXPORT_SYMBOL(request_threaded_irq);
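
[Note: for completeness, registering and tearing down such a split handler would look roughly as below. This is a hypothetical sketch reusing the names from the earlier example; my_thread_fn, my_dev_process() and my_dev_unmask_irq() are made-up helpers, not part of the patch.]

/* Threaded part: runs in the "irq/<nr>-my_dev" kernel thread */
static irqreturn_t my_thread_fn(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	my_dev_process(dev);		/* process context, may sleep */
	my_dev_unmask_irq(dev);		/* re-enable the interrupt at the device */
	return IRQ_HANDLED;
}

	/* Registration: both the primary handler and the thread function */
	err = request_threaded_irq(dev->irq, my_primary_handler, my_thread_fn,
				   IRQF_SHARED, "my_dev", dev);

	/* ... */

	/* Teardown: free_irq() also stops and reaps the handler thread */
	free_irq(dev->irq, dev);
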