path: root/kernel/irq/manage.c
Diffstat (limited to 'kernel/irq/manage.c')
-rw-r--r--	kernel/irq/manage.c	95
1 file changed, 73 insertions(+), 22 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 2734eca59243..0ec9ed831737 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -80,14 +80,22 @@ int irq_can_set_affinity(unsigned int irq)
 	return 1;
 }
 
-static void
-irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask)
+/**
+ *	irq_set_thread_affinity - Notify irq threads to adjust affinity
+ *	@desc:		irq descriptor which has affinity changed
+ *
+ *	We just set IRQTF_AFFINITY and delegate the affinity setting
+ *	to the interrupt thread itself. We cannot call
+ *	set_cpus_allowed_ptr() here as we hold desc->lock and this
+ *	code can be called from hard interrupt context.
+ */
+void irq_set_thread_affinity(struct irq_desc *desc)
 {
 	struct irqaction *action = desc->action;
 
 	while (action) {
 		if (action->thread)
-			set_cpus_allowed_ptr(action->thread, cpumask);
+			set_bit(IRQTF_AFFINITY, &action->thread_flags);
 		action = action->next;
 	}
 }
@@ -109,17 +117,22 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 	spin_lock_irqsave(&desc->lock, flags);
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-	if (desc->status & IRQ_MOVE_PCNTXT)
-		desc->chip->set_affinity(irq, cpumask);
+	if (desc->status & IRQ_MOVE_PCNTXT) {
+		if (!desc->chip->set_affinity(irq, cpumask)) {
+			cpumask_copy(desc->affinity, cpumask);
+			irq_set_thread_affinity(desc);
+		}
+	}
 	else {
 		desc->status |= IRQ_MOVE_PENDING;
 		cpumask_copy(desc->pending_mask, cpumask);
 	}
 #else
-	cpumask_copy(desc->affinity, cpumask);
-	desc->chip->set_affinity(irq, cpumask);
+	if (!desc->chip->set_affinity(irq, cpumask)) {
+		cpumask_copy(desc->affinity, cpumask);
+		irq_set_thread_affinity(desc);
+	}
 #endif
-	irq_set_thread_affinity(desc, cpumask);
 	desc->status |= IRQ_AFFINITY_SET;
 	spin_unlock_irqrestore(&desc->lock, flags);
 	return 0;
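
For context, irq_set_affinity() above is the path a caller hits when changing an interrupt's CPU mask; on success the new code also copies the mask into desc->affinity and nudges the irq thread via irq_set_thread_affinity(). A minimal sketch of such a caller (pin_irq_to_cpu() is a hypothetical helper, not part of this patch):

#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>

/* Hypothetical helper: pin @irq to a single @cpu via the code above. */
static int pin_irq_to_cpu(unsigned int irq, unsigned int cpu)
{
	cpumask_var_t mask;
	int ret;

	if (!irq_can_set_affinity(irq))
		return -EINVAL;
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(mask);
	cpumask_set_cpu(cpu, mask);
	ret = irq_set_affinity(irq, mask);	/* may also wake the irq thread */
	free_cpumask_var(mask);
	return ret;
}
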
@@ -171,7 +184,7 @@ int irq_select_affinity_usr(unsigned int irq)
 	spin_lock_irqsave(&desc->lock, flags);
 	ret = setup_affinity(irq, desc);
 	if (!ret)
-		irq_set_thread_affinity(desc, desc->affinity);
+		irq_set_thread_affinity(desc);
 	spin_unlock_irqrestore(&desc->lock, flags);
 
 	return ret;
@@ -438,6 +451,39 @@ static int irq_wait_for_interrupt(struct irqaction *action)
 	return -1;
 }
 
+#ifdef CONFIG_SMP
+/*
+ * Check whether we need to change the affinity of the interrupt thread.
+ */
+static void
+irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
+{
+	cpumask_var_t mask;
+
+	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
+		return;
+
+	/*
+	 * In case we are out of memory we set IRQTF_AFFINITY again and
+	 * try again next time
+	 */
+	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+		set_bit(IRQTF_AFFINITY, &action->thread_flags);
+		return;
+	}
+
+	spin_lock_irq(&desc->lock);
+	cpumask_copy(mask, desc->affinity);
+	spin_unlock_irq(&desc->lock);
+
+	set_cpus_allowed_ptr(current, mask);
+	free_cpumask_var(mask);
+}
+#else
+static inline void
+irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
+#endif
+
 /*
  * Interrupt handler thread
  */
@@ -453,6 +499,8 @@ static int irq_thread(void *data)
 
 	while (!irq_wait_for_interrupt(action)) {
 
+		irq_thread_check_affinity(desc, action);
+
 		atomic_inc(&desc->threads_active);
 
 		spin_lock_irq(&desc->lock);
@@ -559,7 +607,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		 */
 		get_task_struct(t);
 		new->thread = t;
-		wake_up_process(t);
 	}
 
 	/*
@@ -642,6 +689,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 				(int)(new->flags & IRQF_TRIGGER_MASK));
 	}
 
+	new->irq = irq;
 	*old_ptr = new;
 
 	/* Reset broken irq detection when installing new handler */
@@ -659,7 +707,13 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
 	spin_unlock_irqrestore(&desc->lock, flags);
 
-	new->irq = irq;
+	/*
+	 * Strictly no need to wake it up, but hung_task complains
+	 * when no hard interrupt wakes the thread up.
+	 */
+	if (new->thread)
+		wake_up_process(new->thread);
+
 	register_irq_proc(irq, desc);
 	new->dir = NULL;
 	register_handler_proc(irq, new);
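
The wake-up here pairs with the kthread_create() done earlier in __setup_irq(): a kthread starts out sleeping and runs only after wake_up_process(). A generic sketch of that create-then-wake idiom (start_worker() is a hypothetical illustration, not from this file):

#include <linux/kthread.h>
#include <linux/err.h>

static int start_worker(int (*fn)(void *), void *data)
{
	struct task_struct *t;

	t = kthread_create(fn, data, "my-worker");
	if (IS_ERR(t))
		return PTR_ERR(t);

	/* the thread exists but sleeps; finish any setup first */
	wake_up_process(t);	/* only now may it run */
	return 0;
}
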
@@ -713,7 +767,6 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irqaction *action, **action_ptr;
-	struct task_struct *irqthread;
 	unsigned long flags;
 
 	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
@@ -761,9 +814,6 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 		desc->chip->disable(irq);
 	}
 
-	irqthread = action->thread;
-	action->thread = NULL;
-
 	spin_unlock_irqrestore(&desc->lock, flags);
 
 	unregister_handler_proc(irq, action);
@@ -771,12 +821,6 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 	/* Make sure it's not being used on another CPU: */
 	synchronize_irq(irq);
 
-	if (irqthread) {
-		if (!test_bit(IRQTF_DIED, &action->thread_flags))
-			kthread_stop(irqthread);
-		put_task_struct(irqthread);
-	}
-
 #ifdef CONFIG_DEBUG_SHIRQ
 	/*
 	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
@@ -792,6 +836,13 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 		local_irq_restore(flags);
 	}
 #endif
+
+	if (action->thread) {
+		if (!test_bit(IRQTF_DIED, &action->thread_flags))
+			kthread_stop(action->thread);
+		put_task_struct(action->thread);
+	}
+
 	return action;
 }
 
@@ -851,7 +902,7 @@ EXPORT_SYMBOL(free_irq);
  *	still called in hard interrupt context and has to check
  *	whether the interrupt originates from the device. If yes it
  *	needs to disable the interrupt on the device and return
- *	IRQ_THREAD_WAKE which will wake up the handler thread and run
+ *	IRQ_WAKE_THREAD which will wake up the handler thread and run
  *	@thread_fn. This split handler design is necessary to support
  *	shared interrupts.
  *
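
The kernel-doc above belongs to request_threaded_irq(). A minimal sketch of a driver using the split-handler model it describes, with a hypothetical "foo" device; all foo_* helpers are illustrative assumptions, not kernel APIs:

#include <linux/interrupt.h>
#include <linux/types.h>

struct foo_device;					/* hypothetical device type */
bool foo_irq_pending(struct foo_device *foo);		/* hypothetical helpers, */
void foo_mask_device_irq(struct foo_device *foo);	/* defined elsewhere in a */
void foo_unmask_device_irq(struct foo_device *foo);	/* real driver */
void foo_handle_events(struct foo_device *foo);

/* Hard handler: runs in hard interrupt context, must not sleep. */
static irqreturn_t foo_quick_check(int irq, void *dev_id)
{
	struct foo_device *foo = dev_id;

	if (!foo_irq_pending(foo))
		return IRQ_NONE;		/* not ours (shared line) */

	foo_mask_device_irq(foo);		/* silence the device */
	return IRQ_WAKE_THREAD;			/* run foo_thread_fn in a thread */
}

/* Threaded handler: runs in process context, may sleep. */
static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
	struct foo_device *foo = dev_id;

	foo_handle_events(foo);
	foo_unmask_device_irq(foo);
	return IRQ_HANDLED;
}

/* Registration, e.g. in the probe routine:
 *
 *	err = request_threaded_irq(foo_irq, foo_quick_check, foo_thread_fn,
 *				   IRQF_SHARED, "foo", foo);
 */
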