Diffstat (limited to 'kernel/irq/manage.c')
-rw-r--r--  kernel/irq/manage.c  82
1 file changed, 64 insertions(+), 18 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 50da67672901..0ec9ed831737 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -80,14 +80,22 @@ int irq_can_set_affinity(unsigned int irq)
         return 1;
 }
 
-void
-irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask)
+/**
+ * irq_set_thread_affinity - Notify irq threads to adjust affinity
+ * @desc:	irq descriptor which has affinity changed
+ *
+ * We just set IRQTF_AFFINITY and delegate the affinity setting
+ * to the interrupt thread itself. We can not call
+ * set_cpus_allowed_ptr() here as we hold desc->lock and this
+ * code can be called from hard interrupt context.
+ */
+void irq_set_thread_affinity(struct irq_desc *desc)
 {
         struct irqaction *action = desc->action;
 
         while (action) {
                 if (action->thread)
-                        set_cpus_allowed_ptr(action->thread, cpumask);
+                        set_bit(IRQTF_AFFINITY, &action->thread_flags);
                 action = action->next;
         }
 }
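
The kernel-doc added in this hunk carries the central reasoning of the patch: the caller may hold desc->lock and may run in hard interrupt context, so it cannot call set_cpus_allowed_ptr() itself; it only records the request by setting IRQTF_AFFINITY and leaves the actual affinity change to the interrupt thread. As a rough userspace sketch of that "record a flag now, let the thread apply it later" pattern (C11 atomics and pthreads; the names below are illustrative, not kernel APIs):

/* Illustrative userspace sketch -- not kernel code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define TF_AFFINITY (1u << 0)            /* stands in for IRQTF_AFFINITY */

static atomic_uint thread_flags;         /* stands in for action->thread_flags */

/* Called from a context that must not block (think: hard irq path with a
 * spinlock held): only record the request. */
static void request_affinity_update(void)
{
        atomic_fetch_or(&thread_flags, TF_AFFINITY);
}

/* Worker thread: applies the change at a point where blocking is fine. */
static void *worker(void *arg)
{
        (void)arg;
        for (int i = 0; i < 5; i++) {
                if (atomic_fetch_and(&thread_flags, ~TF_AFFINITY) & TF_AFFINITY)
                        printf("worker: applying new affinity now\n");
                usleep(100000);          /* stands in for waiting for the next irq */
        }
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, worker, NULL);
        request_affinity_update();
        pthread_join(t, NULL);
        return 0;
}
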
@@ -112,7 +120,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
         if (desc->status & IRQ_MOVE_PCNTXT) {
                 if (!desc->chip->set_affinity(irq, cpumask)) {
                         cpumask_copy(desc->affinity, cpumask);
-                        irq_set_thread_affinity(desc, cpumask);
+                        irq_set_thread_affinity(desc);
                 }
         }
         else {
@@ -122,7 +130,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 #else
         if (!desc->chip->set_affinity(irq, cpumask)) {
                 cpumask_copy(desc->affinity, cpumask);
-                irq_set_thread_affinity(desc, cpumask);
+                irq_set_thread_affinity(desc);
         }
 #endif
         desc->status |= IRQ_AFFINITY_SET;
@@ -176,7 +184,7 @@ int irq_select_affinity_usr(unsigned int irq)
         spin_lock_irqsave(&desc->lock, flags);
         ret = setup_affinity(irq, desc);
         if (!ret)
-                irq_set_thread_affinity(desc, desc->affinity);
+                irq_set_thread_affinity(desc);
         spin_unlock_irqrestore(&desc->lock, flags);
 
         return ret;
@@ -443,6 +451,39 @@ static int irq_wait_for_interrupt(struct irqaction *action)
         return -1;
 }
 
+#ifdef CONFIG_SMP
+/*
+ * Check whether we need to change the affinity of the interrupt thread.
+ */
+static void
+irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
+{
+        cpumask_var_t mask;
+
+        if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
+                return;
+
+        /*
+         * In case we are out of memory we set IRQTF_AFFINITY again and
+         * try again next time
+         */
+        if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+                set_bit(IRQTF_AFFINITY, &action->thread_flags);
+                return;
+        }
+
+        spin_lock_irq(&desc->lock);
+        cpumask_copy(mask, desc->affinity);
+        spin_unlock_irq(&desc->lock);
+
+        set_cpus_allowed_ptr(current, mask);
+        free_cpumask_var(mask);
+}
+#else
+static inline void
+irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
+#endif
+
 /*
  * Interrupt handler thread
  */
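
The new irq_thread_check_affinity() is the consumer side, running in the interrupt thread where sleeping is allowed: it consumes the request with test_and_clear_bit(), re-arms the bit if the temporary cpumask allocation fails so the update is retried on the next wakeup, snapshots desc->affinity under desc->lock, and only then calls set_cpus_allowed_ptr() outside the lock. A rough userspace sketch of the same consume / re-arm on failure / snapshot under lock / apply outside shape (invented names, not kernel APIs):

/* Illustrative userspace sketch -- names invented, not kernel APIs. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

#define TF_AFFINITY (1u << 0)

struct shared {
        pthread_mutex_t lock;            /* stands in for desc->lock */
        char affinity[64];               /* stands in for desc->affinity */
};

static atomic_uint thread_flags;

/* Runs in the worker thread, where blocking and allocation are allowed. */
static void check_affinity(struct shared *s)
{
        char *snap;

        /* Consume the request ... */
        if (!(atomic_fetch_and(&thread_flags, ~TF_AFFINITY) & TF_AFFINITY))
                return;

        snap = malloc(sizeof(s->affinity));
        if (!snap) {
                /* Out of memory: re-arm the flag, retry on the next wakeup. */
                atomic_fetch_or(&thread_flags, TF_AFFINITY);
                return;
        }

        /* ... snapshot the shared value under the lock ... */
        pthread_mutex_lock(&s->lock);
        memcpy(snap, s->affinity, sizeof(s->affinity));
        pthread_mutex_unlock(&s->lock);

        /* ... and apply it outside the lock (set_cpus_allowed_ptr() in the
         * kernel, which may sleep). */
        free(snap);
}

int main(void)
{
        struct shared s = { .lock = PTHREAD_MUTEX_INITIALIZER };

        atomic_fetch_or(&thread_flags, TF_AFFINITY);
        check_affinity(&s);
        return 0;
}
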
@@ -458,6 +499,8 @@ static int irq_thread(void *data)
 
         while (!irq_wait_for_interrupt(action)) {
 
+                irq_thread_check_affinity(desc, action);
+
                 atomic_inc(&desc->threads_active);
 
                 spin_lock_irq(&desc->lock);
@@ -564,7 +607,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                  */
                 get_task_struct(t);
                 new->thread = t;
-                wake_up_process(t);
         }
 
         /*
@@ -647,6 +689,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                                 (int)(new->flags & IRQF_TRIGGER_MASK));
         }
 
+        new->irq = irq;
         *old_ptr = new;
 
         /* Reset broken irq detection when installing new handler */
@@ -664,7 +707,13 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
         spin_unlock_irqrestore(&desc->lock, flags);
 
-        new->irq = irq;
+        /*
+         * Strictly no need to wake it up, but hung_task complains
+         * when no hard interrupt wakes the thread up.
+         */
+        if (new->thread)
+                wake_up_process(new->thread);
+
         register_irq_proc(irq, desc);
         new->dir = NULL;
         register_handler_proc(irq, new);
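
The wake_up_process() call moves here, after the action has been linked into the descriptor and desc->lock has been dropped, so the thread cannot run against a half-installed action; as the added comment notes, the wakeup mainly keeps the hung-task detector quiet until a hard interrupt arrives. A small userspace analogy of "publish the data completely, then wake the worker" (illustrative names only, not kernel APIs):

/* Illustrative userspace sketch -- not kernel code. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  ready_cv = PTHREAD_COND_INITIALIZER;
static int ready;                 /* stands in for "action installed" */
static const char *published;     /* stands in for the installed irqaction */

static void *worker(void *arg)
{
        (void)arg;

        pthread_mutex_lock(&lock);
        while (!ready)            /* analogous to the thread sleeping until woken */
                pthread_cond_wait(&ready_cv, &lock);
        pthread_mutex_unlock(&lock);

        printf("worker sees: %s\n", published);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, worker, NULL);   /* analogous to kthread_create() */

        pthread_mutex_lock(&lock);
        published = "fully initialised action";   /* install the data first ... */
        ready = 1;
        pthread_cond_signal(&ready_cv);           /* ... then wake the worker */
        pthread_mutex_unlock(&lock);

        pthread_join(t, NULL);
        return 0;
}
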
@@ -718,7 +767,6 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 {
         struct irq_desc *desc = irq_to_desc(irq);
         struct irqaction *action, **action_ptr;
-        struct task_struct *irqthread;
         unsigned long flags;
 
         WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
@@ -766,9 +814,6 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
                         desc->chip->disable(irq);
         }
 
-        irqthread = action->thread;
-        action->thread = NULL;
-
         spin_unlock_irqrestore(&desc->lock, flags);
 
         unregister_handler_proc(irq, action);
@@ -776,12 +821,6 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
         /* Make sure it's not being used on another CPU: */
         synchronize_irq(irq);
 
-        if (irqthread) {
-                if (!test_bit(IRQTF_DIED, &action->thread_flags))
-                        kthread_stop(irqthread);
-                put_task_struct(irqthread);
-        }
-
 #ifdef CONFIG_DEBUG_SHIRQ
         /*
          * It's a shared IRQ -- the driver ought to be prepared for an IRQ
@@ -797,6 +836,13 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
                 local_irq_restore(flags);
         }
 #endif
+
+        if (action->thread) {
+                if (!test_bit(IRQTF_DIED, &action->thread_flags))
+                        kthread_stop(action->thread);
+                put_task_struct(action->thread);
+        }
+
         return action;
 }
 
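
Teardown in __free_irq() is reordered to match: the action is unlinked under desc->lock, in-flight handlers are waited for via synchronize_irq(), and only then is the thread stopped with kthread_stop() and its task reference dropped, using action->thread directly instead of a local copy that used to be cleared early. A rough userspace sketch of that unlink / wait / join ordering (illustrative, not kernel code):

/* Illustrative userspace sketch of the teardown ordering -- not kernel code. */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct action {
        pthread_t thread;
        atomic_bool stop;                /* stands in for kthread_should_stop() */
};

static pthread_mutex_t desc_lock = PTHREAD_MUTEX_INITIALIZER;
static struct action *installed;         /* stands in for desc->action */

static void *worker(void *arg)
{
        struct action *a = arg;

        while (!atomic_load(&a->stop))
                sched_yield();           /* would handle wakeups/interrupts here */
        return NULL;
}

static void free_action(void)
{
        struct action *a;

        /* 1. Unlink under the lock so no new users can find it
         *    (unlinking under desc->lock in the kernel). */
        pthread_mutex_lock(&desc_lock);
        a = installed;
        installed = NULL;
        pthread_mutex_unlock(&desc_lock);

        /* 2. Wait for users that are already running -- synchronize_irq()
         *    in the kernel (elided here). */

        /* 3. Only now stop and reap the thread: kthread_stop() +
         *    put_task_struct() in the kernel, pthread_join() + free() here. */
        atomic_store(&a->stop, true);
        pthread_join(a->thread, NULL);
        free(a);
}

int main(void)
{
        struct action *a = calloc(1, sizeof(*a));

        if (!a)
                return 1;
        pthread_mutex_lock(&desc_lock);
        installed = a;
        pthread_mutex_unlock(&desc_lock);
        pthread_create(&a->thread, NULL, worker, a);

        free_action();
        return 0;
}
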