aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2012-11-03 06:52:09 -0400
committerThomas Gleixner <tglx@linutronix.de>2012-11-12 14:07:18 -0500
commit04aa530ec04f61875b99c12721162e2964e3318c (patch)
tree832a096a2434e6c1c0ceb44335436a78b12f22b8 /kernel
parentf3de44edf376d18773febca6a37800c042bada7d (diff)
genirq: Always force thread affinity
Sankara reported that the genirq core code fails to adjust the affinity of an interrupt thread in several cases:

1) On request/setup_irq() the call to setup_affinity() happens before the new action is registered, so the new thread is not notified.

2) For secondary shared interrupts nothing notifies the new thread to change its affinity.

3) Interrupts which have the IRQ_NO_BALANCE flag set are not moving the thread either.

Fix this by setting the thread affinity flag right on thread creation time. This ensures that under all circumstances the thread moves to the right place. Requires a check in irq_thread_check_affinity for an existing affinity mask (CONFIG_CPU_MASK_OFFSTACK=y).

Reported-and-tested-by: Sankara Muthukrishnan <sankara.m@gmail.com>
Cc: stable@vger.kernel.org
Link: http://lkml.kernel.org/r/alpine.LFD.2.02.1209041738200.2754@ionos
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/irq/manage.c23
1 file changed, 21 insertions, 2 deletions
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 1cbd572f6ad8..35c70c9e24d8 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -732,6 +732,7 @@ static void
732irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) 732irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
733{ 733{
734 cpumask_var_t mask; 734 cpumask_var_t mask;
735 bool valid = true;
735 736
736 if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags)) 737 if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
737 return; 738 return;
@@ -746,10 +747,18 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
746 } 747 }
747 748
748 raw_spin_lock_irq(&desc->lock); 749 raw_spin_lock_irq(&desc->lock);
749 cpumask_copy(mask, desc->irq_data.affinity); 750 /*
751 * This code is triggered unconditionally. Check the affinity
752 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
753 */
754 if (desc->irq_data.affinity)
755 cpumask_copy(mask, desc->irq_data.affinity);
756 else
757 valid = false;
750 raw_spin_unlock_irq(&desc->lock); 758 raw_spin_unlock_irq(&desc->lock);
751 759
752 set_cpus_allowed_ptr(current, mask); 760 if (valid)
761 set_cpus_allowed_ptr(current, mask);
753 free_cpumask_var(mask); 762 free_cpumask_var(mask);
754} 763}
755#else 764#else
@@ -954,6 +963,16 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
954 */ 963 */
955 get_task_struct(t); 964 get_task_struct(t);
956 new->thread = t; 965 new->thread = t;
966 /*
967 * Tell the thread to set its affinity. This is
968 * important for shared interrupt handlers as we do
969 * not invoke setup_affinity() for the secondary
970 * handlers as everything is already set up. Even for
971 * interrupts marked with IRQF_NO_BALANCE this is
972 * correct as we want the thread to move to the cpu(s)
973 * on which the requesting code placed the interrupt.
974 */
975 set_bit(IRQTF_AFFINITY, &new->thread_flags);
957 } 976 }
958 977
959 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { 978 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {