author    Thomas Gleixner <tglx@linutronix.de>  2011-02-07 10:02:20 -0500
committer Thomas Gleixner <tglx@linutronix.de>  2011-02-19 06:58:07 -0500
commit    3b8249e759c701c4a82f99d957be651a7657bf6f (patch)
tree      f758675da3bb37282eefd50e57456d09b208b452 /kernel/irq/manage.c
parent    569bda8df11effa03e618729293c7961696abb10 (diff)
genirq: Do not copy affinity before set
While rummaging through arch code I found a few workarounds which deal with the fact that the initial affinity setting from request_irq() copies the mask into irq_data->affinity before the chip code is called. In the normal path we unconditionally copy the mask when the chip code returns 0.

Copy the mask after the chip code has been called instead, and add a return code IRQ_SET_MASK_OK_NOCOPY for the chip functions, which prevents the copy. That way we see the real mask when the chip function decided to truncate it further, as some arches do. IRQ_SET_MASK_OK is 0, which is the current behaviour.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
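To make the new contract concrete, here is a minimal sketch (illustration only, not part of this patch) of a chip ->irq_set_affinity() callback for a hypothetical controller that can route an interrupt to only one CPU. The name demo_chip_set_affinity and the single-target policy are invented for the example; it truncates the requested mask, records the mask actually in effect in irq_data->affinity itself, and returns IRQ_SET_MASK_OK_NOCOPY so the core skips its copy:

static int demo_chip_set_affinity(struct irq_data *data,
				  const struct cpumask *mask, bool force)
{
	/* Hypothetical hardware limit: only one target CPU,
	 * so truncate to the first online CPU in the mask. */
	unsigned int cpu = cpumask_first_and(mask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/* ... program the hardware route for 'cpu' here ... */

	/* Record the effective mask ourselves and tell the core
	 * not to overwrite it with the caller's wider mask. */
	cpumask_copy(data->affinity, cpumask_of(cpu));
	return IRQ_SET_MASK_OK_NOCOPY;
}

A chip function that accepts the mask unchanged keeps returning IRQ_SET_MASK_OK (0), so existing implementations are unaffected.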
Diffstat (limited to 'kernel/irq/manage.c')
 kernel/irq/manage.c | 47 ++++++++++++++++++++++++++++++++++-------------
 1 file changed, 34 insertions(+), 13 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index ade65bfb466d..dc95d53df510 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -148,9 +148,12 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
 
 	if (irq_can_move_pcntxt(desc)) {
 		ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
-		if (!ret) {
+		switch (ret) {
+		case IRQ_SET_MASK_OK:
 			cpumask_copy(desc->irq_data.affinity, mask);
+		case IRQ_SET_MASK_OK_NOCOPY:
 			irq_set_thread_affinity(desc);
+			ret = 0;
 		}
 	} else {
 		desc->status |= IRQ_MOVE_PENDING;
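Note the deliberate fall-through in the switch above: IRQ_SET_MASK_OK performs the copy and then falls into IRQ_SET_MASK_OK_NOCOPY, so both return codes end up calling irq_set_thread_affinity() and reporting success via ret = 0; only the cpumask_copy() is skipped in the NOCOPY case.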
@@ -254,9 +257,12 @@ EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
254/* 257/*
255 * Generic version of the affinity autoselector. 258 * Generic version of the affinity autoselector.
256 */ 259 */
257static int setup_affinity(unsigned int irq, struct irq_desc *desc) 260static int
261setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
258{ 262{
263 struct irq_chip *chip = get_irq_desc_chip(desc);
259 struct cpumask *set = irq_default_affinity; 264 struct cpumask *set = irq_default_affinity;
265 int ret;
260 266
261 /* Excludes PER_CPU and NO_BALANCE interrupts */ 267 /* Excludes PER_CPU and NO_BALANCE interrupts */
262 if (!irq_can_set_affinity(irq)) 268 if (!irq_can_set_affinity(irq))
@@ -273,13 +279,20 @@ static int setup_affinity(unsigned int irq, struct irq_desc *desc)
 	else
 		desc->status &= ~IRQ_AFFINITY_SET;
 	}
-	cpumask_and(desc->irq_data.affinity, cpu_online_mask, set);
-	desc->irq_data.chip->irq_set_affinity(&desc->irq_data, desc->irq_data.affinity, false);
 
+	cpumask_and(mask, cpu_online_mask, set);
+	ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
+	switch (ret) {
+	case IRQ_SET_MASK_OK:
+		cpumask_copy(desc->irq_data.affinity, mask);
+	case IRQ_SET_MASK_OK_NOCOPY:
+		irq_set_thread_affinity(desc);
+	}
 	return 0;
 }
 #else
-static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
+static inline int
+setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
 {
 	return irq_select_affinity(irq);
 }
@@ -288,23 +301,23 @@ static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
 /*
  * Called when affinity is set via /proc/irq
  */
-int irq_select_affinity_usr(unsigned int irq)
+int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 	int ret;
 
 	raw_spin_lock_irqsave(&desc->lock, flags);
-	ret = setup_affinity(irq, desc);
+	ret = setup_affinity(irq, desc, mask);
 	if (!ret)
 		irq_set_thread_affinity(desc);
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
-
 	return ret;
 }
 
 #else
-static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
+static inline int
+setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 {
 	return 0;
 }
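With the added parameter, irq_select_affinity_usr() likewise expects its caller to provide the scratch mask; the matching caller-side change lives in kernel/irq/proc.c, which is part of the full patch but outside this diffstat, limited as it is to kernel/irq/manage.c.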
@@ -765,8 +778,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	struct irqaction *old, **old_ptr;
 	const char *old_name = NULL;
 	unsigned long flags;
-	int nested, shared = 0;
-	int ret;
+	int ret, nested, shared = 0;
+	cpumask_var_t mask;
 
 	if (!desc)
 		return -EINVAL;
@@ -831,6 +844,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		new->thread = t;
 	}
 
+	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+		ret = -ENOMEM;
+		goto out_thread;
+	}
+
 	/*
 	 * The following block of code has to be executed atomically
 	 */
@@ -876,7 +894,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 				new->flags & IRQF_TRIGGER_MASK);
 
 		if (ret)
-			goto out_thread;
+			goto out_mask;
 	} else
 		compat_irq_chip_set_default_handler(desc);
 #if defined(CONFIG_IRQ_PER_CPU)
@@ -903,7 +921,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 			desc->status |= IRQ_NO_BALANCING;
 
 		/* Set default affinity mask once everything is setup */
-		setup_affinity(irq, desc);
+		setup_affinity(irq, desc, mask);
 
 	} else if ((new->flags & IRQF_TRIGGER_MASK)
 			&& (new->flags & IRQF_TRIGGER_MASK)
@@ -956,6 +974,9 @@ mismatch:
 #endif
 	ret = -EBUSY;
 
+out_mask:
+	free_cpumask_var(mask);
+
 out_thread:
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	if (new->thread) {