author     Mike Travis <travis@sgi.com>  2009-01-11 00:58:08 -0500
committer  Ingo Molnar <mingo@elte.hu>   2009-01-11 13:12:46 -0500
commit     7f7ace0cda64c99599c23785f8979a072e118058 (patch)
tree       13f2826e64e09ebaef94a3e7fd9c21cfbd31ec3f
parent     c59765042f53a79a7a65585042ff463b69cb248c (diff)
cpumask: update irq_desc to use cpumask_var_t
Impact: reduce memory usage, use new cpumask API.

Replace the affinity and pending_mask cpumask_t's with cpumask_var_t's.
This adds to the significant size reduction already achieved by the
SPARSE_IRQS changes.

The added functions (init_alloc_desc_masks & init_copy_desc_masks) are
in the include file so they can be inlined (and optimized out for the
!CONFIG_CPUMASK_OFFSTACK case). [Naming chosen to be consistent with
the other init*irq functions, as well as the backwards arg declaration
of "from, to" instead of the more common "to, from" standard.]

Includes a slight change to the declaration of struct irq_desc: the
pending_mask is now embedded within the ifdef(CONFIG_SMP) block, to be
consistent with other references. Also includes some small changes to
Xen.

Tested: sparse/non-sparse/cpumask_offstack/non-cpumask_offstack/nonuma/nosmp on x86_64

Signed-off-by: Mike Travis <travis@sgi.com>
Cc: Chris Wright <chrisw@sous-sol.org>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Cc: virtualization@lists.osdl.org
Cc: xen-devel@lists.xensource.com
Cc: Yinghai Lu <yhlu.kernel@gmail.com>
-rw-r--r--  arch/x86/kernel/io_apic.c  |  20
-rw-r--r--  arch/x86/kernel/irq_32.c   |   2
-rw-r--r--  arch/x86/kernel/irq_64.c   |   2
-rw-r--r--  drivers/xen/events.c       |   4
-rw-r--r--  include/linux/irq.h        |  81
-rw-r--r--  kernel/irq/chip.c          |   5
-rw-r--r--  kernel/irq/handle.c        |  26
-rw-r--r--  kernel/irq/manage.c        |  12
-rw-r--r--  kernel/irq/migration.c     |  12
-rw-r--r--  kernel/irq/numa_migrate.c  |  12
-rw-r--r--  kernel/irq/proc.c          |   4
11 files changed, 135 insertions(+), 45 deletions(-)
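
For context on the size reduction the commit message describes: cpumask_var_t changes shape with CONFIG_CPUMASK_OFFSTACK, which is why the conversion from cpumask_t removes the embedded masks from struct irq_desc when the option is enabled, and why the new inline helpers compile away when it is not. A simplified sketch of the two definitions (paraphrased from <linux/cpumask.h> of this era, not the verbatim header):

	/* Simplified sketch, paraphrased from <linux/cpumask.h>. */
	#ifdef CONFIG_CPUMASK_OFFSTACK
	/* Large NR_CPUS: the mask lives in separately allocated storage,
	 * so struct irq_desc only carries a pointer. */
	typedef struct cpumask *cpumask_var_t;
	#else
	/* Small NR_CPUS: a one-element array keeps the storage embedded;
	 * "desc->affinity" still decays to a struct cpumask *, and
	 * alloc_cpumask_var()/free_cpumask_var() become no-ops. */
	typedef struct cpumask cpumask_var_t[1];
	#endif

In both cases desc->affinity has pointer type at use sites, which is why every &desc->affinity in the diff below becomes plain desc->affinity.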
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index 1c4a1302536c..1337eab60ecc 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -356,7 +356,7 @@ set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
 
 	if (!cfg->move_in_progress) {
 		/* it means that domain is not changed */
-		if (!cpumask_intersects(&desc->affinity, mask))
+		if (!cpumask_intersects(desc->affinity, mask))
 			cfg->move_desc_pending = 1;
 	}
 }
@@ -579,9 +579,9 @@ set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
 	if (assign_irq_vector(irq, cfg, mask))
 		return BAD_APICID;
 
-	cpumask_and(&desc->affinity, cfg->domain, mask);
+	cpumask_and(desc->affinity, cfg->domain, mask);
 	set_extra_move_desc(desc, mask);
-	return cpu_mask_to_apicid_and(&desc->affinity, cpu_online_mask);
+	return cpu_mask_to_apicid_and(desc->affinity, cpu_online_mask);
 }
 
 static void
@@ -2383,7 +2383,7 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
 	if (cfg->move_in_progress)
 		send_cleanup_vector(cfg);
 
-	cpumask_copy(&desc->affinity, mask);
+	cpumask_copy(desc->affinity, mask);
 }
 
 static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
@@ -2405,11 +2405,11 @@ static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
 	}
 
 	/* everthing is clear. we have right of way */
-	migrate_ioapic_irq_desc(desc, &desc->pending_mask);
+	migrate_ioapic_irq_desc(desc, desc->pending_mask);
 
 	ret = 0;
 	desc->status &= ~IRQ_MOVE_PENDING;
-	cpumask_clear(&desc->pending_mask);
+	cpumask_clear(desc->pending_mask);
 
 unmask:
 	unmask_IO_APIC_irq_desc(desc);
@@ -2434,7 +2434,7 @@ static void ir_irq_migration(struct work_struct *work)
 			continue;
 		}
 
-		desc->chip->set_affinity(irq, &desc->pending_mask);
+		desc->chip->set_affinity(irq, desc->pending_mask);
 		spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }
@@ -2448,7 +2448,7 @@ static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
 {
 	if (desc->status & IRQ_LEVEL) {
 		desc->status |= IRQ_MOVE_PENDING;
-		cpumask_copy(&desc->pending_mask, mask);
+		cpumask_copy(desc->pending_mask, mask);
 		migrate_irq_remapped_level_desc(desc);
 		return;
 	}
@@ -2516,7 +2516,7 @@ static void irq_complete_move(struct irq_desc **descp)
 
 	/* domain has not changed, but affinity did */
 	me = smp_processor_id();
-	if (cpu_isset(me, desc->affinity)) {
+	if (cpumask_test_cpu(me, desc->affinity)) {
 		*descp = desc = move_irq_desc(desc, me);
 		/* get the new one */
 		cfg = desc->chip_data;
@@ -4039,7 +4039,7 @@ void __init setup_ioapic_dest(void)
 		 */
 		if (desc->status &
 		    (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
-			mask = &desc->affinity;
+			mask = desc->affinity;
 		else
 			mask = TARGET_CPUS;
 
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 74b9ff7341e9..e0f29be8ab0b 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -248,7 +248,7 @@ void fixup_irqs(void)
 		if (irq == 2)
 			continue;
 
-		affinity = &desc->affinity;
+		affinity = desc->affinity;
 		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
 			printk("Breaking affinity for irq %i\n", irq);
 			affinity = cpu_all_mask;
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 63c88e6ec025..0b21cb1ea11f 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -100,7 +100,7 @@ void fixup_irqs(void)
 		/* interrupt's are disabled at this point */
 		spin_lock(&desc->lock);
 
-		affinity = &desc->affinity;
+		affinity = desc->affinity;
 		if (!irq_has_action(irq) ||
 		    cpumask_equal(affinity, cpu_online_mask)) {
 			spin_unlock(&desc->lock);
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index eb0dfdeaa949..e0767ff35d6c 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -125,7 +125,7 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 
 	BUG_ON(irq == -1);
 #ifdef CONFIG_SMP
-	irq_to_desc(irq)->affinity = cpumask_of_cpu(cpu);
+	cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
 #endif
 
 	__clear_bit(chn, cpu_evtchn_mask[cpu_evtchn[chn]]);
@@ -142,7 +142,7 @@ static void init_evtchn_cpu_bindings(void)
 
 	/* By default all event channels notify CPU#0. */
 	for_each_irq_desc(i, desc) {
-		desc->affinity = cpumask_of_cpu(0);
+		cpumask_copy(desc->affinity, cpumask_of(0));
 	}
 #endif
 
diff --git a/include/linux/irq.h b/include/linux/irq.h
index f899b502f186..fa27210f1dfd 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -182,11 +182,11 @@ struct irq_desc {
 	unsigned int		irqs_unhandled;
 	spinlock_t		lock;
 #ifdef CONFIG_SMP
-	cpumask_t		affinity;
+	cpumask_var_t		affinity;
 	unsigned int		cpu;
-#endif
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-	cpumask_t		pending_mask;
+	cpumask_var_t		pending_mask;
+#endif
 #endif
 #ifdef CONFIG_PROC_FS
 	struct proc_dir_entry	*dir;
@@ -422,4 +422,79 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
 
 #endif /* !CONFIG_S390 */
 
+#ifdef CONFIG_SMP
+/**
+ * init_alloc_desc_masks - allocate cpumasks for irq_desc
+ * @desc:	pointer to irq_desc struct
+ * @boot:	true if need bootmem
+ *
+ * Allocates affinity and pending_mask cpumasks if required.
+ * Returns true if successful (or not required).
+ * Side effect: affinity has all bits set, pending_mask has all bits clear.
+ */
+static inline bool init_alloc_desc_masks(struct irq_desc *desc, int node,
+							bool boot)
+{
+	if (boot) {
+		alloc_bootmem_cpumask_var(&desc->affinity);
+		cpumask_setall(desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+		alloc_bootmem_cpumask_var(&desc->pending_mask);
+		cpumask_clear(desc->pending_mask);
+#endif
+		return true;
+	}
+
+	if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node))
+		return false;
+	cpumask_setall(desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC, node)) {
+		free_cpumask_var(desc->affinity);
+		return false;
+	}
+	cpumask_clear(desc->pending_mask);
+#endif
+	return true;
+}
+
+/**
+ * init_copy_desc_masks - copy cpumasks for irq_desc
+ * @old_desc:	pointer to old irq_desc struct
+ * @new_desc:	pointer to new irq_desc struct
+ *
+ * Ensures affinity and pending_mask are copied to the new irq_desc.
+ * If !CONFIG_CPUMASK_OFFSTACK the cpumasks are embedded in the
+ * irq_desc struct so the copy is redundant.
+ */
+
+static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+					struct irq_desc *new_desc)
+{
+#ifdef CONFIG_CPUMASK_OFFSTACK
+	cpumask_copy(new_desc->affinity, old_desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
+#endif
+#endif
+}
+
+#else /* !CONFIG_SMP */
+
+static inline bool init_alloc_desc_masks(struct irq_desc *desc, int node,
+							bool boot)
+{
+	return true;
+}
+
+static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+					struct irq_desc *new_desc)
+{
+}
+
+#endif /* CONFIG_SMP */
+
 #endif /* _LINUX_IRQ_H */
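
The helpers above follow the standard cpumask_var_t lifecycle. A minimal, hypothetical sketch of that allocate/initialize/free pattern in isolation (example_setup is illustrative only, not part of this patch):

	/* Hypothetical example of the cpumask_var_t lifecycle; not part of
	 * this patch. */
	#include <linux/cpumask.h>
	#include <linux/errno.h>
	#include <linux/gfp.h>

	static int example_setup(int node)
	{
		cpumask_var_t mask;

		/* Compiles to "return true" unless CONFIG_CPUMASK_OFFSTACK=y. */
		if (!alloc_cpumask_var_node(&mask, GFP_ATOMIC, node))
			return -ENOMEM;

		cpumask_setall(mask);	/* default: allow all CPUs */
		/* ... cpumask_copy(), cpumask_and(), etc. take the mask directly ... */

		free_cpumask_var(mask);	/* no-op when the mask is embedded */
		return 0;
	}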
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index f63c706d25e1..c248eba98b43 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -46,7 +46,10 @@ void dynamic_irq_init(unsigned int irq)
 	desc->irq_count = 0;
 	desc->irqs_unhandled = 0;
 #ifdef CONFIG_SMP
-	cpumask_setall(&desc->affinity);
+	cpumask_setall(desc->affinity);
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	cpumask_clear(desc->pending_mask);
+#endif
 #endif
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index c20db0be9173..b8fa1354f01c 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -64,9 +64,6 @@ static struct irq_desc irq_desc_init = {
 	.handle_irq = handle_bad_irq,
 	.depth      = 1,
 	.lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
-	.affinity   = CPU_MASK_ALL
-#endif
 };
 
 void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
@@ -88,6 +85,8 @@ void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
 
 static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
 {
+	int node = cpu_to_node(cpu);
+
 	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
 
 	spin_lock_init(&desc->lock);
@@ -101,6 +100,10 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
 		printk(KERN_ERR "can not alloc kstat_irqs\n");
 		BUG_ON(1);
 	}
+	if (!init_alloc_desc_masks(desc, node, false)) {
+		printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
+		BUG_ON(1);
+	}
 	arch_init_chip_data(desc, cpu);
 }
 
@@ -119,9 +122,6 @@ static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_sm
 		.handle_irq = handle_bad_irq,
 		.depth      = 1,
 		.lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
-		.affinity   = CPU_MASK_ALL
-#endif
 	}
 };
 
@@ -141,7 +141,7 @@ int __init early_irq_init(void)
 		desc[i].irq = i;
 		desc[i].kstat_irqs = kstat_irqs_legacy[i];
 		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
-
+		init_alloc_desc_masks(&desc[i], 0, true);
 		irq_desc_ptrs[i] = desc + i;
 	}
 
@@ -188,6 +188,10 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
 		printk(KERN_ERR "can not alloc irq_desc\n");
 		BUG_ON(1);
 	}
+	if (!init_alloc_desc_masks(desc, node, false)) {
+		printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
+		BUG_ON(1);
+	}
 	init_one_irq_desc(irq, desc, cpu);
 
 	irq_desc_ptrs[irq] = desc;
@@ -207,9 +211,6 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 		.handle_irq = handle_bad_irq,
 		.depth      = 1,
 		.lock       = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
-#ifdef CONFIG_SMP
-		.affinity   = CPU_MASK_ALL
-#endif
 	}
 };
 
@@ -222,9 +223,10 @@ int __init early_irq_init(void)
 	desc = irq_desc;
 	count = ARRAY_SIZE(irq_desc);
 
-	for (i = 0; i < count; i++)
+	for (i = 0; i < count; i++) {
 		desc[i].irq = i;
-
+		init_alloc_desc_masks(&desc[i], 0, true);
+	}
 	return arch_early_irq_init();
 }
 
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index cd0cd8dcb345..b98739af4558 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -98,14 +98,14 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-		cpumask_copy(&desc->affinity, cpumask);
+		cpumask_copy(desc->affinity, cpumask);
 		desc->chip->set_affinity(irq, cpumask);
 	} else {
 		desc->status |= IRQ_MOVE_PENDING;
-		cpumask_copy(&desc->pending_mask, cpumask);
+		cpumask_copy(desc->pending_mask, cpumask);
 	}
 #else
-	cpumask_copy(&desc->affinity, cpumask);
+	cpumask_copy(desc->affinity, cpumask);
 	desc->chip->set_affinity(irq, cpumask);
 #endif
 	desc->status |= IRQ_AFFINITY_SET;
@@ -127,16 +127,16 @@ int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
 	 * one of the targets is online.
 	 */
 	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
-		if (cpumask_any_and(&desc->affinity, cpu_online_mask)
+		if (cpumask_any_and(desc->affinity, cpu_online_mask)
 		    < nr_cpu_ids)
 			goto set_affinity;
 		else
 			desc->status &= ~IRQ_AFFINITY_SET;
 	}
 
-	cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity);
+	cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);
 set_affinity:
-	desc->chip->set_affinity(irq, &desc->affinity);
+	desc->chip->set_affinity(irq, desc->affinity);
 
 	return 0;
 }
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index bd72329e630c..e05ad9be43b7 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -18,7 +18,7 @@ void move_masked_irq(int irq)
 
 	desc->status &= ~IRQ_MOVE_PENDING;
 
-	if (unlikely(cpumask_empty(&desc->pending_mask)))
+	if (unlikely(cpumask_empty(desc->pending_mask)))
 		return;
 
 	if (!desc->chip->set_affinity)
@@ -38,13 +38,13 @@ void move_masked_irq(int irq)
 	 * For correct operation this depends on the caller
 	 * masking the irqs.
 	 */
-	if (likely(cpumask_any_and(&desc->pending_mask, cpu_online_mask)
+	if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
 		   < nr_cpu_ids)) {
-		cpumask_and(&desc->affinity,
-			    &desc->pending_mask, cpu_online_mask);
-		desc->chip->set_affinity(irq, &desc->affinity);
+		cpumask_and(desc->affinity,
+			    desc->pending_mask, cpu_online_mask);
+		desc->chip->set_affinity(irq, desc->affinity);
 	}
-	cpumask_clear(&desc->pending_mask);
+	cpumask_clear(desc->pending_mask);
 }
 
 void move_native_irq(int irq)
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index ecf765c6a77a..f001a4ea6414 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -46,6 +46,7 @@ static void init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
 	desc->cpu = cpu;
 	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
 	init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids);
+	init_copy_desc_masks(old_desc, desc);
 	arch_init_copy_chip_data(old_desc, desc, cpu);
 }
 
@@ -76,11 +77,20 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
 	node = cpu_to_node(cpu);
 	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
 	if (!desc) {
-		printk(KERN_ERR "irq %d: can not get new irq_desc for migration.\n", irq);
+		printk(KERN_ERR "irq %d: can not get new irq_desc "
+			"for migration.\n", irq);
 		/* still use old one */
 		desc = old_desc;
 		goto out_unlock;
 	}
+	if (!init_alloc_desc_masks(desc, node, false)) {
+		printk(KERN_ERR "irq %d: can not get new irq_desc cpumask "
+			"for migration.\n", irq);
+		/* still use old one */
+		kfree(desc);
+		desc = old_desc;
+		goto out_unlock;
+	}
 	init_copy_one_irq_desc(irq, old_desc, desc, cpu);
 
 	irq_desc_ptrs[irq] = desc;
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index aae3f742bcec..692363dd591f 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -20,11 +20,11 @@ static struct proc_dir_entry *root_irq_dir;
 static int irq_affinity_proc_show(struct seq_file *m, void *v)
 {
 	struct irq_desc *desc = irq_to_desc((long)m->private);
-	const struct cpumask *mask = &desc->affinity;
+	const struct cpumask *mask = desc->affinity;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PENDING)
-		mask = &desc->pending_mask;
+		mask = desc->pending_mask;
 #endif
 	seq_cpumask(m, mask);
 	seq_putc(m, '\n');