author		Mike Travis <travis@sgi.com>		2009-01-11 00:58:08 -0500
committer	Ingo Molnar <mingo@elte.hu>		2009-01-11 13:12:46 -0500
commit		7f7ace0cda64c99599c23785f8979a072e118058 (patch)
tree		13f2826e64e09ebaef94a3e7fd9c21cfbd31ec3f /arch/x86/kernel/io_apic.c
parent		c59765042f53a79a7a65585042ff463b69cb248c (diff)
cpumask: update irq_desc to use cpumask_var_t
Impact: reduce memory usage, use new cpumask API.

Replace the affinity and pending_masks with cpumask_var_t's. This adds
to the significant size reduction done with the SPARSE_IRQS changes.

The added functions (init_alloc_desc_masks & init_copy_desc_masks) are
in the include file so they can be inlined (and optimized out for the
!CONFIG_CPUMASK_OFFSTACK case.) [Naming chosen to be consistent with
the other init*irq functions, as well as the backwards arg declaration
of "from, to" instead of the more common "to, from" standard.]

Includes a slight change to the declaration of struct irq_desc to embed
the pending_mask within ifdef(CONFIG_SMP) to be consistent with other
references, and some small changes to Xen.

Tested: sparse/non-sparse/cpumask_offstack/non-cpumask_offstack/nonuma/nosmp
on x86_64

Signed-off-by: Mike Travis <travis@sgi.com>
Cc: Chris Wright <chrisw@sous-sol.org>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Cc: virtualization@lists.osdl.org
Cc: xen-devel@lists.xensource.com
Cc: Yinghai Lu <yhlu.kernel@gmail.com>
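[Editor's note] The reason every "&desc->affinity" and "&desc->pending_mask" in the
hunks below loses its "&" is the type change to cpumask_var_t: the variable itself
already evaluates to a struct cpumask * in expressions. A simplified sketch of the
<linux/cpumask.h> definitions this relies on, shown only for context and not part of
this patch:

/*
 * Simplified sketch of the cpumask_var_t typedef this patch builds on.
 * With CONFIG_CPUMASK_OFFSTACK the mask lives off-stack and must be
 * allocated; without it, the typedef is a one-element array.  In both
 * cases the variable decays to a struct cpumask * when used, so callers
 * pass it directly instead of taking its address.
 */
#ifdef CONFIG_CPUMASK_OFFSTACK
typedef struct cpumask *cpumask_var_t;	/* pointer: needs alloc_cpumask_var() */
#else
typedef struct cpumask cpumask_var_t[1];	/* array of one: no allocation needed */
#endif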
Diffstat (limited to 'arch/x86/kernel/io_apic.c')
-rw-r--r--	arch/x86/kernel/io_apic.c	20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index 1c4a1302536..1337eab60ec 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -356,7 +356,7 @@ set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
 
 	if (!cfg->move_in_progress) {
 		/* it means that domain is not changed */
-		if (!cpumask_intersects(&desc->affinity, mask))
+		if (!cpumask_intersects(desc->affinity, mask))
 			cfg->move_desc_pending = 1;
 	}
 }
@@ -579,9 +579,9 @@ set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
 	if (assign_irq_vector(irq, cfg, mask))
 		return BAD_APICID;
 
-	cpumask_and(&desc->affinity, cfg->domain, mask);
+	cpumask_and(desc->affinity, cfg->domain, mask);
 	set_extra_move_desc(desc, mask);
-	return cpu_mask_to_apicid_and(&desc->affinity, cpu_online_mask);
+	return cpu_mask_to_apicid_and(desc->affinity, cpu_online_mask);
 }
 
 static void
@@ -2383,7 +2383,7 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
 	if (cfg->move_in_progress)
 		send_cleanup_vector(cfg);
 
-	cpumask_copy(&desc->affinity, mask);
+	cpumask_copy(desc->affinity, mask);
 }
 
 static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
@@ -2405,11 +2405,11 @@ static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
 	}
 
 	/* everthing is clear. we have right of way */
-	migrate_ioapic_irq_desc(desc, &desc->pending_mask);
+	migrate_ioapic_irq_desc(desc, desc->pending_mask);
 
 	ret = 0;
 	desc->status &= ~IRQ_MOVE_PENDING;
-	cpumask_clear(&desc->pending_mask);
+	cpumask_clear(desc->pending_mask);
 
 unmask:
 	unmask_IO_APIC_irq_desc(desc);
@@ -2434,7 +2434,7 @@ static void ir_irq_migration(struct work_struct *work)
 			continue;
 		}
 
-		desc->chip->set_affinity(irq, &desc->pending_mask);
+		desc->chip->set_affinity(irq, desc->pending_mask);
 		spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }
@@ -2448,7 +2448,7 @@ static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
 {
 	if (desc->status & IRQ_LEVEL) {
 		desc->status |= IRQ_MOVE_PENDING;
-		cpumask_copy(&desc->pending_mask, mask);
+		cpumask_copy(desc->pending_mask, mask);
 		migrate_irq_remapped_level_desc(desc);
 		return;
 	}
@@ -2516,7 +2516,7 @@ static void irq_complete_move(struct irq_desc **descp)
 
 	/* domain has not changed, but affinity did */
 	me = smp_processor_id();
-	if (cpu_isset(me, desc->affinity)) {
+	if (cpumask_test_cpu(me, desc->affinity)) {
 		*descp = desc = move_irq_desc(desc, me);
 		/* get the new one */
 		cfg = desc->chip_data;
@@ -4039,7 +4039,7 @@ void __init setup_ioapic_dest(void)
 			 */
 			if (desc->status &
 			    (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
-				mask = &desc->affinity;
+				mask = desc->affinity;
 			else
 				mask = TARGET_CPUS;
 
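
[Editor's note] The init_alloc_desc_masks / init_copy_desc_masks helpers named in the
commit message live in an include file and are outside this file's diff. Below is a
minimal, hedged sketch of what the allocation side of such a helper could look like;
the real helper takes additional arguments (node/boot) and its exact body may differ.
Only the helper name comes from the commit message; the body is illustrative. When
CONFIG_CPUMASK_OFFSTACK is not set, alloc_cpumask_var() reduces to a stub returning
true, so an inline helper like this compiles away, as the commit message describes.

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/irq.h>

/*
 * Illustrative sketch only: allocate the per-descriptor cpumasks that
 * this patch turns into cpumask_var_t fields.  pending_mask is shown
 * under CONFIG_GENERIC_PENDING_IRQ, the option that guards IRQ
 * migration-pending support.
 */
static inline bool init_alloc_desc_masks(struct irq_desc *desc)
{
	if (!alloc_cpumask_var(&desc->affinity, GFP_ATOMIC))
		return false;
	cpumask_setall(desc->affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!alloc_cpumask_var(&desc->pending_mask, GFP_ATOMIC)) {
		free_cpumask_var(desc->affinity);
		return false;
	}
	cpumask_clear(desc->pending_mask);
#endif
	return true;
}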