author		Suresh Siddha <suresh.b.siddha@intel.com>	2012-06-25 16:38:27 -0400
committer	Ingo Molnar <mingo@kernel.org>	2012-07-06 05:00:21 -0400
commit		b39f25a849d7677a7dbf183f2483fd41c201a5ce (patch)
tree		87f06060976461b91d5cb196f0ed04fbcea0bb26 /arch/x86/include/asm/apic.h
parent		abf71f3066740f3b59c3f731b4b68ed335f7b24d (diff)
x86/apic: Optimize cpu traversal in __assign_irq_vector() using domain membership
Currently __assign_irq_vector() goes through each cpu in the specified mask until it finds a free vector that is available in all the cpus belonging to the same interrupt domain. We visit all the interrupt domain sibling cpus to reserve the free vector. So, when we fail to find a free vector in an interrupt domain, it is safe to continue the search with a cpu belonging to a new interrupt domain: there is no need to go through a cpu whose domain has already been visited.

Use the irq_cfg's old_domain to track the visited domains and optimize the cpu traversal while finding a free vector in the given cpumask (see the sketch below).

NOTE: We could also optimize the search by using for_each_cpu() and skipping the current cpu if it is not the first cpu in the mask returned by vector_allocation_domain(). But re-using cfg->old_domain to track the visited domains is slightly faster.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Acked-by: Yinghai Lu <yinghai@kernel.org>
Acked-by: Alexander Gordeev <agordeev@redhat.com>
Acked-by: Cyrill Gorcunov <gorcunov@openvz.org>
Link: http://lkml.kernel.org/r/1340656709-11423-2-git-send-email-suresh.b.siddha@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
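For illustration, a minimal sketch of the traversal this change enables. The real work happens in __assign_irq_vector() in arch/x86/kernel/apic/io_apic.c, which is not part of this diff; try_reserve_vector() below is a hypothetical stand-in for the vector search that the real function performs inline.

	static int assign_vector_sketch(struct irq_cfg *cfg, const struct cpumask *mask)
	{
		cpumask_var_t tmp_mask;
		int cpu, err = -ENOSPC;

		if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
			return -ENOMEM;

		/* No interrupt domain has been visited yet. */
		cpumask_clear(cfg->old_domain);

		cpu = cpumask_first_and(mask, cpu_online_mask);
		while (cpu < nr_cpu_ids) {
			/* Expand this cpu to its full interrupt domain. */
			apic->vector_allocation_domain(cpu, tmp_mask);

			if (try_reserve_vector(cfg, tmp_mask)) {	/* hypothetical helper */
				err = 0;
				break;
			}

			/*
			 * The whole domain is out of free vectors: mark it
			 * visited and jump straight to the first remaining
			 * cpu in @mask whose domain has not been searched.
			 */
			cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask);
			cpumask_andnot(tmp_mask, mask, cfg->old_domain);
			cpu = cpumask_first_and(tmp_mask, cpu_online_mask);
		}

		free_cpumask_var(tmp_mask);
		return err;
	}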
Diffstat (limited to 'arch/x86/include/asm/apic.h')
-rw-r--r--	arch/x86/include/asm/apic.h | 8 +++-----
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index eec240e12091..8bebeb8952fb 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -306,7 +306,7 @@ struct apic {
 	unsigned long (*check_apicid_used)(physid_mask_t *map, int apicid);
 	unsigned long (*check_apicid_present)(int apicid);
 
-	bool (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
+	void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
 	void (*init_apic_ldr)(void);
 
 	void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap);
@@ -614,7 +614,7 @@ default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 			       const struct cpumask *andmask,
 			       unsigned int *apicid);
 
-static inline bool
+static inline void
 flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
 	/* Careful. Some cpus do not strictly honor the set of cpus
@@ -627,14 +627,12 @@ flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
 	 */
 	cpumask_clear(retmask);
 	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
-	return false;
 }
 
-static inline bool
+static inline void
 default_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
 	cpumask_copy(retmask, cpumask_of(cpu));
-	return true;
 }
 
 static inline unsigned long default_check_apicid_used(physid_mask_t *map, int apicid)
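For context, a hedged usage sketch contrasting the two policies after the bool return is dropped: the flat policy places every cpu covered by APIC_ALL_CPUS into a single domain, so one failed vector search rules them all out at once, while the default policy makes each cpu its own single-member domain. show_domain() is a hypothetical debugging helper, not part of this patch.

	static void show_domain(int cpu)	/* hypothetical, illustration only */
	{
		cpumask_var_t dom;
		int sibling;

		if (!alloc_cpumask_var(&dom, GFP_KERNEL))
			return;

		/* After this patch the callback just fills @dom; no return value. */
		apic->vector_allocation_domain(cpu, dom);

		for_each_cpu(sibling, dom)
			pr_info("cpu%d shares its vector domain with cpu%d\n",
				cpu, sibling);

		free_cpumask_var(dom);
	}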