author	Alexander Gordeev <agordeev@redhat.com>	2012-06-07 09:15:44 -0400
committer	Ingo Molnar <mingo@kernel.org>	2012-06-08 05:44:29 -0400
commit	8637e38aff14d048b649075114023023a2e80fba (patch)
tree	cbafb89d56c83e7a74d2297a490989865e95cccd /arch/x86/include
parent	1bccd58bfffc5a677051937b332b71f0686187c1 (diff)
x86/apic: Avoid useless scanning thru a cpumask in assign_irq_vector()
In case of static vector allocation domains (i.e. flat), if all vector numbers are exhausted, an attempt to assign a new vector will lead to useless scans through all CPUs in the cpumask, even though it is known that each new pass would fail. Make this corner case less painful by letting vector_allocation_domain() report whether the resulting allocation domain depends on the passed-in arguments, and stop scanning early when it does not.

The same could have been achieved by introducing a static flag in the apic operations. But let's allow vector_allocation_domain() to have more intelligence here and decide dynamically, in case we need it in the future.

Signed-off-by: Alexander Gordeev <agordeev@redhat.com>
Acked-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/20120607131542.GE4759@dhcp-26-207.brq.redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
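To illustrate the idea, here is a minimal, standalone C sketch of how a caller can use the new bool return value to stop the per-CPU scan early. This is not the kernel's actual __assign_irq_vector(); the types and helper names (cpumask, flat_domain, try_allocate_vector, assign_vector) are simplified stand-ins for illustration only.

/*
 * Minimal standalone sketch (assumed names, not kernel code): if the
 * allocation-domain callback reports that its result does not depend
 * on the cpu argument, one failed pass is enough and the scan stops.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

struct cpumask { unsigned long bits; };

/* Flat domain: the returned mask is the same for every cpu. */
static bool flat_domain(int cpu, struct cpumask *retmask)
{
	retmask->bits = (1UL << NR_CPUS) - 1;	/* all CPUs */
	return false;				/* result does not depend on 'cpu' */
}

/* Pretend every vector in this domain is already taken. */
static bool try_allocate_vector(const struct cpumask *domain)
{
	return false;
}

static int assign_vector(bool (*domain_fn)(int, struct cpumask *))
{
	struct cpumask tmp;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		bool depends_on_cpu = domain_fn(cpu, &tmp);

		if (try_allocate_vector(&tmp))
			return 0;
		/* Static domain: further passes would yield the same mask. */
		if (!depends_on_cpu)
			break;
	}
	return -1;
}

int main(void)
{
	printf("result: %d\n", assign_vector(flat_domain));
	return 0;
}

In this sketch the flat domain returns false, so the loop gives up after a single failed pass instead of retrying the identical mask for every CPU, which mirrors the early-exit behavior the patch enables.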
Diffstat (limited to 'arch/x86/include')
-rw-r--r--	arch/x86/include/asm/apic.h	8
1 file changed, 5 insertions, 3 deletions
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index feb2dbdae9ec..e3fecd50d5ca 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -306,7 +306,7 @@ struct apic {
 	unsigned long (*check_apicid_used)(physid_mask_t *map, int apicid);
 	unsigned long (*check_apicid_present)(int apicid);
 
-	void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
+	bool (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
 	void (*init_apic_ldr)(void);
 
 	void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap);
@@ -615,7 +615,7 @@ extern unsigned int
 default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 			       const struct cpumask *andmask);
 
-static inline void
+static inline bool
 flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
 	/* Careful. Some cpus do not strictly honor the set of cpus
@@ -628,12 +628,14 @@ flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
 	 */
 	cpumask_clear(retmask);
 	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
+	return false;
 }
 
-static inline void
+static inline bool
 default_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
 	cpumask_copy(retmask, cpumask_of(cpu));
+	return true;
 }
 
 static inline unsigned long default_check_apicid_used(physid_mask_t *map, int apicid)