-rw-r--r--  arch/x86/include/asm/apic.h             8
-rw-r--r--  arch/x86/kernel/apic/apic_noop.c        3
-rw-r--r--  arch/x86/kernel/apic/io_apic.c         15
-rw-r--r--  arch/x86/kernel/apic/x2apic_cluster.c   3
-rw-r--r--  arch/x86/kernel/vsmp_64.c               3
5 files changed, 14 insertions, 18 deletions
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index eec240e12091..8bebeb8952fb 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -306,7 +306,7 @@ struct apic {
 	unsigned long (*check_apicid_used)(physid_mask_t *map, int apicid);
 	unsigned long (*check_apicid_present)(int apicid);
 
-	bool (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
+	void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
 	void (*init_apic_ldr)(void);
 
 	void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap);
@@ -614,7 +614,7 @@ default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 				   const struct cpumask *andmask,
 				   unsigned int *apicid);
 
-static inline bool
+static inline void
 flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
 	/* Careful. Some cpus do not strictly honor the set of cpus
@@ -627,14 +627,12 @@ flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
 	 */
 	cpumask_clear(retmask);
 	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
-	return false;
 }
 
-static inline bool
+static inline void
 default_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
 	cpumask_copy(retmask, cpumask_of(cpu));
-	return true;
 }
 
 static inline unsigned long default_check_apicid_used(physid_mask_t *map, int apicid)
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
index 65c07fc630a1..08c337bc49ff 100644
--- a/arch/x86/kernel/apic/apic_noop.c
+++ b/arch/x86/kernel/apic/apic_noop.c
@@ -100,12 +100,11 @@ static unsigned long noop_check_apicid_present(int bit)
 	return physid_isset(bit, phys_cpu_present_map);
 }
 
-static bool noop_vector_allocation_domain(int cpu, struct cpumask *retmask)
+static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
 	if (cpu != 0)
 		pr_warning("APIC: Vector allocated for non-BSP cpu\n");
 	cpumask_copy(retmask, cpumask_of(cpu));
-	return true;
 }
 
 static u32 noop_apic_read(u32 reg)
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index a951ef7decb1..8a08f09aa505 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1134,12 +1134,13 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 
 	/* Only try and allocate irqs on cpus that are present */
 	err = -ENOSPC;
-	for_each_cpu_and(cpu, mask, cpu_online_mask) {
+	cpumask_clear(cfg->old_domain);
+	cpu = cpumask_first_and(mask, cpu_online_mask);
+	while (cpu < nr_cpu_ids) {
 		int new_cpu;
 		int vector, offset;
-		bool more_domains;
 
-		more_domains = apic->vector_allocation_domain(cpu, tmp_mask);
+		apic->vector_allocation_domain(cpu, tmp_mask);
 
 		if (cpumask_subset(tmp_mask, cfg->domain)) {
 			free_cpumask_var(tmp_mask);
@@ -1156,10 +1157,10 @@ next:
 		}
 
 		if (unlikely(current_vector == vector)) {
-			if (more_domains)
-				continue;
-			else
-				break;
+			cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask);
+			cpumask_andnot(tmp_mask, mask, cfg->old_domain);
+			cpu = cpumask_first_and(tmp_mask, cpu_online_mask);
+			continue;
 		}
 
 		if (test_bit(vector, used_vectors))
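
The io_apic.c hunks above carry the behavioural part of the change: since vector_allocation_domain() no longer reports whether further domains remain, __assign_irq_vector() now records each exhausted domain in cfg->old_domain and restarts the search from the first online CPU in mask that lies outside every domain tried so far. As a rough illustration of that retry scheme only, not the kernel code itself, the following user-space C sketch models the loop with plain bitmasks; domain_of(), try_domain() and first_cpu() are hypothetical stand-ins for apic->vector_allocation_domain(), the per-domain vector scan, and cpumask_first_and().

/* Hypothetical user-space model of the reworked __assign_irq_vector() loop:
 * walk candidate CPUs, expand each into its allocation domain, and on
 * failure remember the whole domain in old_domain so the next candidate
 * CPU is picked outside every domain tried so far. */
#include <stdio.h>

typedef unsigned int mask_t;		/* one bit per CPU, CPUs 0..31 */

static mask_t domain_of(int cpu)	/* stand-in for vector_allocation_domain() */
{
	return 0xfu << (cpu & ~3);	/* toy 4-CPU clusters: {0-3}, {4-7}, ... */
}

static int try_domain(mask_t domain)	/* stand-in for the per-domain vector scan */
{
	return (domain & 0x10) != 0;	/* pretend only CPUs 4-7 have a free vector */
}

static int first_cpu(mask_t m)		/* stand-in for cpumask_first_and() */
{
	for (int cpu = 0; cpu < 32; cpu++)
		if (m & (1u << cpu))
			return cpu;
	return 32;			/* analogous to nr_cpu_ids */
}

int main(void)
{
	mask_t mask = 0xff;		/* CPUs the irq may target */
	mask_t online = 0xff;
	mask_t old_domain = 0;		/* domains already tried */
	int cpu = first_cpu(mask & online);

	while (cpu < 32) {
		mask_t domain = domain_of(cpu);

		if (try_domain(domain)) {
			printf("vector allocated in the domain of cpu %d\n", cpu);
			return 0;
		}
		/* Domain exhausted: remember it and move to the next
		 * candidate CPU outside everything tried so far. */
		old_domain |= domain;
		cpu = first_cpu(mask & ~old_domain & online);
	}
	printf("no free vector in any domain\n");
	return 1;
}

The effect matches what the old more_domains return value was approximating, but the already-tried domains are tracked explicitly, so overlapping domains are skipped rather than retried.
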
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 943d03fc6fc4..b5d889b5659a 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -212,11 +212,10 @@ static int x2apic_cluster_probe(void)
 /*
  * Each x2apic cluster is an allocation domain.
  */
-static bool cluster_vector_allocation_domain(int cpu, struct cpumask *retmask)
+static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
 	cpumask_clear(retmask);
 	cpumask_copy(retmask, per_cpu(cpus_in_cluster, cpu));
-	return true;
 }
 
 static struct apic apic_x2apic_cluster = {
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index fa5adb7c228c..3f0285ac00fa 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -208,10 +208,9 @@ static int apicid_phys_pkg_id(int initial_apic_id, int index_msb)
  * In vSMP, all cpus should be capable of handling interrupts, regardless of
  * the APIC used.
  */
-static bool fill_vector_allocation_domain(int cpu, struct cpumask *retmask)
+static void fill_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
 	cpumask_setall(retmask);
-	return false;
 }
 
 static void vsmp_apic_post_init(void)