aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorMike Travis <travis@sgi.com>2008-12-17 18:21:39 -0500
committerIngo Molnar <mingo@elte.hu>2008-12-18 05:59:24 -0500
commita775a38b1353161a6d7af86b667d6523c12c1a37 (patch)
tree3bb16b2345b1090bacbd06c64ca62298a5fc23d6 /arch
parent9a3d8f735eee90bb5b1351983e946bc637041c01 (diff)
x86: fix cpu_mask_to_apicid_and to include cpu_online_mask
Impact: fix potential APIC crash In determining the destination apicid, there are usually three cpumasks that are considered: the incoming cpumask arg, cfg->domain and the cpu_online_mask. Since we are just introducing the cpu_mask_to_apicid_and function, make sure it includes the cpu_online_mask in its evaluation. [Added with this patch.] There are two io_apic.c functions that did not previously use the cpu_online_mask: setup_IO_APIC_irq and msi_compose_msg. Both of these simply used cpu_mask_to_apicid(cfg->domain & TARGET_CPUS), and all but one arch (NUMAQ[*]) returns only online cpus in the TARGET_CPUS mask, so the behavior is identical for all cases. [*: NUMAQ bug?] Note that alloc_cpumask_var is only used for the 32-bit cases where it's highly likely that the cpumask set size will be small and therefore CPUMASK_OFFSTACK=n. But if that's not the case, failing the allocation will cause the same return value as the default. Signed-off-by: Mike Travis <travis@sgi.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/include/asm/bigsmp/apic.h4
-rw-r--r--arch/x86/include/asm/es7000/apic.h40
-rw-r--r--arch/x86/include/asm/mach-default/mach_apic.h3
-rw-r--r--arch/x86/include/asm/summit/apic.h30
-rw-r--r--arch/x86/kernel/genapic_flat_64.c4
-rw-r--r--arch/x86/kernel/genx2apic_cluster.c4
-rw-r--r--arch/x86/kernel/genx2apic_phys.c4
-rw-r--r--arch/x86/kernel/genx2apic_uv_x.c4
8 files changed, 52 insertions, 41 deletions
diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h
index 976399debb3f..d8dd9f537911 100644
--- a/arch/x86/include/asm/bigsmp/apic.h
+++ b/arch/x86/include/asm/bigsmp/apic.h
@@ -138,7 +138,9 @@ static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
138 * We're using fixed IRQ delivery, can only return one phys APIC ID. 138 * We're using fixed IRQ delivery, can only return one phys APIC ID.
139 * May as well be the first. 139 * May as well be the first.
140 */ 140 */
141 cpu = cpumask_any_and(cpumask, andmask); 141 for_each_cpu_and(cpu, cpumask, andmask)
142 if (cpumask_test_cpu(cpu, cpu_online_mask))
143 break;
142 if (cpu < nr_cpu_ids) 144 if (cpu < nr_cpu_ids)
143 return cpu_to_logical_apicid(cpu); 145 return cpu_to_logical_apicid(cpu);
144 146
diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h
index ba8423c5363f..51ac1230294e 100644
--- a/arch/x86/include/asm/es7000/apic.h
+++ b/arch/x86/include/asm/es7000/apic.h
@@ -214,51 +214,47 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
214 return apicid; 214 return apicid;
215} 215}
216 216
217static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask, 217
218static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
218 const struct cpumask *andmask) 219 const struct cpumask *andmask)
219{ 220{
220 int num_bits_set; 221 int num_bits_set;
221 int num_bits_set2;
222 int cpus_found = 0; 222 int cpus_found = 0;
223 int cpu; 223 int cpu;
224 int apicid = 0; 224 int apicid = cpu_to_logical_apicid(0);
225 cpumask_var_t cpumask;
226
227 if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
228 return apicid;
229
230 cpumask_and(cpumask, inmask, andmask);
231 cpumask_and(cpumask, cpumask, cpu_online_mask);
225 232
226 num_bits_set = cpumask_weight(cpumask); 233 num_bits_set = cpumask_weight(cpumask);
227 num_bits_set2 = cpumask_weight(andmask);
228 num_bits_set = min(num_bits_set, num_bits_set2);
229 /* Return id to all */ 234 /* Return id to all */
230 if (num_bits_set >= nr_cpu_ids) 235 if (num_bits_set == NR_CPUS)
231#if defined CONFIG_ES7000_CLUSTERED_APIC 236 goto exit;
232 return 0xFF;
233#else
234 return cpu_to_logical_apicid(0);
235#endif
236 /* 237 /*
237 * The cpus in the mask must all be on the apic cluster. If are not 238 * The cpus in the mask must all be on the apic cluster. If are not
238 * on the same apicid cluster return default value of TARGET_CPUS. 239 * on the same apicid cluster return default value of TARGET_CPUS.
239 */ 240 */
240 cpu = cpumask_first_and(cpumask, andmask); 241 cpu = cpumask_first(cpumask);
241 apicid = cpu_to_logical_apicid(cpu); 242 apicid = cpu_to_logical_apicid(cpu);
242
243 while (cpus_found < num_bits_set) { 243 while (cpus_found < num_bits_set) {
244 if (cpumask_test_cpu(cpu, cpumask) && 244 if (cpumask_test_cpu(cpu, cpumask)) {
245 cpumask_test_cpu(cpu, andmask)) {
246 int new_apicid = cpu_to_logical_apicid(cpu); 245 int new_apicid = cpu_to_logical_apicid(cpu);
247 if (apicid_cluster(apicid) != 246 if (apicid_cluster(apicid) !=
248 apicid_cluster(new_apicid)) { 247 apicid_cluster(new_apicid)){
249 printk(KERN_WARNING 248 printk ("%s: Not a valid mask!\n", __func__);
250 "%s: Not a valid mask!\n", __func__);
251#if defined CONFIG_ES7000_CLUSTERED_APIC
252 return 0xFF;
253#else
254 return cpu_to_logical_apicid(0); 249 return cpu_to_logical_apicid(0);
255#endif
256 } 250 }
257 apicid = new_apicid; 251 apicid = new_apicid;
258 cpus_found++; 252 cpus_found++;
259 } 253 }
260 cpu++; 254 cpu++;
261 } 255 }
256exit:
257 free_cpumask_var(cpumask);
262 return apicid; 258 return apicid;
263} 259}
264 260
diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h
index 8863d978cb96..cc09cbbee27e 100644
--- a/arch/x86/include/asm/mach-default/mach_apic.h
+++ b/arch/x86/include/asm/mach-default/mach_apic.h
@@ -72,8 +72,9 @@ static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
72{ 72{
73 unsigned long mask1 = cpumask_bits(cpumask)[0]; 73 unsigned long mask1 = cpumask_bits(cpumask)[0];
74 unsigned long mask2 = cpumask_bits(andmask)[0]; 74 unsigned long mask2 = cpumask_bits(andmask)[0];
75 unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
75 76
76 return (unsigned int)(mask1 & mask2); 77 return (unsigned int)(mask1 & mask2 & mask3);
77} 78}
78 79
79static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) 80static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h
index 651a93849341..99327d1be49f 100644
--- a/arch/x86/include/asm/summit/apic.h
+++ b/arch/x86/include/asm/summit/apic.h
@@ -170,35 +170,37 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
170 return apicid; 170 return apicid;
171} 171}
172 172
173static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask, 173static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
174 const struct cpumask *andmask) 174 const struct cpumask *andmask)
175{ 175{
176 int num_bits_set; 176 int num_bits_set;
177 int num_bits_set2;
178 int cpus_found = 0; 177 int cpus_found = 0;
179 int cpu; 178 int cpu;
180 int apicid = 0; 179 int apicid = 0xFF;
180 cpumask_var_t cpumask;
181
182 if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
183 return (int) 0xFF;
184
185 cpumask_and(cpumask, inmask, andmask);
186 cpumask_and(cpumask, cpumask, cpu_online_mask);
181 187
182 num_bits_set = cpumask_weight(cpumask); 188 num_bits_set = cpumask_weight(cpumask);
183 num_bits_set2 = cpumask_weight(andmask);
184 num_bits_set = min(num_bits_set, num_bits_set2);
185 /* Return id to all */ 189 /* Return id to all */
186 if (num_bits_set >= nr_cpu_ids) 190 if (num_bits_set == nr_cpu_ids)
187 return 0xFF; 191 goto exit;
188 /* 192 /*
189 * The cpus in the mask must all be on the apic cluster. If are not 193 * The cpus in the mask must all be on the apic cluster. If are not
190 * on the same apicid cluster return default value of TARGET_CPUS. 194 * on the same apicid cluster return default value of TARGET_CPUS.
191 */ 195 */
192 cpu = cpumask_first_and(cpumask, andmask); 196 cpu = cpumask_first(cpumask);
193 apicid = cpu_to_logical_apicid(cpu); 197 apicid = cpu_to_logical_apicid(cpu);
194 while (cpus_found < num_bits_set) { 198 while (cpus_found < num_bits_set) {
195 if (cpumask_test_cpu(cpu, cpumask) 199 if (cpumask_test_cpu(cpu, cpumask)) {
196 && cpumask_test_cpu(cpu, andmask)) {
197 int new_apicid = cpu_to_logical_apicid(cpu); 200 int new_apicid = cpu_to_logical_apicid(cpu);
198 if (apicid_cluster(apicid) != 201 if (apicid_cluster(apicid) !=
199 apicid_cluster(new_apicid)) { 202 apicid_cluster(new_apicid)){
200 printk(KERN_WARNING 203 printk ("%s: Not a valid mask!\n", __func__);
201 "%s: Not a valid mask!\n", __func__);
202 return 0xFF; 204 return 0xFF;
203 } 205 }
204 apicid = apicid | new_apicid; 206 apicid = apicid | new_apicid;
@@ -206,6 +208,8 @@ static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
206 } 208 }
207 cpu++; 209 cpu++;
208 } 210 }
211exit:
212 free_cpumask_var(cpumask);
209 return apicid; 213 return apicid;
210} 214}
211 215
diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c
index 7fa5f49c2dda..34185488e4fb 100644
--- a/arch/x86/kernel/genapic_flat_64.c
+++ b/arch/x86/kernel/genapic_flat_64.c
@@ -276,7 +276,9 @@ physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
276 * We're using fixed IRQ delivery, can only return one phys APIC ID. 276 * We're using fixed IRQ delivery, can only return one phys APIC ID.
277 * May as well be the first. 277 * May as well be the first.
278 */ 278 */
279 cpu = cpumask_any_and(cpumask, andmask); 279 for_each_cpu_and(cpu, cpumask, andmask)
280 if (cpumask_test_cpu(cpu, cpu_online_mask))
281 break;
280 if (cpu < nr_cpu_ids) 282 if (cpu < nr_cpu_ids)
281 return per_cpu(x86_cpu_to_apicid, cpu); 283 return per_cpu(x86_cpu_to_apicid, cpu);
282 return BAD_APICID; 284 return BAD_APICID;
diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c
index 4716a0c9f936..d451c9b9fdff 100644
--- a/arch/x86/kernel/genx2apic_cluster.c
+++ b/arch/x86/kernel/genx2apic_cluster.c
@@ -133,7 +133,9 @@ static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
133 * We're using fixed IRQ delivery, can only return one phys APIC ID. 133 * We're using fixed IRQ delivery, can only return one phys APIC ID.
134 * May as well be the first. 134 * May as well be the first.
135 */ 135 */
136 cpu = cpumask_any_and(cpumask, andmask); 136 for_each_cpu_and(cpu, cpumask, andmask)
137 if (cpumask_test_cpu(cpu, cpu_online_mask))
138 break;
137 if (cpu < nr_cpu_ids) 139 if (cpu < nr_cpu_ids)
138 return per_cpu(x86_cpu_to_apicid, cpu); 140 return per_cpu(x86_cpu_to_apicid, cpu);
139 return BAD_APICID; 141 return BAD_APICID;
diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c
index b255507884f2..62895cf315ff 100644
--- a/arch/x86/kernel/genx2apic_phys.c
+++ b/arch/x86/kernel/genx2apic_phys.c
@@ -132,7 +132,9 @@ static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
132 * We're using fixed IRQ delivery, can only return one phys APIC ID. 132 * We're using fixed IRQ delivery, can only return one phys APIC ID.
133 * May as well be the first. 133 * May as well be the first.
134 */ 134 */
135 cpu = cpumask_any_and(cpumask, andmask); 135 for_each_cpu_and(cpu, cpumask, andmask)
136 if (cpumask_test_cpu(cpu, cpu_online_mask))
137 break;
136 if (cpu < nr_cpu_ids) 138 if (cpu < nr_cpu_ids)
137 return per_cpu(x86_cpu_to_apicid, cpu); 139 return per_cpu(x86_cpu_to_apicid, cpu);
138 return BAD_APICID; 140 return BAD_APICID;
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c
index 3984682cd849..0e88be11227d 100644
--- a/arch/x86/kernel/genx2apic_uv_x.c
+++ b/arch/x86/kernel/genx2apic_uv_x.c
@@ -188,7 +188,9 @@ static unsigned int uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
188 * We're using fixed IRQ delivery, can only return one phys APIC ID. 188 * We're using fixed IRQ delivery, can only return one phys APIC ID.
189 * May as well be the first. 189 * May as well be the first.
190 */ 190 */
191 cpu = cpumask_any_and(cpumask, andmask); 191 for_each_cpu_and(cpu, cpumask, andmask)
192 if (cpumask_test_cpu(cpu, cpu_online_mask))
193 break;
192 if (cpu < nr_cpu_ids) 194 if (cpu < nr_cpu_ids)
193 return per_cpu(x86_cpu_to_apicid, cpu); 195 return per_cpu(x86_cpu_to_apicid, cpu);
194 return BAD_APICID; 196 return BAD_APICID;