author     Mike Travis <travis@sgi.com>    2008-12-16 20:33:56 -0500
committer  Mike Travis <travis@sgi.com>    2008-12-16 20:40:57 -0500
commit     22f65d31b25a320a5246592160bcb102d2791c45 (patch)
tree       1995c354c2583a4137a51b26833054b6e1cfbab1 /arch/x86/kernel/io_apic.c
parent     6eeb7c5a99434596c5953a95baa17d2f085664e3 (diff)
x86: Update io_apic.c to use new cpumask API
Impact: cleanup, consolidate patches, use new API
Consolidate the following patches into a single patch to adapt to the
new sparseirq code in arch/x86/kernel/io_apic.c, add allocation of the
cpumask_var_t's domain and old_domain, and reduce further merge
conflicts. Only one file (arch/x86/kernel/io_apic.c) is changed by
all of these patches.
0006-x86-io_apic-change-irq_cfg-domain-old_domain-to.patch
0007-x86-io_apic-set_desc_affinity.patch
0008-x86-io_apic-send_cleanup_vector.patch
0009-x86-io_apic-eliminate-remaining-cpumask_ts-from-st.patch
0021-x86-final-cleanups-in-io_apic-to-use-new-cpumask-AP.patch
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
Diffstat (limited to 'arch/x86/kernel/io_apic.c')

-rw-r--r--  arch/x86/kernel/io_apic.c | 302
1 file changed, 145 insertions(+), 157 deletions(-)
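Background for the diff below: a `cpumask_t` is a fixed-size bitmap of NR_CPUS bits that lives on the stack and is copied by value, while a `cpumask_var_t` can be heap-allocated when CONFIG_CPUMASK_OFFSTACK=y (large NR_CPUS builds) and so must be explicitly allocated and freed. Here is a minimal sketch of the allocate/operate/free pattern the patch adopts; this is an illustrative example, not part of the commit, and `count_online_in_domain()` is a hypothetical helper:

```c
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/*
 * Hypothetical helper illustrating the cpumask_var_t pattern used in
 * this patch. With CONFIG_CPUMASK_OFFSTACK=y, alloc_cpumask_var()
 * kmalloc()s the mask and can fail; otherwise the mask is embedded in
 * the variable itself, allocation always succeeds, and
 * free_cpumask_var() is a no-op.
 */
static int count_online_in_domain(const struct cpumask *domain)
{
	cpumask_var_t tmp;
	int weight;

	if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
		return -ENOMEM;			/* callers must handle failure */

	cpumask_and(tmp, domain, cpu_online_mask);	/* tmp = domain & online */
	weight = cpumask_weight(tmp);			/* number of set bits */

	free_cpumask_var(tmp);
	return weight;
}
```

This is why `__assign_irq_vector()` below gains an -ENOMEM path and a `free_cpumask_var()` on every exit, and why the new `send_cleanup_vector()` carries a fallback loop for the allocation-failure case.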
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index 7f23ce7f5518..60bb8b19f4cd 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -136,8 +136,8 @@ static struct irq_pin_list *get_one_free_irq_2_pin(int cpu)
 
 struct irq_cfg {
 	struct irq_pin_list *irq_2_pin;
-	cpumask_t domain;
-	cpumask_t old_domain;
+	cpumask_var_t domain;
+	cpumask_var_t old_domain;
 	unsigned move_cleanup_count;
 	u8 vector;
 	u8 move_in_progress : 1;
@@ -149,22 +149,22 @@ static struct irq_cfg irq_cfgx[] = {
 #else
 static struct irq_cfg irq_cfgx[NR_IRQS] = {
 #endif
-	[0]  = { .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR,  },
-	[1]  = { .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR,  },
-	[2]  = { .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR,  },
-	[3]  = { .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR,  },
-	[4]  = { .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR,  },
-	[5]  = { .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR,  },
-	[6]  = { .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR,  },
-	[7]  = { .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR,  },
-	[8]  = { .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR,  },
-	[9]  = { .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR,  },
-	[10] = { .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
-	[11] = { .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
-	[12] = { .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
-	[13] = { .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
-	[14] = { .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
-	[15] = { .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
+	[0]  = { .vector = IRQ0_VECTOR,  },
+	[1]  = { .vector = IRQ1_VECTOR,  },
+	[2]  = { .vector = IRQ2_VECTOR,  },
+	[3]  = { .vector = IRQ3_VECTOR,  },
+	[4]  = { .vector = IRQ4_VECTOR,  },
+	[5]  = { .vector = IRQ5_VECTOR,  },
+	[6]  = { .vector = IRQ6_VECTOR,  },
+	[7]  = { .vector = IRQ7_VECTOR,  },
+	[8]  = { .vector = IRQ8_VECTOR,  },
+	[9]  = { .vector = IRQ9_VECTOR,  },
+	[10] = { .vector = IRQ10_VECTOR, },
+	[11] = { .vector = IRQ11_VECTOR, },
+	[12] = { .vector = IRQ12_VECTOR, },
+	[13] = { .vector = IRQ13_VECTOR, },
+	[14] = { .vector = IRQ14_VECTOR, },
+	[15] = { .vector = IRQ15_VECTOR, },
 };
 
 void __init arch_early_irq_init(void)
@@ -180,6 +180,10 @@ void __init arch_early_irq_init(void)
 	for (i = 0; i < count; i++) {
 		desc = irq_to_desc(i);
 		desc->chip_data = &cfg[i];
+		alloc_bootmem_cpumask_var(&cfg[i].domain);
+		alloc_bootmem_cpumask_var(&cfg[i].old_domain);
+		if (i < NR_IRQS_LEGACY)
+			cpumask_setall(cfg[i].domain);
 	}
 }
 
@@ -204,6 +208,20 @@ static struct irq_cfg *get_one_free_irq_cfg(int cpu)
 	node = cpu_to_node(cpu);
 
 	cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
+	if (cfg) {
+		/* FIXME: needs alloc_cpumask_var_node() */
+		if (!alloc_cpumask_var(&cfg->domain, GFP_ATOMIC)) {
+			kfree(cfg);
+			cfg = NULL;
+		} else if (!alloc_cpumask_var(&cfg->old_domain, GFP_ATOMIC)) {
+			free_cpumask_var(cfg->domain);
+			kfree(cfg);
+			cfg = NULL;
+		} else {
+			cpumask_clear(cfg->domain);
+			cpumask_clear(cfg->old_domain);
+		}
+	}
 	printk(KERN_DEBUG " alloc irq_cfg on cpu %d node %d\n", cpu, node);
 
 	return cfg;
@@ -362,6 +380,26 @@ static void ioapic_mask_entry(int apic, int pin)
 }
 
 #ifdef CONFIG_SMP
+static void send_cleanup_vector(struct irq_cfg *cfg)
+{
+	cpumask_var_t cleanup_mask;
+
+	if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
+		unsigned int i;
+		cfg->move_cleanup_count = 0;
+		for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
+			cfg->move_cleanup_count++;
+		for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
+			send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
+	} else {
+		cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
+		cfg->move_cleanup_count = cpumask_weight(cleanup_mask);
+		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
+		free_cpumask_var(cleanup_mask);
+	}
+	cfg->move_in_progress = 0;
+}
+
 static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
 {
 	int apic, pin;
@@ -400,40 +438,52 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
 static int
 assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask);
 
-static void set_ioapic_affinity_irq_desc(struct irq_desc *desc,
-					 const struct cpumask *mask)
+/*
+ * Either sets desc->affinity to a valid value, and returns cpu_mask_to_apicid
+ * of that, or returns BAD_APICID and leaves desc->affinity untouched.
+ */
+static unsigned int
+set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
 {
 	struct irq_cfg *cfg;
-	unsigned long flags;
-	unsigned int dest;
-	cpumask_t tmp;
 	unsigned int irq;
 
 	if (!cpumask_intersects(mask, cpu_online_mask))
-		return;
+		return BAD_APICID;
 
 	irq = desc->irq;
 	cfg = desc->chip_data;
 	if (assign_irq_vector(irq, cfg, mask))
-		return;
+		return BAD_APICID;
 
+	cpumask_and(&desc->affinity, cfg->domain, mask);
 	set_extra_move_desc(desc, mask);
+	return cpu_mask_to_apicid_and(&desc->affinity, cpu_online_mask);
+}
 
-	cpumask_and(&tmp, &cfg->domain, mask);
-	dest = cpu_mask_to_apicid(&tmp);
-	/*
-	 * Only the high 8 bits are valid.
-	 */
-	dest = SET_APIC_LOGICAL_ID(dest);
+static void
+set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
+{
+	struct irq_cfg *cfg;
+	unsigned long flags;
+	unsigned int dest;
+	unsigned int irq;
+
+	irq = desc->irq;
+	cfg = desc->chip_data;
 
 	spin_lock_irqsave(&ioapic_lock, flags);
-	__target_IO_APIC_irq(irq, dest, cfg);
-	cpumask_copy(&desc->affinity, mask);
+	dest = set_desc_affinity(desc, mask);
+	if (dest != BAD_APICID) {
+		/* Only the high 8 bits are valid. */
+		dest = SET_APIC_LOGICAL_ID(dest);
+		__target_IO_APIC_irq(irq, dest, cfg);
+	}
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
-static void set_ioapic_affinity_irq(unsigned int irq,
-				    const struct cpumask *mask)
+static void
+set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask)
 {
 	struct irq_desc *desc;
 
@@ -1117,26 +1167,32 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 	 */
 	static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
 	unsigned int old_vector;
-	int cpu;
-	cpumask_t tmp_mask;
+	int cpu, err;
+	cpumask_var_t tmp_mask;
 
 	if ((cfg->move_in_progress) || cfg->move_cleanup_count)
 		return -EBUSY;
 
+	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
+		return -ENOMEM;
+
 	old_vector = cfg->vector;
 	if (old_vector) {
-		cpus_and(tmp_mask, *mask, cpu_online_map);
-		cpus_and(tmp_mask, cfg->domain, tmp_mask);
-		if (!cpus_empty(tmp_mask))
+		cpumask_and(tmp_mask, mask, cpu_online_mask);
+		cpumask_and(tmp_mask, cfg->domain, tmp_mask);
+		if (!cpumask_empty(tmp_mask)) {
+			free_cpumask_var(tmp_mask);
 			return 0;
+		}
 	}
 
 	/* Only try and allocate irqs on cpus that are present */
-	for_each_cpu_and(cpu, mask, &cpu_online_map) {
+	err = -ENOSPC;
+	for_each_cpu_and(cpu, mask, cpu_online_mask) {
 		int new_cpu;
 		int vector, offset;
 
-		vector_allocation_domain(cpu, &tmp_mask);
+		vector_allocation_domain(cpu, tmp_mask);
 
 		vector = current_vector;
 		offset = current_offset;
@@ -1156,7 +1212,7 @@ next:
 		if (vector == SYSCALL_VECTOR)
 			goto next;
 #endif
-		for_each_cpu_and(new_cpu, &tmp_mask, &cpu_online_map)
+		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
 			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
 				goto next;
 		/* Found one! */
@@ -1164,15 +1220,17 @@ next:
 		current_offset = offset;
 		if (old_vector) {
 			cfg->move_in_progress = 1;
-			cfg->old_domain = cfg->domain;
+			cpumask_copy(cfg->old_domain, cfg->domain);
 		}
-		for_each_cpu_and(new_cpu, &tmp_mask, &cpu_online_map)
+		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
 			per_cpu(vector_irq, new_cpu)[vector] = irq;
 		cfg->vector = vector;
-		cfg->domain = tmp_mask;
-		return 0;
+		cpumask_copy(cfg->domain, tmp_mask);
+		err = 0;
+		break;
 	}
-	return -ENOSPC;
+	free_cpumask_var(tmp_mask);
+	return err;
 }
 
 static int
@@ -1189,23 +1247,20 @@ assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 
 static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
 {
-	cpumask_t mask;
 	int cpu, vector;
 
 	BUG_ON(!cfg->vector);
 
 	vector = cfg->vector;
-	cpus_and(mask, cfg->domain, cpu_online_map);
-	for_each_cpu_mask_nr(cpu, mask)
+	for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
 		per_cpu(vector_irq, cpu)[vector] = -1;
 
 	cfg->vector = 0;
-	cpus_clear(cfg->domain);
+	cpumask_clear(cfg->domain);
 
 	if (likely(!cfg->move_in_progress))
 		return;
-	cpus_and(mask, cfg->old_domain, cpu_online_map);
-	for_each_cpu_mask_nr(cpu, mask) {
+	for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
 		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
 				vector++) {
 			if (per_cpu(vector_irq, cpu)[vector] != irq)
@@ -1230,7 +1285,7 @@ void __setup_vector_irq(int cpu)
 		if (!desc)
 			continue;
 		cfg = desc->chip_data;
-		if (!cpu_isset(cpu, cfg->domain))
+		if (!cpumask_test_cpu(cpu, cfg->domain))
 			continue;
 		vector = cfg->vector;
 		per_cpu(vector_irq, cpu)[vector] = irq;
@@ -1242,7 +1297,7 @@ void __setup_vector_irq(int cpu)
 			continue;
 
 		cfg = irq_cfg(irq);
-		if (!cpu_isset(cpu, cfg->domain))
+		if (!cpumask_test_cpu(cpu, cfg->domain))
 			per_cpu(vector_irq, cpu)[vector] = -1;
 	}
 }
@@ -1378,18 +1433,17 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_desc *desc,
 {
 	struct irq_cfg *cfg;
 	struct IO_APIC_route_entry entry;
-	cpumask_t mask;
+	unsigned int dest;
 
 	if (!IO_APIC_IRQ(irq))
 		return;
 
 	cfg = desc->chip_data;
 
-	mask = *TARGET_CPUS;
-	if (assign_irq_vector(irq, cfg, &mask))
+	if (assign_irq_vector(irq, cfg, TARGET_CPUS))
 		return;
 
-	cpus_and(mask, cfg->domain, mask);
+	dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);
 
 	apic_printk(APIC_VERBOSE,KERN_DEBUG
 		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
@@ -1399,8 +1453,7 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_desc *desc,
 
 
 	if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
-			       cpu_mask_to_apicid(&mask), trigger, polarity,
-			       cfg->vector)) {
+			       dest, trigger, polarity, cfg->vector)) {
 		printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
 		       mp_ioapics[apic].mp_apicid, pin);
 		__clear_irq_vector(irq, cfg);
@@ -2122,7 +2175,7 @@ static int ioapic_retrigger_irq(unsigned int irq)
 	unsigned long flags;
 
 	spin_lock_irqsave(&vector_lock, flags);
-	send_IPI_mask(&cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector);
+	send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
 	spin_unlock_irqrestore(&vector_lock, flags);
 
 	return 1;
@@ -2175,15 +2228,13 @@ static void
 migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
 {
 	struct irq_cfg *cfg;
-	cpumask_t tmpmask;
 	struct irte irte;
 	int modify_ioapic_rte;
 	unsigned int dest;
 	unsigned long flags;
 	unsigned int irq;
 
-	cpus_and(tmpmask, *mask, cpu_online_map);
-	if (cpus_empty(tmpmask))
+	if (!cpumask_intersects(mask, cpu_online_mask))
 		return;
 
 	irq = desc->irq;
@@ -2196,8 +2247,7 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
 
 	set_extra_move_desc(desc, mask);
 
-	cpus_and(tmpmask, cfg->domain, *mask);
-	dest = cpu_mask_to_apicid(&tmpmask);
+	dest = cpu_mask_to_apicid_and(cfg->domain, mask);
 
 	modify_ioapic_rte = desc->status & IRQ_LEVEL;
 	if (modify_ioapic_rte) {
@@ -2214,14 +2264,10 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
 	 */
 	modify_irte(irq, &irte);
 
-	if (cfg->move_in_progress) {
-		cpus_and(tmpmask, cfg->old_domain, cpu_online_map);
-		cfg->move_cleanup_count = cpus_weight(tmpmask);
-		send_IPI_mask(&tmpmask, IRQ_MOVE_CLEANUP_VECTOR);
-		cfg->move_in_progress = 0;
-	}
+	if (cfg->move_in_progress)
+		send_cleanup_vector(cfg);
 
-	desc->affinity = *mask;
+	cpumask_copy(&desc->affinity, mask);
 }
 
 static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
@@ -2247,7 +2293,7 @@ static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
 
 	ret = 0;
 	desc->status &= ~IRQ_MOVE_PENDING;
-	cpus_clear(desc->pending_mask);
+	cpumask_clear(&desc->pending_mask);
 
 unmask:
 	unmask_IO_APIC_irq_desc(desc);
@@ -2333,7 +2379,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
 		if (!cfg->move_cleanup_count)
 			goto unlock;
 
-		if ((vector == cfg->vector) && cpu_isset(me, cfg->domain))
+		if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
 			goto unlock;
 
 		__get_cpu_var(vector_irq)[vector] = -1;
@@ -2356,14 +2402,8 @@ static void irq_complete_move(struct irq_desc **descp)
 
 	vector = ~get_irq_regs()->orig_ax;
 	me = smp_processor_id();
-	if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
-		cpumask_t cleanup_mask;
-
-		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
-		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
-		send_IPI_mask(&cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
-		cfg->move_in_progress = 0;
-	}
+	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
+		send_cleanup_vector(cfg);
 }
 #else
 static inline void irq_complete_move(struct irq_desc **descp) {}
@@ -3088,16 +3128,13 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
 	struct irq_cfg *cfg;
 	int err;
 	unsigned dest;
-	cpumask_t tmp;
 
 	cfg = irq_cfg(irq);
-	tmp = *TARGET_CPUS;
-	err = assign_irq_vector(irq, cfg, &tmp);
+	err = assign_irq_vector(irq, cfg, TARGET_CPUS);
 	if (err)
 		return err;
 
-	cpus_and(tmp, cfg->domain, tmp);
-	dest = cpu_mask_to_apicid(&tmp);
+	dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);
 
 #ifdef CONFIG_INTR_REMAP
 	if (irq_remapped(irq)) {
@@ -3157,19 +3194,12 @@ static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
 	struct irq_cfg *cfg;
 	struct msi_msg msg;
 	unsigned int dest;
-	cpumask_t tmp;
 
-	if (!cpumask_intersects(mask, cpu_online_mask))
+	dest = set_desc_affinity(desc, mask);
+	if (dest == BAD_APICID)
 		return;
 
 	cfg = desc->chip_data;
-	if (assign_irq_vector(irq, cfg, mask))
-		return;
-
-	set_extra_move_desc(desc, mask);
-
-	cpumask_and(&tmp, &cfg->domain, mask);
-	dest = cpu_mask_to_apicid(&tmp);
 
 	read_msi_msg_desc(desc, &msg);
 
@@ -3179,7 +3209,6 @@ static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
 	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
 	write_msi_msg_desc(desc, &msg);
-	cpumask_copy(&desc->affinity, mask);
 }
 #ifdef CONFIG_INTR_REMAP
 /*
@@ -3192,24 +3221,15 @@ ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irq_cfg *cfg;
 	unsigned int dest;
-	cpumask_t tmp, cleanup_mask;
 	struct irte irte;
 
-	if (!cpumask_intersects(mask, cpu_online_mask))
-		return;
-
 	if (get_irte(irq, &irte))
 		return;
 
-	cfg = desc->chip_data;
-	if (assign_irq_vector(irq, cfg, mask))
+	dest = set_desc_affinity(desc, mask);
+	if (dest == BAD_APICID)
 		return;
 
-	set_extra_move_desc(desc, mask);
-
-	cpumask_and(&tmp, &cfg->domain, mask);
-	dest = cpu_mask_to_apicid(&tmp);
-
 	irte.vector = cfg->vector;
 	irte.dest_id = IRTE_DEST(dest);
 
@@ -3223,14 +3243,8 @@ ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
 	 * at the new destination. So, time to cleanup the previous
 	 * vector allocation.
 	 */
-	if (cfg->move_in_progress) {
-		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
-		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
-		send_IPI_mask(&cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
-		cfg->move_in_progress = 0;
-	}
-
-	cpumask_copy(&desc->affinity, mask);
+	if (cfg->move_in_progress)
+		send_cleanup_vector(cfg);
 }
 
 #endif
@@ -3421,25 +3435,18 @@ void arch_teardown_msi_irq(unsigned int irq)
 
 #ifdef CONFIG_DMAR
 #ifdef CONFIG_SMP
-static void dmar_msi_set_affinity(unsigned int irq, const cpumask_t *mask)
+static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irq_cfg *cfg;
 	struct msi_msg msg;
 	unsigned int dest;
-	cpumask_t tmp;
 
-	if (!cpumask_intersects(mask, cpu_online_mask))
+	dest = set_desc_affinity(desc, mask);
+	if (dest == BAD_APICID)
 		return;
 
 	cfg = desc->chip_data;
-	if (assign_irq_vector(irq, cfg, mask))
-		return;
-
-	set_extra_move_desc(desc, mask);
-
-	cpumask_and(&tmp, &cfg->domain, mask);
-	dest = cpu_mask_to_apicid(&tmp);
 
 	dmar_msi_read(irq, &msg);
 
@@ -3449,7 +3456,6 @@ static void dmar_msi_set_affinity(unsigned int irq, const cpumask_t *mask)
 	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
 	dmar_msi_write(irq, &msg);
-	cpumask_copy(&desc->affinity, mask);
 }
 
 #endif /* CONFIG_SMP */
@@ -3483,25 +3489,18 @@ int arch_setup_dmar_msi(unsigned int irq)
 #ifdef CONFIG_HPET_TIMER
 
 #ifdef CONFIG_SMP
-static void hpet_msi_set_affinity(unsigned int irq, const cpumask_t *mask)
+static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irq_cfg *cfg;
 	struct msi_msg msg;
 	unsigned int dest;
-	cpumask_t tmp;
 
-	if (!cpumask_intersects(mask, cpu_online_mask))
+	dest = set_desc_affinity(desc, mask);
+	if (dest == BAD_APICID)
 		return;
 
 	cfg = desc->chip_data;
-	if (assign_irq_vector(irq, cfg, mask))
-		return;
-
-	set_extra_move_desc(desc, mask);
-
-	cpumask_and(&tmp, &cfg->domain, mask);
-	dest = cpu_mask_to_apicid(&tmp);
 
 	hpet_msi_read(irq, &msg);
 
@@ -3511,7 +3510,6 @@ static void hpet_msi_set_affinity(unsigned int irq, const cpumask_t *mask)
 	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
 	hpet_msi_write(irq, &msg);
-	cpumask_copy(&desc->affinity, mask);
 }
 
 #endif /* CONFIG_SMP */
@@ -3566,27 +3564,19 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
 	write_ht_irq_msg(irq, &msg);
 }
 
-static void set_ht_irq_affinity(unsigned int irq, const cpumask_t *mask)
+static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irq_cfg *cfg;
 	unsigned int dest;
-	cpumask_t tmp;
 
-	if (!cpumask_intersects(mask, cpu_online_mask))
+	dest = set_desc_affinity(desc, mask);
+	if (dest == BAD_APICID)
 		return;
 
 	cfg = desc->chip_data;
-	if (assign_irq_vector(irq, cfg, mask))
-		return;
-
-	set_extra_move_desc(desc, mask);
-
-	cpumask_and(&tmp, &cfg->domain, mask);
-	dest = cpu_mask_to_apicid(&tmp);
 
 	target_ht_irq(irq, dest, cfg->vector);
-	cpumask_copy(&desc->affinity, mask);
 }
 
 #endif
@@ -3606,7 +3596,6 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 {
 	struct irq_cfg *cfg;
 	int err;
-	cpumask_t tmp;
 
 	cfg = irq_cfg(irq);
 	err = assign_irq_vector(irq, cfg, TARGET_CPUS);
@@ -3614,8 +3603,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 		struct ht_irq_msg msg;
 		unsigned dest;
 
-		cpus_and(tmp, cfg->domain, tmp);
-		dest = cpu_mask_to_apicid(&tmp);
+		dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);
 
 		msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
 
@@ -3651,7 +3639,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
 		       unsigned long mmr_offset)
 {
-	const cpumask_t *eligible_cpu = &cpumask_of_cpu(cpu);
+	const struct cpumask *eligible_cpu = cpumask_of(cpu);
 	struct irq_cfg *cfg;
 	int mmr_pnode;
 	unsigned long mmr_value;
@@ -3891,7 +3879,7 @@ void __init setup_ioapic_dest(void)
 	int pin, ioapic, irq, irq_entry;
 	struct irq_desc *desc;
 	struct irq_cfg *cfg;
-	const cpumask_t *mask;
+	const struct cpumask *mask;
 
 	if (skip_ioapic_setup == 1)
 		return;