Diffstat (limited to 'arch/x86/kernel/io_apic.c')
 -rw-r--r--  arch/x86/kernel/io_apic.c | 390
 1 file changed, 185 insertions(+), 205 deletions(-)
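This commit converts the irq_cfg cpumask fields from fixed-size cpumask_t values embedded in the structure to cpumask_var_t, which must be allocated and freed explicitly. A minimal sketch of that lifecycle (illustrative only; struct demo and its helpers are made-up names, not part of the patch):

#include <linux/cpumask.h>

/*
 * With CONFIG_CPUMASK_OFFSTACK=y, cpumask_var_t is a pointer that
 * alloc_cpumask_var() must set up; otherwise it is an ordinary
 * one-element cpumask array and allocation is a no-op that always
 * succeeds.  The calling pattern is the same either way.
 */
struct demo {
        cpumask_var_t mask;
};

static int demo_init(struct demo *d, gfp_t gfp)
{
        if (!alloc_cpumask_var(&d->mask, gfp))  /* can fail when off-stack */
                return -ENOMEM;
        cpumask_clear(d->mask);                 /* contents start undefined */
        return 0;
}

static void demo_destroy(struct demo *d)
{
        free_cpumask_var(d->mask);
}

This is why the patch adds allocation calls in arch_early_irq_init() and get_one_free_irq_cfg() below, and why the static irq_cfgx[] table can no longer carry .domain = CPU_MASK_ALL initializers.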
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index f6ea94b74da1..3639442aa7a4 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -136,8 +136,8 @@ static struct irq_pin_list *get_one_free_irq_2_pin(int cpu)
 
 struct irq_cfg {
         struct irq_pin_list *irq_2_pin;
-        cpumask_t domain;
-        cpumask_t old_domain;
+        cpumask_var_t domain;
+        cpumask_var_t old_domain;
         unsigned move_cleanup_count;
         u8 vector;
         u8 move_in_progress : 1;
@@ -152,25 +152,25 @@ static struct irq_cfg irq_cfgx[] = {
 #else
 static struct irq_cfg irq_cfgx[NR_IRQS] = {
 #endif
-        [0]  = { .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR,  },
-        [1]  = { .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR,  },
-        [2]  = { .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR,  },
-        [3]  = { .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR,  },
-        [4]  = { .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR,  },
-        [5]  = { .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR,  },
-        [6]  = { .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR,  },
-        [7]  = { .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR,  },
-        [8]  = { .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR,  },
-        [9]  = { .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR,  },
-        [10] = { .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
-        [11] = { .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
-        [12] = { .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
-        [13] = { .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
-        [14] = { .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
-        [15] = { .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
+        [0]  = { .vector = IRQ0_VECTOR,  },
+        [1]  = { .vector = IRQ1_VECTOR,  },
+        [2]  = { .vector = IRQ2_VECTOR,  },
+        [3]  = { .vector = IRQ3_VECTOR,  },
+        [4]  = { .vector = IRQ4_VECTOR,  },
+        [5]  = { .vector = IRQ5_VECTOR,  },
+        [6]  = { .vector = IRQ6_VECTOR,  },
+        [7]  = { .vector = IRQ7_VECTOR,  },
+        [8]  = { .vector = IRQ8_VECTOR,  },
+        [9]  = { .vector = IRQ9_VECTOR,  },
+        [10] = { .vector = IRQ10_VECTOR, },
+        [11] = { .vector = IRQ11_VECTOR, },
+        [12] = { .vector = IRQ12_VECTOR, },
+        [13] = { .vector = IRQ13_VECTOR, },
+        [14] = { .vector = IRQ14_VECTOR, },
+        [15] = { .vector = IRQ15_VECTOR, },
 };
 
-void __init arch_early_irq_init(void)
+int __init arch_early_irq_init(void)
 {
         struct irq_cfg *cfg;
         struct irq_desc *desc;
@@ -183,7 +183,13 @@ void __init arch_early_irq_init(void)
         for (i = 0; i < count; i++) {
                 desc = irq_to_desc(i);
                 desc->chip_data = &cfg[i];
+                alloc_bootmem_cpumask_var(&cfg[i].domain);
+                alloc_bootmem_cpumask_var(&cfg[i].old_domain);
+                if (i < NR_IRQS_LEGACY)
+                        cpumask_setall(cfg[i].domain);
         }
+
+        return 0;
 }
 
 #ifdef CONFIG_SPARSE_IRQ
@@ -207,12 +213,26 @@ static struct irq_cfg *get_one_free_irq_cfg(int cpu)
         node = cpu_to_node(cpu);
 
         cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
+        if (cfg) {
+                if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
+                        kfree(cfg);
+                        cfg = NULL;
+                } else if (!alloc_cpumask_var_node(&cfg->old_domain,
+                                                   GFP_ATOMIC, node)) {
+                        free_cpumask_var(cfg->domain);
+                        kfree(cfg);
+                        cfg = NULL;
+                } else {
+                        cpumask_clear(cfg->domain);
+                        cpumask_clear(cfg->old_domain);
+                }
+        }
         printk(KERN_DEBUG " alloc irq_cfg on cpu %d node %d\n", cpu, node);
 
         return cfg;
 }
 
-void arch_init_chip_data(struct irq_desc *desc, int cpu)
+int arch_init_chip_data(struct irq_desc *desc, int cpu)
 {
         struct irq_cfg *cfg;
 
@@ -224,6 +244,8 @@ void arch_init_chip_data(struct irq_desc *desc, int cpu)
                         BUG_ON(1);
                 }
         }
+
+        return 0;
 }
 
 #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
@@ -329,13 +351,14 @@ void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
         }
 }
 
-static void set_extra_move_desc(struct irq_desc *desc, cpumask_t mask)
+static void
+set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
 {
         struct irq_cfg *cfg = desc->chip_data;
 
         if (!cfg->move_in_progress) {
                 /* it means that domain is not changed */
-                if (!cpus_intersects(desc->affinity, mask))
+                if (!cpumask_intersects(&desc->affinity, mask))
                         cfg->move_desc_pending = 1;
         }
 }
@@ -350,7 +373,8 @@ static struct irq_cfg *irq_cfg(unsigned int irq)
 #endif
 
 #ifndef CONFIG_NUMA_MIGRATE_IRQ_DESC
-static inline void set_extra_move_desc(struct irq_desc *desc, cpumask_t mask)
+static inline void
+set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
 {
 }
 #endif
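A pattern repeated throughout the rest of the patch: helpers that took a cpumask_t by value, copying up to NR_CPUS bits onto the stack at every call, now take const struct cpumask *. A before/after sketch with a hypothetical helper (not from the patch):

/* Before: the whole bitmap is copied into the callee's frame. */
static void old_style(cpumask_t mask)
{
        if (cpus_empty(mask))
                return;
        /* ... */
}

/* After: only a pointer is passed; the cpus_*() operations have
 * pointer-taking cpumask_*() equivalents. */
static void new_style(const struct cpumask *mask)
{
        if (cpumask_empty(mask))
                return;
        /* ... */
}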
@@ -481,6 +505,26 @@ static void ioapic_mask_entry(int apic, int pin)
 }
 
 #ifdef CONFIG_SMP
+static void send_cleanup_vector(struct irq_cfg *cfg)
+{
+        cpumask_var_t cleanup_mask;
+
+        if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
+                unsigned int i;
+                cfg->move_cleanup_count = 0;
+                for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
+                        cfg->move_cleanup_count++;
+                for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
+                        send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
+        } else {
+                cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
+                cfg->move_cleanup_count = cpumask_weight(cleanup_mask);
+                send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
+                free_cpumask_var(cleanup_mask);
+        }
+        cfg->move_in_progress = 0;
+}
+
 static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
 {
         int apic, pin;
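The new send_cleanup_vector() above also shows the fallback used when the temporary mask cannot be allocated in atomic context: for_each_cpu_and() walks the common bits of two masks directly, so no third mask ever needs to exist. A small sketch of that idiom (illustrative, not patch code):

/* Count the online CPUs in *src without an on-stack cpumask. */
static unsigned int count_online_in(const struct cpumask *src)
{
        unsigned int cpu, n = 0;

        for_each_cpu_and(cpu, src, cpu_online_mask)
                n++;
        return n;
}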
@@ -516,41 +560,55 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq
         }
 }
 
-static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask);
+static int
+assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask);
 
-static void set_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask)
+/*
+ * Either sets desc->affinity to a valid value, and returns cpu_mask_to_apicid
+ * of that, or returns BAD_APICID and leaves desc->affinity untouched.
+ */
+static unsigned int
+set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
 {
         struct irq_cfg *cfg;
-        unsigned long flags;
-        unsigned int dest;
-        cpumask_t tmp;
         unsigned int irq;
 
-        cpus_and(tmp, mask, cpu_online_map);
-        if (cpus_empty(tmp))
-                return;
+        if (!cpumask_intersects(mask, cpu_online_mask))
+                return BAD_APICID;
 
         irq = desc->irq;
         cfg = desc->chip_data;
         if (assign_irq_vector(irq, cfg, mask))
-                return;
+                return BAD_APICID;
 
+        cpumask_and(&desc->affinity, cfg->domain, mask);
         set_extra_move_desc(desc, mask);
+        return cpu_mask_to_apicid_and(&desc->affinity, cpu_online_mask);
+}
 
-        cpus_and(tmp, cfg->domain, mask);
-        dest = cpu_mask_to_apicid(tmp);
-        /*
-         * Only the high 8 bits are valid.
-         */
-        dest = SET_APIC_LOGICAL_ID(dest);
+static void
+set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
+{
+        struct irq_cfg *cfg;
+        unsigned long flags;
+        unsigned int dest;
+        unsigned int irq;
+
+        irq = desc->irq;
+        cfg = desc->chip_data;
 
         spin_lock_irqsave(&ioapic_lock, flags);
-        __target_IO_APIC_irq(irq, dest, cfg);
-        desc->affinity = mask;
+        dest = set_desc_affinity(desc, mask);
+        if (dest != BAD_APICID) {
+                /* Only the high 8 bits are valid. */
+                dest = SET_APIC_LOGICAL_ID(dest);
+                __target_IO_APIC_irq(irq, dest, cfg);
+        }
         spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
-static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+static void
+set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask)
 {
         struct irq_desc *desc;
 
@@ -648,7 +706,7 @@ static void __unmask_IO_APIC_irq(struct irq_cfg *cfg)
 }
 
 #ifdef CONFIG_X86_64
-void io_apic_sync(struct irq_pin_list *entry)
+static void io_apic_sync(struct irq_pin_list *entry)
 {
         /*
          * Synchronize the IO-APIC and the CPU by doing
@@ -1218,7 +1276,8 @@ void unlock_vector_lock(void)
         spin_unlock(&vector_lock);
 }
 
-static int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
+static int
+__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 {
         /*
          * NOTE! The local APIC isn't very good at handling
@@ -1233,49 +1292,49 @@ static int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
          */
         static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
         unsigned int old_vector;
-        int cpu;
+        int cpu, err;
+        cpumask_var_t tmp_mask;
 
         if ((cfg->move_in_progress) || cfg->move_cleanup_count)
                 return -EBUSY;
 
-        /* Only try and allocate irqs on cpus that are present */
-        cpus_and(mask, mask, cpu_online_map);
+        if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
+                return -ENOMEM;
 
         old_vector = cfg->vector;
         if (old_vector) {
-                cpumask_t tmp;
-                cpus_and(tmp, cfg->domain, mask);
-                if (!cpus_empty(tmp))
+                cpumask_and(tmp_mask, mask, cpu_online_mask);
+                cpumask_and(tmp_mask, cfg->domain, tmp_mask);
+                if (!cpumask_empty(tmp_mask)) {
+                        free_cpumask_var(tmp_mask);
                         return 0;
+                }
         }
 
-        for_each_cpu_mask_nr(cpu, mask) {
-                cpumask_t domain, new_mask;
+        /* Only try and allocate irqs on cpus that are present */
+        err = -ENOSPC;
+        for_each_cpu_and(cpu, mask, cpu_online_mask) {
                 int new_cpu;
                 int vector, offset;
 
-                domain = vector_allocation_domain(cpu);
-                cpus_and(new_mask, domain, cpu_online_map);
+                vector_allocation_domain(cpu, tmp_mask);
 
                 vector = current_vector;
                 offset = current_offset;
 next:
                 vector += 8;
                 if (vector >= first_system_vector) {
-                        /* If we run out of vectors on large boxen, must share them. */
+                        /* If out of vectors on large boxen, must share them. */
                         offset = (offset + 1) % 8;
                         vector = FIRST_DEVICE_VECTOR + offset;
                 }
                 if (unlikely(current_vector == vector))
                         continue;
-#ifdef CONFIG_X86_64
-                if (vector == IA32_SYSCALL_VECTOR)
-                        goto next;
-#else
-                if (vector == SYSCALL_VECTOR)
+
+                if (test_bit(vector, used_vectors))
                         goto next;
-#endif
-                for_each_cpu_mask_nr(new_cpu, new_mask)
+
+                for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
                         if (per_cpu(vector_irq, new_cpu)[vector] != -1)
                                 goto next;
                 /* Found one! */
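Note that the vector-search loop above also drops the #ifdef'd syscall-vector checks in favor of a single test_bit() lookup in the used_vectors bitmap, so every reserved vector is skipped uniformly. Roughly how such a bitmap works (a sketch; the real used_vectors declaration and setup live elsewhere in the tree):

/* Reserved system vectors are marked once at setup time... */
DECLARE_BITMAP(used_vectors, NR_VECTORS);

static void reserve_vector(unsigned int vector)
{
        set_bit(vector, used_vectors);
}

/* ...and the allocator then needs only one uniform test. */
static bool vector_is_free(unsigned int vector)
{
        return !test_bit(vector, used_vectors);
}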
@@ -1283,18 +1342,21 @@ next:
                 current_offset = offset;
                 if (old_vector) {
                         cfg->move_in_progress = 1;
-                        cfg->old_domain = cfg->domain;
+                        cpumask_copy(cfg->old_domain, cfg->domain);
                 }
-                for_each_cpu_mask_nr(new_cpu, new_mask)
+                for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
                         per_cpu(vector_irq, new_cpu)[vector] = irq;
                 cfg->vector = vector;
-                cfg->domain = domain;
-                return 0;
+                cpumask_copy(cfg->domain, tmp_mask);
+                err = 0;
+                break;
         }
-        return -ENOSPC;
+        free_cpumask_var(tmp_mask);
+        return err;
 }
 
-static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
+static int
+assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 {
         int err;
         unsigned long flags;
@@ -1307,23 +1369,20 @@ static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
 
 static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
 {
-        cpumask_t mask;
         int cpu, vector;
 
         BUG_ON(!cfg->vector);
 
         vector = cfg->vector;
-        cpus_and(mask, cfg->domain, cpu_online_map);
-        for_each_cpu_mask_nr(cpu, mask)
+        for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
                 per_cpu(vector_irq, cpu)[vector] = -1;
 
         cfg->vector = 0;
-        cpus_clear(cfg->domain);
+        cpumask_clear(cfg->domain);
 
         if (likely(!cfg->move_in_progress))
                 return;
-        cpus_and(mask, cfg->old_domain, cpu_online_map);
-        for_each_cpu_mask_nr(cpu, mask) {
+        for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
                 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
                                 vector++) {
                         if (per_cpu(vector_irq, cpu)[vector] != irq)
@@ -1345,10 +1404,8 @@ void __setup_vector_irq(int cpu)
 
         /* Mark the inuse vectors */
         for_each_irq_desc(irq, desc) {
-                if (!desc)
-                        continue;
                 cfg = desc->chip_data;
-                if (!cpu_isset(cpu, cfg->domain))
+                if (!cpumask_test_cpu(cpu, cfg->domain))
                         continue;
                 vector = cfg->vector;
                 per_cpu(vector_irq, cpu)[vector] = irq;
@@ -1360,7 +1417,7 @@ void __setup_vector_irq(int cpu)
                         continue;
 
                 cfg = irq_cfg(irq);
-                if (!cpu_isset(cpu, cfg->domain))
+                if (!cpumask_test_cpu(cpu, cfg->domain))
                         per_cpu(vector_irq, cpu)[vector] = -1;
         }
 }
@@ -1496,18 +1553,17 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de
 {
         struct irq_cfg *cfg;
         struct IO_APIC_route_entry entry;
-        cpumask_t mask;
+        unsigned int dest;
 
         if (!IO_APIC_IRQ(irq))
                 return;
 
         cfg = desc->chip_data;
 
-        mask = TARGET_CPUS;
-        if (assign_irq_vector(irq, cfg, mask))
+        if (assign_irq_vector(irq, cfg, TARGET_CPUS))
                 return;
 
-        cpus_and(mask, cfg->domain, mask);
+        dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);
 
         apic_printk(APIC_VERBOSE,KERN_DEBUG
                     "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
@@ -1517,8 +1573,7 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de
 
 
         if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
-                               cpu_mask_to_apicid(mask), trigger, polarity,
-                               cfg->vector)) {
+                               dest, trigger, polarity, cfg->vector)) {
                 printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
                        mp_ioapics[apic].mp_apicid, pin);
                 __clear_irq_vector(irq, cfg);
@@ -1730,8 +1785,6 @@ __apicdebuginit(void) print_IO_APIC(void)
         for_each_irq_desc(irq, desc) {
                 struct irq_pin_list *entry;
 
-                if (!desc)
-                        continue;
                 cfg = desc->chip_data;
                 entry = cfg->irq_2_pin;
                 if (!entry)
@@ -2240,7 +2293,7 @@ static int ioapic_retrigger_irq(unsigned int irq)
         unsigned long flags;
 
         spin_lock_irqsave(&vector_lock, flags);
-        send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector);
+        send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
         spin_unlock_irqrestore(&vector_lock, flags);
 
         return 1;
@@ -2289,18 +2342,17 @@ static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);
  * as simple as edge triggered migration and we can do the irq migration
  * with a simple atomic update to IO-APIC RTE.
  */
-static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask)
+static void
+migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
 {
         struct irq_cfg *cfg;
-        cpumask_t tmp, cleanup_mask;
         struct irte irte;
         int modify_ioapic_rte;
         unsigned int dest;
         unsigned long flags;
         unsigned int irq;
 
-        cpus_and(tmp, mask, cpu_online_map);
-        if (cpus_empty(tmp))
+        if (!cpumask_intersects(mask, cpu_online_mask))
                 return;
 
         irq = desc->irq;
@@ -2313,8 +2365,7 @@ static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask)
 
         set_extra_move_desc(desc, mask);
 
-        cpus_and(tmp, cfg->domain, mask);
-        dest = cpu_mask_to_apicid(tmp);
+        dest = cpu_mask_to_apicid_and(cfg->domain, mask);
 
         modify_ioapic_rte = desc->status & IRQ_LEVEL;
         if (modify_ioapic_rte) {
@@ -2331,14 +2382,10 @@ static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask)
          */
         modify_irte(irq, &irte);
 
-        if (cfg->move_in_progress) {
-                cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
-                cfg->move_cleanup_count = cpus_weight(cleanup_mask);
-                send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
-                cfg->move_in_progress = 0;
-        }
+        if (cfg->move_in_progress)
+                send_cleanup_vector(cfg);
 
-        desc->affinity = mask;
+        cpumask_copy(&desc->affinity, mask);
 }
 
 static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
@@ -2360,11 +2407,11 @@ static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
         }
 
         /* everthing is clear. we have right of way */
-        migrate_ioapic_irq_desc(desc, desc->pending_mask);
+        migrate_ioapic_irq_desc(desc, &desc->pending_mask);
 
         ret = 0;
         desc->status &= ~IRQ_MOVE_PENDING;
-        cpus_clear(desc->pending_mask);
+        cpumask_clear(&desc->pending_mask);
 
 unmask:
         unmask_IO_APIC_irq_desc(desc);
@@ -2378,9 +2425,6 @@ static void ir_irq_migration(struct work_struct *work)
         struct irq_desc *desc;
 
         for_each_irq_desc(irq, desc) {
-                if (!desc)
-                        continue;
-
                 if (desc->status & IRQ_MOVE_PENDING) {
                         unsigned long flags;
 
@@ -2392,7 +2436,7 @@ static void ir_irq_migration(struct work_struct *work)
                         continue;
                 }
 
-                desc->chip->set_affinity(irq, desc->pending_mask);
+                desc->chip->set_affinity(irq, &desc->pending_mask);
                 spin_unlock_irqrestore(&desc->lock, flags);
         }
 }
@@ -2401,18 +2445,20 @@ static void ir_irq_migration(struct work_struct *work)
 /*
  * Migrates the IRQ destination in the process context.
  */
-static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask)
+static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
+                                            const struct cpumask *mask)
 {
         if (desc->status & IRQ_LEVEL) {
                 desc->status |= IRQ_MOVE_PENDING;
-                desc->pending_mask = mask;
+                cpumask_copy(&desc->pending_mask, mask);
                 migrate_irq_remapped_level_desc(desc);
                 return;
         }
 
         migrate_ioapic_irq_desc(desc, mask);
 }
-static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+static void set_ir_ioapic_affinity_irq(unsigned int irq,
+                                       const struct cpumask *mask)
 {
         struct irq_desc *desc = irq_to_desc(irq);
 
@@ -2447,7 +2493,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
                 if (!cfg->move_cleanup_count)
                         goto unlock;
 
-                if ((vector == cfg->vector) && cpu_isset(me, cfg->domain))
+                if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
                         goto unlock;
 
                 __get_cpu_var(vector_irq)[vector] = -1;
@@ -2484,20 +2530,14 @@ static void irq_complete_move(struct irq_desc **descp)
 
         vector = ~get_irq_regs()->orig_ax;
         me = smp_processor_id();
-        if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
-                cpumask_t cleanup_mask;
-
 #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
-                *descp = desc = move_irq_desc(desc, me);
-                /* get the new one */
-                cfg = desc->chip_data;
+        *descp = desc = move_irq_desc(desc, me);
+        /* get the new one */
+        cfg = desc->chip_data;
 #endif
 
-                cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
-                cfg->move_cleanup_count = cpus_weight(cleanup_mask);
-                send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
-                cfg->move_in_progress = 0;
-        }
+        if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
+                send_cleanup_vector(cfg);
 }
 #else
 static inline void irq_complete_move(struct irq_desc **descp) {}
@@ -2670,9 +2710,6 @@ static inline void init_IO_APIC_traps(void)
          * 0x80, because int 0x80 is hm, kind of importantish. ;)
          */
         for_each_irq_desc(irq, desc) {
-                if (!desc)
-                        continue;
-
                 cfg = desc->chip_data;
                 if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
                         /*
@@ -3222,16 +3259,13 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
         struct irq_cfg *cfg;
         int err;
         unsigned dest;
-        cpumask_t tmp;
 
         cfg = irq_cfg(irq);
-        tmp = TARGET_CPUS;
-        err = assign_irq_vector(irq, cfg, tmp);
+        err = assign_irq_vector(irq, cfg, TARGET_CPUS);
         if (err)
                 return err;
 
-        cpus_and(tmp, cfg->domain, tmp);
-        dest = cpu_mask_to_apicid(tmp);
+        dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);
 
 #ifdef CONFIG_INTR_REMAP
         if (irq_remapped(irq)) {
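The cpu_mask_to_apicid_and() call above folds the old two-step sequence, intersect into a temporary and then convert, into one call. In terms of the old API, the replacement behaves roughly like this (illustrative, not patch code):

/* old: an on-stack temporary just to hold the intersection */
cpumask_t tmp;
cpus_and(tmp, cfg->domain, TARGET_CPUS);
dest = cpu_mask_to_apicid(tmp);

/* new: the intersection happens inside the helper */
dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);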
@@ -3285,26 +3319,18 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
 }
 
 #ifdef CONFIG_SMP
-static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
+static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
         struct irq_desc *desc = irq_to_desc(irq);
         struct irq_cfg *cfg;
         struct msi_msg msg;
         unsigned int dest;
-        cpumask_t tmp;
 
-        cpus_and(tmp, mask, cpu_online_map);
-        if (cpus_empty(tmp))
+        dest = set_desc_affinity(desc, mask);
+        if (dest == BAD_APICID)
                 return;
 
         cfg = desc->chip_data;
-        if (assign_irq_vector(irq, cfg, mask))
-                return;
-
-        set_extra_move_desc(desc, mask);
-
-        cpus_and(tmp, cfg->domain, mask);
-        dest = cpu_mask_to_apicid(tmp);
 
         read_msi_msg_desc(desc, &msg);
 
@@ -3314,37 +3340,27 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
         msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
         write_msi_msg_desc(desc, &msg);
-        desc->affinity = mask;
 }
 #ifdef CONFIG_INTR_REMAP
 /*
  * Migrate the MSI irq to another cpumask. This migration is
  * done in the process context using interrupt-remapping hardware.
  */
-static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
+static void
+ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
         struct irq_desc *desc = irq_to_desc(irq);
-        struct irq_cfg *cfg;
+        struct irq_cfg *cfg = desc->chip_data;
         unsigned int dest;
-        cpumask_t tmp, cleanup_mask;
         struct irte irte;
 
-        cpus_and(tmp, mask, cpu_online_map);
-        if (cpus_empty(tmp))
-                return;
-
         if (get_irte(irq, &irte))
                 return;
 
-        cfg = desc->chip_data;
-        if (assign_irq_vector(irq, cfg, mask))
+        dest = set_desc_affinity(desc, mask);
+        if (dest == BAD_APICID)
                 return;
 
-        set_extra_move_desc(desc, mask);
-
-        cpus_and(tmp, cfg->domain, mask);
-        dest = cpu_mask_to_apicid(tmp);
-
         irte.vector = cfg->vector;
         irte.dest_id = IRTE_DEST(dest);
 
@@ -3358,14 +3374,8 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
          * at the new destination. So, time to cleanup the previous
          * vector allocation.
          */
-        if (cfg->move_in_progress) {
-                cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
-                cfg->move_cleanup_count = cpus_weight(cleanup_mask);
-                send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
-                cfg->move_in_progress = 0;
-        }
-
-        desc->affinity = mask;
+        if (cfg->move_in_progress)
+                send_cleanup_vector(cfg);
 }
 
 #endif
@@ -3556,26 +3566,18 @@ void arch_teardown_msi_irq(unsigned int irq)
 
 #ifdef CONFIG_DMAR
 #ifdef CONFIG_SMP
-static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
+static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
         struct irq_desc *desc = irq_to_desc(irq);
         struct irq_cfg *cfg;
         struct msi_msg msg;
         unsigned int dest;
-        cpumask_t tmp;
 
-        cpus_and(tmp, mask, cpu_online_map);
-        if (cpus_empty(tmp))
+        dest = set_desc_affinity(desc, mask);
+        if (dest == BAD_APICID)
                 return;
 
         cfg = desc->chip_data;
-        if (assign_irq_vector(irq, cfg, mask))
-                return;
-
-        set_extra_move_desc(desc, mask);
-
-        cpus_and(tmp, cfg->domain, mask);
-        dest = cpu_mask_to_apicid(tmp);
 
         dmar_msi_read(irq, &msg);
 
@@ -3585,7 +3587,6 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
         msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
         dmar_msi_write(irq, &msg);
-        desc->affinity = mask;
 }
 
 #endif /* CONFIG_SMP */
@@ -3619,26 +3620,18 @@ int arch_setup_dmar_msi(unsigned int irq)
 #ifdef CONFIG_HPET_TIMER
 
 #ifdef CONFIG_SMP
-static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
+static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
         struct irq_desc *desc = irq_to_desc(irq);
         struct irq_cfg *cfg;
         struct msi_msg msg;
         unsigned int dest;
-        cpumask_t tmp;
 
-        cpus_and(tmp, mask, cpu_online_map);
-        if (cpus_empty(tmp))
+        dest = set_desc_affinity(desc, mask);
+        if (dest == BAD_APICID)
                 return;
 
         cfg = desc->chip_data;
-        if (assign_irq_vector(irq, cfg, mask))
-                return;
-
-        set_extra_move_desc(desc, mask);
-
-        cpus_and(tmp, cfg->domain, mask);
-        dest = cpu_mask_to_apicid(tmp);
 
         hpet_msi_read(irq, &msg);
 
@@ -3648,7 +3641,6 @@ static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
         msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
         hpet_msi_write(irq, &msg);
-        desc->affinity = mask;
 }
 
 #endif /* CONFIG_SMP */
@@ -3703,28 +3695,19 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
         write_ht_irq_msg(irq, &msg);
 }
 
-static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
+static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
         struct irq_desc *desc = irq_to_desc(irq);
         struct irq_cfg *cfg;
         unsigned int dest;
-        cpumask_t tmp;
 
-        cpus_and(tmp, mask, cpu_online_map);
-        if (cpus_empty(tmp))
+        dest = set_desc_affinity(desc, mask);
+        if (dest == BAD_APICID)
                 return;
 
         cfg = desc->chip_data;
-        if (assign_irq_vector(irq, cfg, mask))
-                return;
-
-        set_extra_move_desc(desc, mask);
-
-        cpus_and(tmp, cfg->domain, mask);
-        dest = cpu_mask_to_apicid(tmp);
 
         target_ht_irq(irq, dest, cfg->vector);
-        desc->affinity = mask;
 }
 
 #endif
@@ -3744,17 +3727,14 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 {
         struct irq_cfg *cfg;
         int err;
-        cpumask_t tmp;
 
         cfg = irq_cfg(irq);
-        tmp = TARGET_CPUS;
-        err = assign_irq_vector(irq, cfg, tmp);
+        err = assign_irq_vector(irq, cfg, TARGET_CPUS);
         if (!err) {
                 struct ht_irq_msg msg;
                 unsigned dest;
 
-                cpus_and(tmp, cfg->domain, tmp);
-                dest = cpu_mask_to_apicid(tmp);
+                dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);
 
                 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
 
@@ -3790,7 +3770,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
                        unsigned long mmr_offset)
 {
-        const cpumask_t *eligible_cpu = get_cpu_mask(cpu);
+        const struct cpumask *eligible_cpu = cpumask_of(cpu);
         struct irq_cfg *cfg;
         int mmr_pnode;
         unsigned long mmr_value;
@@ -3800,7 +3780,7 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
 
         cfg = irq_cfg(irq);
 
-        err = assign_irq_vector(irq, cfg, *eligible_cpu);
+        err = assign_irq_vector(irq, cfg, eligible_cpu);
         if (err != 0)
                 return err;
 
@@ -3819,7 +3799,7 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
         entry->polarity = 0;
         entry->trigger = 0;
         entry->mask = 0;
-        entry->dest = cpu_mask_to_apicid(*eligible_cpu);
+        entry->dest = cpu_mask_to_apicid(eligible_cpu);
 
         mmr_pnode = uv_blade_to_pnode(mmr_blade);
         uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
@@ -4030,7 +4010,7 @@ void __init setup_ioapic_dest(void)
         int pin, ioapic, irq, irq_entry;
         struct irq_desc *desc;
         struct irq_cfg *cfg;
-        cpumask_t mask;
+        const struct cpumask *mask;
 
         if (skip_ioapic_setup == 1)
                 return;
@@ -4061,7 +4041,7 @@ void __init setup_ioapic_dest(void)
                  */
                 if (desc->status &
                     (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
-                        mask = desc->affinity;
+                        mask = &desc->affinity;
                 else
                         mask = TARGET_CPUS;
 