Diffstat (limited to 'arch/x86/kernel/io_apic.c')
 -rw-r--r--  arch/x86/kernel/io_apic.c | 372
 1 file changed, 179 insertions(+), 193 deletions(-)
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index 74917658b004..3639442aa7a4 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -136,8 +136,8 @@ static struct irq_pin_list *get_one_free_irq_2_pin(int cpu)
 
 struct irq_cfg {
 	struct irq_pin_list *irq_2_pin;
-	cpumask_t domain;
-	cpumask_t old_domain;
+	cpumask_var_t domain;
+	cpumask_var_t old_domain;
 	unsigned move_cleanup_count;
 	u8 vector;
 	u8 move_in_progress : 1;
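[Editor's note] The core of the conversion is this pair of fields: cpumask_t is a fixed NR_CPUS-bit array embedded in the struct, while cpumask_var_t becomes a pointer when CONFIG_CPUMASK_OFFSTACK=y (and stays a one-element array otherwise), so each irq_cfg stops embedding two full masks on large-NR_CPUS builds. A minimal sketch of the discipline the new type imposes — example_cfg and the two helpers are invented names for illustration; the cpumask calls are the real API:

	#include <linux/cpumask.h>
	#include <linux/slab.h>

	struct example_cfg {
		cpumask_var_t domain;		/* pointer or 1-element array */
	};

	/* hypothetical helper: every cpumask_var_t must be allocated first */
	static int example_init(struct example_cfg *cfg)
	{
		if (!alloc_cpumask_var(&cfg->domain, GFP_KERNEL))
			return -ENOMEM;	/* fails only if CONFIG_CPUMASK_OFFSTACK=y */
		cpumask_clear(cfg->domain);	/* alloc_cpumask_var() does not zero */
		return 0;
	}

	static void example_destroy(struct example_cfg *cfg)
	{
		free_cpumask_var(cfg->domain);	/* no-op on !OFFSTACK builds */
	}

Everything downstream follows from this: masks are passed as const struct cpumask *, assigned with cpumask_copy() rather than =, and tested with the pointer-taking cpumask_*() helpers instead of the old cpus_*() macros.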
@@ -152,22 +152,22 @@ static struct irq_cfg irq_cfgx[] = {
 #else
 static struct irq_cfg irq_cfgx[NR_IRQS] = {
 #endif
-	[0] = { .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, },
-	[1] = { .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, },
-	[2] = { .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, },
-	[3] = { .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR, },
-	[4] = { .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR, },
-	[5] = { .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR, },
-	[6] = { .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR, },
-	[7] = { .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR, },
-	[8] = { .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR, },
-	[9] = { .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR, },
-	[10] = { .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
-	[11] = { .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
-	[12] = { .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
-	[13] = { .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
-	[14] = { .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
-	[15] = { .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
+	[0] = { .vector = IRQ0_VECTOR, },
+	[1] = { .vector = IRQ1_VECTOR, },
+	[2] = { .vector = IRQ2_VECTOR, },
+	[3] = { .vector = IRQ3_VECTOR, },
+	[4] = { .vector = IRQ4_VECTOR, },
+	[5] = { .vector = IRQ5_VECTOR, },
+	[6] = { .vector = IRQ6_VECTOR, },
+	[7] = { .vector = IRQ7_VECTOR, },
+	[8] = { .vector = IRQ8_VECTOR, },
+	[9] = { .vector = IRQ9_VECTOR, },
+	[10] = { .vector = IRQ10_VECTOR, },
+	[11] = { .vector = IRQ11_VECTOR, },
+	[12] = { .vector = IRQ12_VECTOR, },
+	[13] = { .vector = IRQ13_VECTOR, },
+	[14] = { .vector = IRQ14_VECTOR, },
+	[15] = { .vector = IRQ15_VECTOR, },
 };
 
 int __init arch_early_irq_init(void)
@@ -183,6 +183,10 @@ int __init arch_early_irq_init(void)
 	for (i = 0; i < count; i++) {
 		desc = irq_to_desc(i);
 		desc->chip_data = &cfg[i];
+		alloc_bootmem_cpumask_var(&cfg[i].domain);
+		alloc_bootmem_cpumask_var(&cfg[i].old_domain);
+		if (i < NR_IRQS_LEGACY)
+			cpumask_setall(cfg[i].domain);
 	}
 
 	return 0;
@@ -209,6 +213,20 @@ static struct irq_cfg *get_one_free_irq_cfg(int cpu)
 	node = cpu_to_node(cpu);
 
 	cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
+	if (cfg) {
+		if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
+			kfree(cfg);
+			cfg = NULL;
+		} else if (!alloc_cpumask_var_node(&cfg->old_domain,
+							  GFP_ATOMIC, node)) {
+			free_cpumask_var(cfg->domain);
+			kfree(cfg);
+			cfg = NULL;
+		} else {
+			cpumask_clear(cfg->domain);
+			cpumask_clear(cfg->old_domain);
+		}
+	}
 	printk(KERN_DEBUG " alloc irq_cfg on cpu %d node %d\n", cpu, node);
 
 	return cfg;
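[Editor's note] Together with the arch_early_irq_init() hunk above, this gives the file two allocation paths for the same two fields. At early boot the slab allocator is not up yet, so the legacy descriptors get their masks from the boot allocator and only IRQ0..IRQ15 inherit the old CPU_MASK_ALL default via cpumask_setall(); at runtime, sparse-irq descriptors go through the slab path with node-local placement and a manual unwind on failure. The two allocators differ in exactly the way that explains the unwind ladder (signatures from the cpumask API of this kernel generation):

	/* Boot path: cannot fail, so there is nothing to unwind. */
	void alloc_bootmem_cpumask_var(cpumask_var_t *mask);

	/*
	 * Runtime path: returns false on allocation failure, which is why
	 * get_one_free_irq_cfg() above frees cfg->domain and then the cfg
	 * itself before returning NULL.
	 */
	bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);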
@@ -333,13 +351,14 @@ void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
 	}
 }
 
-static void set_extra_move_desc(struct irq_desc *desc, cpumask_t mask)
+static void
+set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
 {
 	struct irq_cfg *cfg = desc->chip_data;
 
 	if (!cfg->move_in_progress) {
 		/* it means that domain is not changed */
-		if (!cpus_intersects(desc->affinity, mask))
+		if (!cpumask_intersects(&desc->affinity, mask))
 			cfg->move_desc_pending = 1;
 	}
 }
@@ -354,7 +373,8 @@ static struct irq_cfg *irq_cfg(unsigned int irq)
 #endif
 
 #ifndef CONFIG_NUMA_MIGRATE_IRQ_DESC
-static inline void set_extra_move_desc(struct irq_desc *desc, cpumask_t mask)
+static inline void
+set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
 {
 }
 #endif
@@ -485,6 +505,26 @@ static void ioapic_mask_entry(int apic, int pin)
 }
 
 #ifdef CONFIG_SMP
+static void send_cleanup_vector(struct irq_cfg *cfg)
+{
+	cpumask_var_t cleanup_mask;
+
+	if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
+		unsigned int i;
+		cfg->move_cleanup_count = 0;
+		for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
+			cfg->move_cleanup_count++;
+		for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
+			send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
+	} else {
+		cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
+		cfg->move_cleanup_count = cpumask_weight(cleanup_mask);
+		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
+		free_cpumask_var(cleanup_mask);
+	}
+	cfg->move_in_progress = 0;
+}
+
 static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
 {
 	int apic, pin;
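[Editor's note] send_cleanup_vector() centralizes the "tell every CPU in the old domain to release its stale vector" IPI that three call sites below previously open-coded. The interesting part is the failure path: this can run in interrupt context, so the scratch mask is allocated with GFP_ATOMIC, and if even that fails the function degrades to one IPI per CPU, iterating the source mask directly instead of materializing the intersection. A sketch of that allocate-or-iterate shape in isolation — illustrative only; ipi_mask_or_each is a made-up name, send_IPI_mask() is the real call used in this file:

	static void ipi_mask_or_each(const struct cpumask *src, int vector)
	{
		cpumask_var_t tmp;
		int cpu;

		if (likely(alloc_cpumask_var(&tmp, GFP_ATOMIC))) {
			/* fast path: build the intersection once, one IPI covers it */
			cpumask_and(tmp, src, cpu_online_mask);
			send_IPI_mask(tmp, vector);
			free_cpumask_var(tmp);
		} else {
			/* degraded path: no allocation, one IPI per online CPU */
			for_each_cpu_and(cpu, src, cpu_online_mask)
				send_IPI_mask(cpumask_of(cpu), vector);
		}
	}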
@@ -520,41 +560,55 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
 	}
 }
 
-static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask);
+static int
+assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask);
 
-static void set_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask)
+/*
+ * Either sets desc->affinity to a valid value, and returns cpu_mask_to_apicid
+ * of that, or returns BAD_APICID and leaves desc->affinity untouched.
+ */
+static unsigned int
+set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
 {
 	struct irq_cfg *cfg;
-	unsigned long flags;
-	unsigned int dest;
-	cpumask_t tmp;
 	unsigned int irq;
 
-	cpus_and(tmp, mask, cpu_online_map);
-	if (cpus_empty(tmp))
-		return;
+	if (!cpumask_intersects(mask, cpu_online_mask))
+		return BAD_APICID;
 
 	irq = desc->irq;
 	cfg = desc->chip_data;
 	if (assign_irq_vector(irq, cfg, mask))
-		return;
+		return BAD_APICID;
 
+	cpumask_and(&desc->affinity, cfg->domain, mask);
 	set_extra_move_desc(desc, mask);
+	return cpu_mask_to_apicid_and(&desc->affinity, cpu_online_mask);
+}
 
-	cpus_and(tmp, cfg->domain, mask);
-	dest = cpu_mask_to_apicid(tmp);
-	/*
-	 * Only the high 8 bits are valid.
-	 */
-	dest = SET_APIC_LOGICAL_ID(dest);
+static void
+set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
+{
+	struct irq_cfg *cfg;
+	unsigned long flags;
+	unsigned int dest;
+	unsigned int irq;
+
+	irq = desc->irq;
+	cfg = desc->chip_data;
 
 	spin_lock_irqsave(&ioapic_lock, flags);
-	__target_IO_APIC_irq(irq, dest, cfg);
-	desc->affinity = mask;
+	dest = set_desc_affinity(desc, mask);
+	if (dest != BAD_APICID) {
+		/* Only the high 8 bits are valid. */
+		dest = SET_APIC_LOGICAL_ID(dest);
+		__target_IO_APIC_irq(irq, dest, cfg);
+	}
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
-static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+static void
+set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask)
 {
 	struct irq_desc *desc;
 
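[Editor's note] set_desc_affinity() is the other new helper, and it is what lets every ->set_affinity method later in the patch shrink: the common prologue (check the mask against the online CPUs, assign a vector, update desc->affinity, compute the destination APIC ID) lives in one place, with BAD_APICID as the single failure signal. A converted method then reduces to the shape below — an illustrative skeleton, not code from the patch; program_hw_dest() stands in for the device-specific reprogramming done by the MSI/HT/DMAR/HPET variants:

	static void example_set_affinity(unsigned int irq,
					 const struct cpumask *mask)
	{
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_cfg *cfg = desc->chip_data;
		unsigned int dest;

		dest = set_desc_affinity(desc, mask);	/* all the common work */
		if (dest == BAD_APICID)
			return;		/* offline mask, or no vector available */

		program_hw_dest(cfg, dest);	/* device-specific: MSI msg, HT msg... */
	}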
@@ -652,7 +706,7 @@ static void __unmask_IO_APIC_irq(struct irq_cfg *cfg)
 }
 
 #ifdef CONFIG_X86_64
-void io_apic_sync(struct irq_pin_list *entry)
+static void io_apic_sync(struct irq_pin_list *entry)
 {
 	/*
 	 * Synchronize the IO-APIC and the CPU by doing
@@ -1222,7 +1276,8 @@ void unlock_vector_lock(void)
 	spin_unlock(&vector_lock);
 }
 
-static int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
+static int
+__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 {
 	/*
 	 * NOTE! The local APIC isn't very good at handling
@@ -1237,49 +1292,49 @@ static int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
 	 */
 	static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
 	unsigned int old_vector;
-	int cpu;
+	int cpu, err;
+	cpumask_var_t tmp_mask;
 
 	if ((cfg->move_in_progress) || cfg->move_cleanup_count)
 		return -EBUSY;
 
-	/* Only try and allocate irqs on cpus that are present */
-	cpus_and(mask, mask, cpu_online_map);
+	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
+		return -ENOMEM;
 
 	old_vector = cfg->vector;
 	if (old_vector) {
-		cpumask_t tmp;
-		cpus_and(tmp, cfg->domain, mask);
-		if (!cpus_empty(tmp))
+		cpumask_and(tmp_mask, mask, cpu_online_mask);
+		cpumask_and(tmp_mask, cfg->domain, tmp_mask);
+		if (!cpumask_empty(tmp_mask)) {
+			free_cpumask_var(tmp_mask);
 			return 0;
+		}
 	}
 
-	for_each_cpu_mask_nr(cpu, mask) {
-		cpumask_t domain, new_mask;
+	/* Only try and allocate irqs on cpus that are present */
+	err = -ENOSPC;
+	for_each_cpu_and(cpu, mask, cpu_online_mask) {
 		int new_cpu;
 		int vector, offset;
 
-		domain = vector_allocation_domain(cpu);
-		cpus_and(new_mask, domain, cpu_online_map);
+		vector_allocation_domain(cpu, tmp_mask);
 
 		vector = current_vector;
 		offset = current_offset;
next:
 		vector += 8;
 		if (vector >= first_system_vector) {
-			/* If we run out of vectors on large boxen, must share them. */
+			/* If out of vectors on large boxen, must share them. */
 			offset = (offset + 1) % 8;
 			vector = FIRST_DEVICE_VECTOR + offset;
 		}
 		if (unlikely(current_vector == vector))
 			continue;
-#ifdef CONFIG_X86_64
-		if (vector == IA32_SYSCALL_VECTOR)
-			goto next;
-#else
-		if (vector == SYSCALL_VECTOR)
+
+		if (test_bit(vector, used_vectors))
 			goto next;
-#endif
-		for_each_cpu_mask_nr(new_cpu, new_mask)
+
+		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
 			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
 				goto next;
 		/* Found one! */
@@ -1287,18 +1342,21 @@ next:
 		current_offset = offset;
 		if (old_vector) {
 			cfg->move_in_progress = 1;
-			cfg->old_domain = cfg->domain;
+			cpumask_copy(cfg->old_domain, cfg->domain);
 		}
-		for_each_cpu_mask_nr(new_cpu, new_mask)
+		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
 			per_cpu(vector_irq, new_cpu)[vector] = irq;
 		cfg->vector = vector;
-		cfg->domain = domain;
-		return 0;
+		cpumask_copy(cfg->domain, tmp_mask);
+		err = 0;
+		break;
 	}
-	return -ENOSPC;
+	free_cpumask_var(tmp_mask);
+	return err;
 }
 
-static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
+static int
+assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 {
 	int err;
 	unsigned long flags;
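[Editor's note] The search policy itself is unchanged by the patch: candidate vectors are tried in steps of 8, which spreads consecutive IRQs across the APIC's 16-entry priority classes, and when the walk reaches first_system_vector it wraps back to FIRST_DEVICE_VECTOR with the next of the 8 offsets, so later passes fill in the remaining slot within each class. A stand-alone model of just the stepping, runnable as ordinary C — the constants are placeholders for the kernel's values, and the full-wrap (current_vector == vector) and per-CPU availability checks are omitted:

	#include <stdio.h>

	#define FIRST_DEVICE_VECTOR	0x41	/* placeholder */
	#define FIRST_SYSTEM_VECTOR	0xef	/* placeholder */

	int main(void)
	{
		int vector = FIRST_DEVICE_VECTOR;
		int offset = 1;

		for (int i = 0; i < 24; i++) {
			vector += 8;	/* next slot, different priority class */
			if (vector >= FIRST_SYSTEM_VECTOR) {
				offset = (offset + 1) % 8;	/* rotate lane */
				vector = FIRST_DEVICE_VECTOR + offset;
			}
			printf("candidate 0x%02x (priority class %d)\n",
			       vector, vector >> 4);
		}
		return 0;
	}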
@@ -1311,23 +1369,20 @@ static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
 
 static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
 {
-	cpumask_t mask;
 	int cpu, vector;
 
 	BUG_ON(!cfg->vector);
 
 	vector = cfg->vector;
-	cpus_and(mask, cfg->domain, cpu_online_map);
-	for_each_cpu_mask_nr(cpu, mask)
+	for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
 		per_cpu(vector_irq, cpu)[vector] = -1;
 
 	cfg->vector = 0;
-	cpus_clear(cfg->domain);
+	cpumask_clear(cfg->domain);
 
 	if (likely(!cfg->move_in_progress))
 		return;
-	cpus_and(mask, cfg->old_domain, cpu_online_map);
-	for_each_cpu_mask_nr(cpu, mask) {
+	for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
 		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
 								vector++) {
 			if (per_cpu(vector_irq, cpu)[vector] != irq)
@@ -1350,7 +1405,7 @@ void __setup_vector_irq(int cpu)
 	/* Mark the inuse vectors */
 	for_each_irq_desc(irq, desc) {
 		cfg = desc->chip_data;
-		if (!cpu_isset(cpu, cfg->domain))
+		if (!cpumask_test_cpu(cpu, cfg->domain))
 			continue;
 		vector = cfg->vector;
 		per_cpu(vector_irq, cpu)[vector] = irq;
@@ -1362,7 +1417,7 @@ void __setup_vector_irq(int cpu)
 			continue;
 
 		cfg = irq_cfg(irq);
-		if (!cpu_isset(cpu, cfg->domain))
+		if (!cpumask_test_cpu(cpu, cfg->domain))
 			per_cpu(vector_irq, cpu)[vector] = -1;
 	}
 }
@@ -1498,18 +1553,17 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de
 {
 	struct irq_cfg *cfg;
 	struct IO_APIC_route_entry entry;
-	cpumask_t mask;
+	unsigned int dest;
 
 	if (!IO_APIC_IRQ(irq))
 		return;
 
 	cfg = desc->chip_data;
 
-	mask = TARGET_CPUS;
-	if (assign_irq_vector(irq, cfg, mask))
+	if (assign_irq_vector(irq, cfg, TARGET_CPUS))
 		return;
 
-	cpus_and(mask, cfg->domain, mask);
+	dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);
 
 	apic_printk(APIC_VERBOSE,KERN_DEBUG
 		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
@@ -1519,8 +1573,7 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de
 
 
 	if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
-			       cpu_mask_to_apicid(mask), trigger, polarity,
-			       cfg->vector)) {
+			       dest, trigger, polarity, cfg->vector)) {
 		printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
 		       mp_ioapics[apic].mp_apicid, pin);
 		__clear_irq_vector(irq, cfg);
@@ -2240,7 +2293,7 @@ static int ioapic_retrigger_irq(unsigned int irq)
 	unsigned long flags;
 
 	spin_lock_irqsave(&vector_lock, flags);
-	send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector);
+	send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
 	spin_unlock_irqrestore(&vector_lock, flags);
 
 	return 1;
@@ -2289,18 +2342,17 @@ static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);
  * as simple as edge triggered migration and we can do the irq migration
  * with a simple atomic update to IO-APIC RTE.
  */
-static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask)
+static void
+migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
 {
 	struct irq_cfg *cfg;
-	cpumask_t tmp, cleanup_mask;
 	struct irte irte;
 	int modify_ioapic_rte;
 	unsigned int dest;
 	unsigned long flags;
 	unsigned int irq;
 
-	cpus_and(tmp, mask, cpu_online_map);
-	if (cpus_empty(tmp))
+	if (!cpumask_intersects(mask, cpu_online_mask))
 		return;
 
 	irq = desc->irq;
@@ -2313,8 +2365,7 @@ static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask)
 
 	set_extra_move_desc(desc, mask);
 
-	cpus_and(tmp, cfg->domain, mask);
-	dest = cpu_mask_to_apicid(tmp);
+	dest = cpu_mask_to_apicid_and(cfg->domain, mask);
 
 	modify_ioapic_rte = desc->status & IRQ_LEVEL;
 	if (modify_ioapic_rte) {
@@ -2331,14 +2382,10 @@ static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask)
 	 */
 	modify_irte(irq, &irte);
 
-	if (cfg->move_in_progress) {
-		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
-		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
-		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
-		cfg->move_in_progress = 0;
-	}
+	if (cfg->move_in_progress)
+		send_cleanup_vector(cfg);
 
-	desc->affinity = mask;
+	cpumask_copy(&desc->affinity, mask);
 }
 
 static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
@@ -2360,11 +2407,11 @@ static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
 	}
 
 	/* everthing is clear. we have right of way */
-	migrate_ioapic_irq_desc(desc, desc->pending_mask);
+	migrate_ioapic_irq_desc(desc, &desc->pending_mask);
 
 	ret = 0;
 	desc->status &= ~IRQ_MOVE_PENDING;
-	cpus_clear(desc->pending_mask);
+	cpumask_clear(&desc->pending_mask);
 
 unmask:
 	unmask_IO_APIC_irq_desc(desc);
@@ -2389,7 +2436,7 @@ static void ir_irq_migration(struct work_struct *work)
 			continue;
 		}
 
-		desc->chip->set_affinity(irq, desc->pending_mask);
+		desc->chip->set_affinity(irq, &desc->pending_mask);
 		spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }
@@ -2398,18 +2445,20 @@ static void ir_irq_migration(struct work_struct *work)
 /*
  * Migrates the IRQ destination in the process context.
  */
-static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask)
+static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
+					    const struct cpumask *mask)
 {
 	if (desc->status & IRQ_LEVEL) {
 		desc->status |= IRQ_MOVE_PENDING;
-		desc->pending_mask = mask;
+		cpumask_copy(&desc->pending_mask, mask);
 		migrate_irq_remapped_level_desc(desc);
 		return;
 	}
 
 	migrate_ioapic_irq_desc(desc, mask);
 }
-static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+static void set_ir_ioapic_affinity_irq(unsigned int irq,
+				       const struct cpumask *mask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 
@@ -2444,7 +2493,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
 		if (!cfg->move_cleanup_count)
 			goto unlock;
 
-		if ((vector == cfg->vector) && cpu_isset(me, cfg->domain))
+		if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
 			goto unlock;
 
 		__get_cpu_var(vector_irq)[vector] = -1;
@@ -2481,20 +2530,14 @@ static void irq_complete_move(struct irq_desc **descp)
 
 	vector = ~get_irq_regs()->orig_ax;
 	me = smp_processor_id();
-	if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
-		cpumask_t cleanup_mask;
-
 #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
 		*descp = desc = move_irq_desc(desc, me);
 		/* get the new one */
 		cfg = desc->chip_data;
 #endif
 
-		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
-		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
-		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
-		cfg->move_in_progress = 0;
-	}
+	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
+		send_cleanup_vector(cfg);
 }
 #else
 static inline void irq_complete_move(struct irq_desc **descp) {}
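[Editor's note] Both cleanup sites now funnel through send_cleanup_vector(), and this hunk also changes the ordering: the descriptor is migrated to the interrupting CPU's node unconditionally, and only then, if the interrupt that just fired arrived on the new vector on a CPU in the new domain, is the old domain told to release the stale vector. That check is the handshake that proves the hardware has actually switched over. Paraphrased from the code above (not a new API):

	/* vector that just fired, recovered from the interrupt stack frame */
	unsigned int vector = ~get_irq_regs()->orig_ax;
	unsigned int me = smp_processor_id();

	/*
	 * The old vector/domain must stay reserved until an interrupt is
	 * observed on the new vector: only that observation proves the
	 * cleanup IPI can no longer race with a delivery on the old one.
	 */
	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
		send_cleanup_vector(cfg);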
@@ -3216,16 +3259,13 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
 	struct irq_cfg *cfg;
 	int err;
 	unsigned dest;
-	cpumask_t tmp;
 
 	cfg = irq_cfg(irq);
-	tmp = TARGET_CPUS;
-	err = assign_irq_vector(irq, cfg, tmp);
+	err = assign_irq_vector(irq, cfg, TARGET_CPUS);
 	if (err)
 		return err;
 
-	cpus_and(tmp, cfg->domain, tmp);
-	dest = cpu_mask_to_apicid(tmp);
+	dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);
 
 #ifdef CONFIG_INTR_REMAP
 	if (irq_remapped(irq)) {
@@ -3279,26 +3319,18 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
 }
 
 #ifdef CONFIG_SMP
-static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
+static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irq_cfg *cfg;
 	struct msi_msg msg;
 	unsigned int dest;
-	cpumask_t tmp;
 
-	cpus_and(tmp, mask, cpu_online_map);
-	if (cpus_empty(tmp))
+	dest = set_desc_affinity(desc, mask);
+	if (dest == BAD_APICID)
 		return;
 
 	cfg = desc->chip_data;
-	if (assign_irq_vector(irq, cfg, mask))
-		return;
-
-	set_extra_move_desc(desc, mask);
-
-	cpus_and(tmp, cfg->domain, mask);
-	dest = cpu_mask_to_apicid(tmp);
 
 	read_msi_msg_desc(desc, &msg);
 
@@ -3308,37 +3340,27 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
 	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
 	write_msi_msg_desc(desc, &msg);
-	desc->affinity = mask;
 }
 #ifdef CONFIG_INTR_REMAP
 /*
  * Migrate the MSI irq to another cpumask. This migration is
  * done in the process context using interrupt-remapping hardware.
  */
-static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
+static void
+ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
-	struct irq_cfg *cfg;
+	struct irq_cfg *cfg = desc->chip_data;
 	unsigned int dest;
-	cpumask_t tmp, cleanup_mask;
 	struct irte irte;
 
-	cpus_and(tmp, mask, cpu_online_map);
-	if (cpus_empty(tmp))
-		return;
-
 	if (get_irte(irq, &irte))
 		return;
 
-	cfg = desc->chip_data;
-	if (assign_irq_vector(irq, cfg, mask))
+	dest = set_desc_affinity(desc, mask);
+	if (dest == BAD_APICID)
 		return;
 
-	set_extra_move_desc(desc, mask);
-
-	cpus_and(tmp, cfg->domain, mask);
-	dest = cpu_mask_to_apicid(tmp);
-
 	irte.vector = cfg->vector;
 	irte.dest_id = IRTE_DEST(dest);
 
@@ -3352,14 +3374,8 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
 	 * at the new destination. So, time to cleanup the previous
 	 * vector allocation.
 	 */
-	if (cfg->move_in_progress) {
-		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
-		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
-		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
-		cfg->move_in_progress = 0;
-	}
-
-	desc->affinity = mask;
+	if (cfg->move_in_progress)
+		send_cleanup_vector(cfg);
 }
 
 #endif
@@ -3550,26 +3566,18 @@ void arch_teardown_msi_irq(unsigned int irq)
 
 #ifdef CONFIG_DMAR
 #ifdef CONFIG_SMP
-static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
+static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irq_cfg *cfg;
 	struct msi_msg msg;
 	unsigned int dest;
-	cpumask_t tmp;
 
-	cpus_and(tmp, mask, cpu_online_map);
-	if (cpus_empty(tmp))
+	dest = set_desc_affinity(desc, mask);
+	if (dest == BAD_APICID)
 		return;
 
 	cfg = desc->chip_data;
-	if (assign_irq_vector(irq, cfg, mask))
-		return;
-
-	set_extra_move_desc(desc, mask);
-
-	cpus_and(tmp, cfg->domain, mask);
-	dest = cpu_mask_to_apicid(tmp);
 
 	dmar_msi_read(irq, &msg);
 
@@ -3579,7 +3587,6 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
 	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
 	dmar_msi_write(irq, &msg);
-	desc->affinity = mask;
 }
 
 #endif /* CONFIG_SMP */
@@ -3613,26 +3620,18 @@ int arch_setup_dmar_msi(unsigned int irq)
 #ifdef CONFIG_HPET_TIMER
 
 #ifdef CONFIG_SMP
-static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
+static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irq_cfg *cfg;
 	struct msi_msg msg;
 	unsigned int dest;
-	cpumask_t tmp;
 
-	cpus_and(tmp, mask, cpu_online_map);
-	if (cpus_empty(tmp))
+	dest = set_desc_affinity(desc, mask);
+	if (dest == BAD_APICID)
 		return;
 
 	cfg = desc->chip_data;
-	if (assign_irq_vector(irq, cfg, mask))
-		return;
-
-	set_extra_move_desc(desc, mask);
-
-	cpus_and(tmp, cfg->domain, mask);
-	dest = cpu_mask_to_apicid(tmp);
 
 	hpet_msi_read(irq, &msg);
 
@@ -3642,7 +3641,6 @@ static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
 	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
 
 	hpet_msi_write(irq, &msg);
-	desc->affinity = mask;
 }
 
 #endif /* CONFIG_SMP */
@@ -3697,28 +3695,19 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
 	write_ht_irq_msg(irq, &msg);
 }
 
-static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
+static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irq_cfg *cfg;
 	unsigned int dest;
-	cpumask_t tmp;
 
-	cpus_and(tmp, mask, cpu_online_map);
-	if (cpus_empty(tmp))
+	dest = set_desc_affinity(desc, mask);
+	if (dest == BAD_APICID)
 		return;
 
 	cfg = desc->chip_data;
-	if (assign_irq_vector(irq, cfg, mask))
-		return;
-
-	set_extra_move_desc(desc, mask);
-
-	cpus_and(tmp, cfg->domain, mask);
-	dest = cpu_mask_to_apicid(tmp);
 
 	target_ht_irq(irq, dest, cfg->vector);
-	desc->affinity = mask;
 }
 
 #endif
@@ -3738,17 +3727,14 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 {
 	struct irq_cfg *cfg;
 	int err;
-	cpumask_t tmp;
 
 	cfg = irq_cfg(irq);
-	tmp = TARGET_CPUS;
-	err = assign_irq_vector(irq, cfg, tmp);
+	err = assign_irq_vector(irq, cfg, TARGET_CPUS);
 	if (!err) {
 		struct ht_irq_msg msg;
 		unsigned dest;
 
-		cpus_and(tmp, cfg->domain, tmp);
-		dest = cpu_mask_to_apicid(tmp);
+		dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);
 
 		msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
 
@@ -3784,7 +3770,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
 		       unsigned long mmr_offset)
 {
-	const cpumask_t *eligible_cpu = get_cpu_mask(cpu);
+	const struct cpumask *eligible_cpu = cpumask_of(cpu);
 	struct irq_cfg *cfg;
 	int mmr_pnode;
 	unsigned long mmr_value;
@@ -3794,7 +3780,7 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
 
 	cfg = irq_cfg(irq);
 
-	err = assign_irq_vector(irq, cfg, *eligible_cpu);
+	err = assign_irq_vector(irq, cfg, eligible_cpu);
 	if (err != 0)
 		return err;
 
@@ -3813,7 +3799,7 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
 	entry->polarity = 0;
 	entry->trigger = 0;
 	entry->mask = 0;
-	entry->dest = cpu_mask_to_apicid(*eligible_cpu);
+	entry->dest = cpu_mask_to_apicid(eligible_cpu);
 
 	mmr_pnode = uv_blade_to_pnode(mmr_blade);
 	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
@@ -4024,7 +4010,7 @@ void __init setup_ioapic_dest(void)
 	int pin, ioapic, irq, irq_entry;
 	struct irq_desc *desc;
 	struct irq_cfg *cfg;
-	cpumask_t mask;
+	const struct cpumask *mask;
 
 	if (skip_ioapic_setup == 1)
 		return;
@@ -4055,7 +4041,7 @@ void __init setup_ioapic_dest(void)
 		 */
 		if (desc->status &
 		    (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
-			mask = desc->affinity;
+			mask = &desc->affinity;
 		else
 			mask = TARGET_CPUS;
 
