author	Mike Travis <travis@sgi.com>	2008-12-16 20:33:52 -0500
committer	Mike Travis <travis@sgi.com>	2008-12-16 20:40:56 -0500
commit	e7986739a76cde5079da08809d8bbc6878387ae0 (patch)
tree	dd99ed6af66d459fe164f75ded7f95262dc0fb0d	/arch/x86/kernel/io_apic.c
parent	36f5101a60de8f79c0d1ca06e50660bf5129e02c (diff)
x86 smp: modify send_IPI_mask interface to accept cpumask_t pointers
Impact: cleanup, change parameter passing

  * Change genapic interfaces to accept cpumask_t pointers where possible.

  * Modify external callers to use cpumask_t pointers in function calls.

  * Create new send_IPI_mask_allbutself which is the same as the
    send_IPI_mask functions but removes smp_processor_id() from list.
    This removes another common need for a temporary cpumask_t variable.

  * Functions that used a temp cpumask_t variable for:

	cpumask_t allbutme = cpu_online_map;

	cpu_clear(smp_processor_id(), allbutme);
	if (!cpus_empty(allbutme))
		...

    become:

	if (!cpus_equal(cpu_online_map, cpumask_of_cpu(cpu)))
		...

  * Other minor code optimizations (like using cpus_clear instead of
    CPU_MASK_NONE, etc.)

Applies to linux-2.6.tip/master.

Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: Ingo Molnar <mingo@elte.hu>
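For illustration only (not part of the patch), here is a minimal userspace sketch, modelling a cpumask as a plain 64-bit word rather than the kernel's cpumask_t, of why the temporary-mask test and the cpus_equal() test in the bullet above agree whenever the current CPU is in cpu_online_map:

```c
/*
 * Userspace analogy (hypothetical, not kernel code): check that
 *   allbutme = online; cpu_clear(self); !cpus_empty(allbutme)
 * matches
 *   !cpus_equal(online, cpumask_of_cpu(self))
 * for masks that contain the current CPU, as the message claims.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t mask_of(int cpu) { return 1ull << cpu; }

static int old_test(uint64_t online, int self)
{
	uint64_t allbutme = online & ~mask_of(self);	/* cpu_clear() */
	return allbutme != 0;				/* !cpus_empty() */
}

static int new_test(uint64_t online, int self)
{
	return online != mask_of(self);			/* !cpus_equal() */
}

int main(void)
{
	uint64_t online = 0x0b;	/* CPUs 0, 1 and 3 online */
	int self;

	for (self = 0; self < 4; self++)
		assert(old_test(online, self) == new_test(online, self));

	/* single-CPU case: both tests say there is nobody else to IPI */
	assert(!old_test(0x01, 0) && !new_test(0x01, 0));

	printf("old and new 'any other CPU online?' tests agree\n");
	return 0;
}
```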
Diffstat (limited to 'arch/x86/kernel/io_apic.c')
-rw-r--r--	arch/x86/kernel/io_apic.c	145
1 files changed, 73 insertions, 72 deletions
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index 3d7d0d55253f..7f23ce7f5518 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -231,7 +231,8 @@ static struct irq_cfg *irq_cfg(unsigned int irq)
 
 #endif
 
-static inline void set_extra_move_desc(struct irq_desc *desc, cpumask_t mask)
+static inline void
+set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
 {
 }
 
@@ -396,7 +397,8 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq
 	}
 }
 
-static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask);
+static int
+assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask);
 
 static void set_ioapic_affinity_irq_desc(struct irq_desc *desc,
 					 const struct cpumask *mask)
@@ -412,13 +414,13 @@ static void set_ioapic_affinity_irq_desc(struct irq_desc *desc,
 
 	irq = desc->irq;
 	cfg = desc->chip_data;
-	if (assign_irq_vector(irq, cfg, *mask))
+	if (assign_irq_vector(irq, cfg, mask))
 		return;
 
-	set_extra_move_desc(desc, *mask);
+	set_extra_move_desc(desc, mask);
 
 	cpumask_and(&tmp, &cfg->domain, mask);
-	dest = cpu_mask_to_apicid(tmp);
+	dest = cpu_mask_to_apicid(&tmp);
 	/*
 	 * Only the high 8 bits are valid.
 	 */
@@ -1099,7 +1101,8 @@ void unlock_vector_lock(void)
 	spin_unlock(&vector_lock);
 }
 
-static int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
+static int
+__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 {
 	/*
 	 * NOTE! The local APIC isn't very good at handling
@@ -1115,35 +1118,32 @@ static int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
 	static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
 	unsigned int old_vector;
 	int cpu;
+	cpumask_t tmp_mask;
 
 	if ((cfg->move_in_progress) || cfg->move_cleanup_count)
 		return -EBUSY;
 
-	/* Only try and allocate irqs on cpus that are present */
-	cpus_and(mask, mask, cpu_online_map);
-
 	old_vector = cfg->vector;
 	if (old_vector) {
-		cpumask_t tmp;
-		cpus_and(tmp, cfg->domain, mask);
-		if (!cpus_empty(tmp))
+		cpus_and(tmp_mask, *mask, cpu_online_map);
+		cpus_and(tmp_mask, cfg->domain, tmp_mask);
+		if (!cpus_empty(tmp_mask))
 			return 0;
 	}
 
-	for_each_cpu_mask_nr(cpu, mask) {
-		cpumask_t domain, new_mask;
+	/* Only try and allocate irqs on cpus that are present */
+	for_each_cpu_and(cpu, mask, &cpu_online_map) {
 		int new_cpu;
 		int vector, offset;
 
-		domain = vector_allocation_domain(cpu);
-		cpus_and(new_mask, domain, cpu_online_map);
+		vector_allocation_domain(cpu, &tmp_mask);
 
 		vector = current_vector;
 		offset = current_offset;
 next:
 		vector += 8;
 		if (vector >= first_system_vector) {
-			/* If we run out of vectors on large boxen, must share them. */
+			/* If out of vectors on large boxen, must share them. */
 			offset = (offset + 1) % 8;
 			vector = FIRST_DEVICE_VECTOR + offset;
 		}
@@ -1156,7 +1156,7 @@ next:
 		if (vector == SYSCALL_VECTOR)
 			goto next;
 #endif
-		for_each_cpu_mask_nr(new_cpu, new_mask)
+		for_each_cpu_and(new_cpu, &tmp_mask, &cpu_online_map)
 			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
 				goto next;
 		/* Found one! */
@@ -1166,16 +1166,17 @@ next:
 			cfg->move_in_progress = 1;
 			cfg->old_domain = cfg->domain;
 		}
-		for_each_cpu_mask_nr(new_cpu, new_mask)
+		for_each_cpu_and(new_cpu, &tmp_mask, &cpu_online_map)
 			per_cpu(vector_irq, new_cpu)[vector] = irq;
 		cfg->vector = vector;
-		cfg->domain = domain;
+		cfg->domain = tmp_mask;
 		return 0;
 	}
 	return -ENOSPC;
 }
 
-static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
+static int
+assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 {
 	int err;
 	unsigned long flags;
@@ -1384,8 +1385,8 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de
 
 	cfg = desc->chip_data;
 
-	mask = TARGET_CPUS;
-	if (assign_irq_vector(irq, cfg, mask))
+	mask = *TARGET_CPUS;
+	if (assign_irq_vector(irq, cfg, &mask))
 		return;
 
 	cpus_and(mask, cfg->domain, mask);
@@ -1398,7 +1399,7 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de
 
 
 	if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
-			       cpu_mask_to_apicid(mask), trigger, polarity,
+			       cpu_mask_to_apicid(&mask), trigger, polarity,
 			       cfg->vector)) {
 		printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
 		       mp_ioapics[apic].mp_apicid, pin);
@@ -2121,7 +2122,7 @@ static int ioapic_retrigger_irq(unsigned int irq)
 	unsigned long flags;
 
 	spin_lock_irqsave(&vector_lock, flags);
-	send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector);
+	send_IPI_mask(&cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector);
 	spin_unlock_irqrestore(&vector_lock, flags);
 
 	return 1;
@@ -2170,18 +2171,19 @@ static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);
  * as simple as edge triggered migration and we can do the irq migration
  * with a simple atomic update to IO-APIC RTE.
  */
-static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask)
+static void
+migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
 {
 	struct irq_cfg *cfg;
-	cpumask_t tmp, cleanup_mask;
+	cpumask_t tmpmask;
 	struct irte irte;
 	int modify_ioapic_rte;
 	unsigned int dest;
 	unsigned long flags;
 	unsigned int irq;
 
-	cpus_and(tmp, mask, cpu_online_map);
-	if (cpus_empty(tmp))
+	cpus_and(tmpmask, *mask, cpu_online_map);
+	if (cpus_empty(tmpmask))
 		return;
 
 	irq = desc->irq;
@@ -2194,8 +2196,8 @@ static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask)
 
 	set_extra_move_desc(desc, mask);
 
-	cpus_and(tmp, cfg->domain, mask);
-	dest = cpu_mask_to_apicid(tmp);
+	cpus_and(tmpmask, cfg->domain, *mask);
+	dest = cpu_mask_to_apicid(&tmpmask);
 
 	modify_ioapic_rte = desc->status & IRQ_LEVEL;
 	if (modify_ioapic_rte) {
@@ -2213,13 +2215,13 @@ static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask)
 	modify_irte(irq, &irte);
 
 	if (cfg->move_in_progress) {
-		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
-		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
-		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
+		cpus_and(tmpmask, cfg->old_domain, cpu_online_map);
+		cfg->move_cleanup_count = cpus_weight(tmpmask);
+		send_IPI_mask(&tmpmask, IRQ_MOVE_CLEANUP_VECTOR);
 		cfg->move_in_progress = 0;
 	}
 
-	desc->affinity = mask;
+	desc->affinity = *mask;
 }
 
 static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
@@ -2241,7 +2243,7 @@ static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
 	}
 
 	/* everthing is clear. we have right of way */
-	migrate_ioapic_irq_desc(desc, desc->pending_mask);
+	migrate_ioapic_irq_desc(desc, &desc->pending_mask);
 
 	ret = 0;
 	desc->status &= ~IRQ_MOVE_PENDING;
@@ -2292,7 +2294,7 @@ static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
 		return;
 	}
 
-	migrate_ioapic_irq_desc(desc, *mask);
+	migrate_ioapic_irq_desc(desc, mask);
 }
 static void set_ir_ioapic_affinity_irq(unsigned int irq,
 				       const struct cpumask *mask)
@@ -2359,7 +2361,7 @@ static void irq_complete_move(struct irq_desc **descp)
 
 		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
 		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
-		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
+		send_IPI_mask(&cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
 		cfg->move_in_progress = 0;
 	}
 }
@@ -3089,13 +3091,13 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
 	cpumask_t tmp;
 
 	cfg = irq_cfg(irq);
-	tmp = TARGET_CPUS;
-	err = assign_irq_vector(irq, cfg, tmp);
+	tmp = *TARGET_CPUS;
+	err = assign_irq_vector(irq, cfg, &tmp);
 	if (err)
 		return err;
 
 	cpus_and(tmp, cfg->domain, tmp);
-	dest = cpu_mask_to_apicid(tmp);
+	dest = cpu_mask_to_apicid(&tmp);
 
 #ifdef CONFIG_INTR_REMAP
 	if (irq_remapped(irq)) {
@@ -3161,13 +3163,13 @@ static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
 		return;
 
 	cfg = desc->chip_data;
-	if (assign_irq_vector(irq, cfg, *mask))
+	if (assign_irq_vector(irq, cfg, mask))
 		return;
 
-	set_extra_move_desc(desc, *mask);
+	set_extra_move_desc(desc, mask);
 
 	cpumask_and(&tmp, &cfg->domain, mask);
-	dest = cpu_mask_to_apicid(tmp);
+	dest = cpu_mask_to_apicid(&tmp);
 
 	read_msi_msg_desc(desc, &msg);
 
@@ -3184,8 +3186,8 @@ static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
  * Migrate the MSI irq to another cpumask. This migration is
  * done in the process context using interrupt-remapping hardware.
  */
-static void ir_set_msi_irq_affinity(unsigned int irq,
-				    const struct cpumask *mask)
+static void
+ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irq_cfg *cfg;
@@ -3200,13 +3202,13 @@ static void ir_set_msi_irq_affinity(unsigned int irq,
 		return;
 
 	cfg = desc->chip_data;
-	if (assign_irq_vector(irq, cfg, *mask))
+	if (assign_irq_vector(irq, cfg, mask))
 		return;
 
-	set_extra_move_desc(desc, *mask);
+	set_extra_move_desc(desc, mask);
 
 	cpumask_and(&tmp, &cfg->domain, mask);
-	dest = cpu_mask_to_apicid(tmp);
+	dest = cpu_mask_to_apicid(&tmp);
 
 	irte.vector = cfg->vector;
 	irte.dest_id = IRTE_DEST(dest);
@@ -3224,7 +3226,7 @@ static void ir_set_msi_irq_affinity(unsigned int irq,
 	if (cfg->move_in_progress) {
 		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
 		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
-		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
+		send_IPI_mask(&cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
 		cfg->move_in_progress = 0;
 	}
 
@@ -3419,7 +3421,7 @@ void arch_teardown_msi_irq(unsigned int irq)
 
 #ifdef CONFIG_DMAR
 #ifdef CONFIG_SMP
-static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
+static void dmar_msi_set_affinity(unsigned int irq, const cpumask_t *mask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irq_cfg *cfg;
@@ -3431,13 +3433,13 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 		return;
 
 	cfg = desc->chip_data;
-	if (assign_irq_vector(irq, cfg, *mask))
+	if (assign_irq_vector(irq, cfg, mask))
 		return;
 
-	set_extra_move_desc(desc, *mask);
+	set_extra_move_desc(desc, mask);
 
 	cpumask_and(&tmp, &cfg->domain, mask);
-	dest = cpu_mask_to_apicid(tmp);
+	dest = cpu_mask_to_apicid(&tmp);
 
 	dmar_msi_read(irq, &msg);
 
@@ -3481,7 +3483,7 @@ int arch_setup_dmar_msi(unsigned int irq)
 #ifdef CONFIG_HPET_TIMER
 
 #ifdef CONFIG_SMP
-static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
+static void hpet_msi_set_affinity(unsigned int irq, const cpumask_t *mask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irq_cfg *cfg;
@@ -3493,13 +3495,13 @@ static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 		return;
 
 	cfg = desc->chip_data;
-	if (assign_irq_vector(irq, cfg, *mask))
+	if (assign_irq_vector(irq, cfg, mask))
 		return;
 
-	set_extra_move_desc(desc, *mask);
+	set_extra_move_desc(desc, mask);
 
 	cpumask_and(&tmp, &cfg->domain, mask);
-	dest = cpu_mask_to_apicid(tmp);
+	dest = cpu_mask_to_apicid(&tmp);
 
 	hpet_msi_read(irq, &msg);
 
@@ -3564,7 +3566,7 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
 	write_ht_irq_msg(irq, &msg);
 }
 
-static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask)
+static void set_ht_irq_affinity(unsigned int irq, const cpumask_t *mask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irq_cfg *cfg;
@@ -3575,13 +3577,13 @@ static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask)
 		return;
 
 	cfg = desc->chip_data;
-	if (assign_irq_vector(irq, cfg, *mask))
+	if (assign_irq_vector(irq, cfg, mask))
 		return;
 
-	set_extra_move_desc(desc, *mask);
+	set_extra_move_desc(desc, mask);
 
 	cpumask_and(&tmp, &cfg->domain, mask);
-	dest = cpu_mask_to_apicid(tmp);
+	dest = cpu_mask_to_apicid(&tmp);
 
 	target_ht_irq(irq, dest, cfg->vector);
 	cpumask_copy(&desc->affinity, mask);
@@ -3607,14 +3609,13 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 	cpumask_t tmp;
 
 	cfg = irq_cfg(irq);
-	tmp = TARGET_CPUS;
-	err = assign_irq_vector(irq, cfg, tmp);
+	err = assign_irq_vector(irq, cfg, TARGET_CPUS);
 	if (!err) {
 		struct ht_irq_msg msg;
 		unsigned dest;
 
 		cpus_and(tmp, cfg->domain, tmp);
-		dest = cpu_mask_to_apicid(tmp);
+		dest = cpu_mask_to_apicid(&tmp);
 
 		msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
 
@@ -3650,7 +3651,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
 		       unsigned long mmr_offset)
 {
-	const cpumask_t *eligible_cpu = get_cpu_mask(cpu);
+	const cpumask_t *eligible_cpu = &cpumask_of_cpu(cpu);
 	struct irq_cfg *cfg;
 	int mmr_pnode;
 	unsigned long mmr_value;
@@ -3660,7 +3661,7 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
 
 	cfg = irq_cfg(irq);
 
-	err = assign_irq_vector(irq, cfg, *eligible_cpu);
+	err = assign_irq_vector(irq, cfg, eligible_cpu);
 	if (err != 0)
 		return err;
 
@@ -3679,7 +3680,7 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
 	entry->polarity = 0;
 	entry->trigger = 0;
 	entry->mask = 0;
-	entry->dest = cpu_mask_to_apicid(*eligible_cpu);
+	entry->dest = cpu_mask_to_apicid(eligible_cpu);
 
 	mmr_pnode = uv_blade_to_pnode(mmr_blade);
 	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
@@ -3890,7 +3891,7 @@ void __init setup_ioapic_dest(void)
 	int pin, ioapic, irq, irq_entry;
 	struct irq_desc *desc;
 	struct irq_cfg *cfg;
-	cpumask_t mask;
+	const cpumask_t *mask;
 
 	if (skip_ioapic_setup == 1)
 		return;
@@ -3921,16 +3922,16 @@ void __init setup_ioapic_dest(void)
 			 */
 			if (desc->status &
 			    (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
-				mask = desc->affinity;
+				mask = &desc->affinity;
 			else
 				mask = TARGET_CPUS;
 
 #ifdef CONFIG_INTR_REMAP
 			if (intr_remapping_enabled)
-				set_ir_ioapic_affinity_irq_desc(desc, &mask);
+				set_ir_ioapic_affinity_irq_desc(desc, mask);
 			else
 #endif
-				set_ioapic_affinity_irq_desc(desc, &mask);
+				set_ioapic_affinity_irq_desc(desc, mask);
 		}
 
 	}