 arch/alpha/kernel/irq.c               |  2
 arch/arm/kernel/irq.c                 | 18
 arch/arm/oprofile/op_model_mpcore.c   |  2
 arch/blackfin/kernel/irqchip.c        |  5
 arch/ia64/kernel/iosapic.c            |  2
 arch/ia64/kernel/irq.c                |  4
 arch/ia64/kernel/msi_ia64.c           |  4
 arch/ia64/sn/kernel/msi_sn.c          |  2
 arch/mips/include/asm/irq.h           |  2
 arch/mips/kernel/irq-gic.c            |  2
 arch/mips/kernel/smtc.c               |  2
 arch/mips/mti-malta/malta-smtc.c      |  5
 arch/parisc/kernel/irq.c              |  8
 arch/powerpc/kernel/irq.c             |  2
 arch/powerpc/platforms/pseries/xics.c |  5
 arch/powerpc/sysdev/mpic.c            |  3
 arch/sparc/kernel/irq_64.c            |  5
 17 files changed, 44 insertions(+), 29 deletions(-)
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index 703731accda6..7bc7489223f3 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -55,7 +55,7 @@ int irq_select_affinity(unsigned int irq)
 	cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
 	last_cpu = cpu;
 
-	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
 	irq_desc[irq].chip->set_affinity(irq, cpumask_of(cpu));
 	return 0;
 }
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 7141cee1fab7..4bb723eadad1 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -104,6 +104,11 @@ static struct irq_desc bad_irq_desc = {
 	.lock = SPIN_LOCK_UNLOCKED
 };
 
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* We are not allocating bad_irq_desc.affinity or .pending_mask */
+#error "ARM architecture does not support CONFIG_CPUMASK_OFFSTACK."
+#endif
+
 /*
  * do_IRQ handles all hardware IRQ's.  Decoded IRQs should not
  * come via this function.  Instead, they should provide their
@@ -161,7 +166,7 @@ void __init init_IRQ(void)
 		irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_NOPROBE;
 
 #ifdef CONFIG_SMP
-	bad_irq_desc.affinity = CPU_MASK_ALL;
+	cpumask_setall(bad_irq_desc.affinity);
 	bad_irq_desc.cpu = smp_processor_id();
 #endif
 	init_arch_irq();
@@ -191,15 +196,16 @@ void migrate_irqs(void)
 		struct irq_desc *desc = irq_desc + i;
 
 		if (desc->cpu == cpu) {
-			unsigned int newcpu = any_online_cpu(desc->affinity);
-
-			if (newcpu == NR_CPUS) {
+			unsigned int newcpu = cpumask_any_and(desc->affinity,
+							      cpu_online_mask);
+			if (newcpu >= nr_cpu_ids) {
 				if (printk_ratelimit())
 					printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
 					       i, cpu);
 
-				cpus_setall(desc->affinity);
-				newcpu = any_online_cpu(desc->affinity);
+				cpumask_setall(desc->affinity);
+				newcpu = cpumask_any_and(desc->affinity,
+							 cpu_online_mask);
 			}
 
 			route_irq(desc, i, newcpu);
diff --git a/arch/arm/oprofile/op_model_mpcore.c b/arch/arm/oprofile/op_model_mpcore.c
index 6d6bd5899240..853d42bb8682 100644
--- a/arch/arm/oprofile/op_model_mpcore.c
+++ b/arch/arm/oprofile/op_model_mpcore.c
@@ -263,7 +263,7 @@ static void em_route_irq(int irq, unsigned int cpu)
 	const struct cpumask *mask = cpumask_of(cpu);
 
 	spin_lock_irq(&desc->lock);
-	desc->affinity = *mask;
+	cpumask_copy(desc->affinity, mask);
 	desc->chip->set_affinity(irq, mask);
 	spin_unlock_irq(&desc->lock);
 }
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index ab8209cbbad0..5780d6df1542 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -69,6 +69,11 @@ static struct irq_desc bad_irq_desc = {
 #endif
 };
 
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* We are not allocating a variable-sized bad_irq_desc.affinity */
+#error "Blackfin architecture does not support CONFIG_CPUMASK_OFFSTACK."
+#endif
+
 int show_interrupts(struct seq_file *p, void *v)
 {
 	int i = *(loff_t *) v, j;
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 5cfd3d91001a..006ad366a454 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -880,7 +880,7 @@ iosapic_unregister_intr (unsigned int gsi)
 	if (iosapic_intr_info[irq].count == 0) {
 #ifdef CONFIG_SMP
 		/* Clear affinity */
-		cpus_setall(idesc->affinity);
+		cpumask_setall(idesc->affinity);
 #endif
 		/* Clear the interrupt information */
 		iosapic_intr_info[irq].dest = 0;
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index a58f64ca9f0e..226233a6fa19 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -103,7 +103,7 @@ static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
 void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
 {
 	if (irq < NR_IRQS) {
-		cpumask_copy(&irq_desc[irq].affinity,
+		cpumask_copy(irq_desc[irq].affinity,
 			     cpumask_of(cpu_logical_id(hwid)));
 		irq_redir[irq] = (char) (redir & 0xff);
 	}
@@ -148,7 +148,7 @@ static void migrate_irqs(void)
 		if (desc->status == IRQ_PER_CPU)
 			continue;
 
-		if (cpumask_any_and(&irq_desc[irq].affinity, cpu_online_mask)
+		if (cpumask_any_and(irq_desc[irq].affinity, cpu_online_mask)
 		    >= nr_cpu_ids) {
 			/*
 			 * Save it for phase 2 processing
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 890339339035..dcb6b7c51ea7 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -75,7 +75,7 @@ static void ia64_set_msi_irq_affinity(unsigned int irq,
 	msg.data = data;
 
 	write_msi_msg(irq, &msg);
-	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
 }
 #endif /* CONFIG_SMP */
 
@@ -187,7 +187,7 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 	msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
 
 	dmar_msi_write(irq, &msg);
-	irq_desc[irq].affinity = *mask;
+	cpumask_copy(irq_desc[irq].affinity, mask);
 }
 #endif /* CONFIG_SMP */
 
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c
index ca553b0429ce..81e428943d73 100644
--- a/arch/ia64/sn/kernel/msi_sn.c
+++ b/arch/ia64/sn/kernel/msi_sn.c
@@ -205,7 +205,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq,
 	msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);
 
 	write_msi_msg(irq, &msg);
-	irq_desc[irq].affinity = *cpu_mask;
+	cpumask_copy(irq_desc[irq].affinity, cpu_mask);
 }
 #endif /* CONFIG_SMP */
 
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index abc62aa744ac..3214ade02d10 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -66,7 +66,7 @@ extern void smtc_forward_irq(unsigned int irq);
  */
 #define IRQ_AFFINITY_HOOK(irq)						\
 do {									\
-	if (!cpu_isset(smp_processor_id(), irq_desc[irq].affinity)) {	\
+	if (!cpumask_test_cpu(smp_processor_id(), irq_desc[irq].affinity)) {\
 		smtc_forward_irq(irq);					\
 		irq_exit();						\
 		return;							\
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
index 494a49a317e9..87deb8f6c458 100644
--- a/arch/mips/kernel/irq-gic.c
+++ b/arch/mips/kernel/irq-gic.c
@@ -187,7 +187,7 @@ static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 		set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
 
 	}
-	irq_desc[irq].affinity = *cpumask;
+	cpumask_copy(irq_desc[irq].affinity, cpumask);
 	spin_unlock_irqrestore(&gic_lock, flags);
 
 }
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index b6cca01ff82b..d2c1ab12425a 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -686,7 +686,7 @@ void smtc_forward_irq(unsigned int irq)
 	 * and efficiency, we just pick the easiest one to find.
 	 */
 
-	target = first_cpu(irq_desc[irq].affinity);
+	target = cpumask_first(irq_desc[irq].affinity);
 
 	/*
 	 * We depend on the platform code to have correctly processed
diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c
index aabd7274507b..5ba31888fefb 100644
--- a/arch/mips/mti-malta/malta-smtc.c
+++ b/arch/mips/mti-malta/malta-smtc.c
@@ -116,7 +116,7 @@ struct plat_smp_ops msmtc_smp_ops = {
 
 void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
 {
-	cpumask_t tmask = *affinity;
+	cpumask_t tmask;
 	int cpu = 0;
 	void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff);
 
@@ -139,11 +139,12 @@ void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
 	 * be made to forward to an offline "CPU".
 	 */
 
+	cpumask_copy(&tmask, affinity);
 	for_each_cpu(cpu, affinity) {
 		if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu))
 			cpu_clear(cpu, tmask);
 	}
-	irq_desc[irq].affinity = tmask;
+	cpumask_copy(irq_desc[irq].affinity, &tmask);
 
 	if (cpus_empty(tmask))
 		/*
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index ac2c822928c7..49482806863f 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -120,7 +120,7 @@ int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
 	if (CHECK_IRQ_PER_CPU(irq)) {
 		/* Bad linux design decision.  The mask has already
 		 * been set; we must reset it */
-		irq_desc[irq].affinity = CPU_MASK_ALL;
+		cpumask_setall(irq_desc[irq].affinity);
 		return -EINVAL;
 	}
 
@@ -136,7 +136,7 @@ static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
 	if (cpu_check_affinity(irq, dest))
 		return;
 
-	irq_desc[irq].affinity = *dest;
+	cpumask_copy(irq_desc[irq].affinity, dest);
 }
 #endif
 
@@ -295,7 +295,7 @@ int txn_alloc_irq(unsigned int bits_wide)
 unsigned long txn_affinity_addr(unsigned int irq, int cpu)
 {
 #ifdef CONFIG_SMP
-	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
 #endif
 
 	return per_cpu(cpu_data, cpu).txn_addr;
@@ -352,7 +352,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
 	irq = eirr_to_irq(eirr_val);
 
 #ifdef CONFIG_SMP
-	dest = irq_desc[irq].affinity;
+	cpumask_copy(&dest, irq_desc[irq].affinity);
 	if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
 	    !cpu_isset(smp_processor_id(), dest)) {
 		int cpu = first_cpu(dest);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 23b8b5e36f98..ad1e5ac721d8 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -231,7 +231,7 @@ void fixup_irqs(cpumask_t map)
 		if (irq_desc[irq].status & IRQ_PER_CPU)
 			continue;
 
-		cpus_and(mask, irq_desc[irq].affinity, map);
+		cpumask_and(&mask, irq_desc[irq].affinity, &map);
 		if (any_online_cpu(mask) == NR_CPUS) {
 			printk("Breaking affinity for irq %i\n", irq);
 			mask = map;
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 84e058f1e1cc..80b513449f4c 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -153,9 +153,10 @@ static int get_irq_server(unsigned int virq, unsigned int strict_check)
 {
 	int server;
 	/* For the moment only implement delivery to all cpus or one cpu */
-	cpumask_t cpumask = irq_desc[virq].affinity;
+	cpumask_t cpumask;
 	cpumask_t tmp = CPU_MASK_NONE;
 
+	cpumask_copy(&cpumask, irq_desc[virq].affinity);
 	if (!distribute_irqs)
 		return default_server;
 
@@ -869,7 +870,7 @@ void xics_migrate_irqs_away(void)
 		       virq, cpu);
 
 		/* Reset affinity to all cpus */
-		irq_desc[virq].affinity = CPU_MASK_ALL;
+		cpumask_setall(irq_desc[virq].affinity);
 		desc->chip->set_affinity(virq, cpu_all_mask);
 unlock:
 		spin_unlock_irqrestore(&desc->lock, flags);
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 3e0d89dcdba2..0afd21f9a222 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -566,9 +566,10 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic)
 #ifdef CONFIG_SMP
 static int irq_choose_cpu(unsigned int virt_irq)
 {
-	cpumask_t mask = irq_desc[virt_irq].affinity;
+	cpumask_t mask;
 	int cpuid;
 
+	cpumask_copy(&mask, irq_desc[virt_irq].affinity);
 	if (cpus_equal(mask, CPU_MASK_ALL)) {
 		static int irq_rover;
 		static DEFINE_SPINLOCK(irq_rover_lock);
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index cab8e0286871..4ac5c651e00d 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -247,9 +247,10 @@ struct irq_handler_data {
 #ifdef CONFIG_SMP
 static int irq_choose_cpu(unsigned int virt_irq)
 {
-	cpumask_t mask = irq_desc[virt_irq].affinity;
+	cpumask_t mask;
 	int cpuid;
 
+	cpumask_copy(&mask, irq_desc[virt_irq].affinity);
 	if (cpus_equal(mask, CPU_MASK_ALL)) {
 		static int irq_rover;
 		static DEFINE_SPINLOCK(irq_rover_lock);
@@ -854,7 +855,7 @@ void fixup_irqs(void)
 		    !(irq_desc[irq].status & IRQ_PER_CPU)) {
 			if (irq_desc[irq].chip->set_affinity)
 				irq_desc[irq].chip->set_affinity(irq,
-					irq_desc[irq].affinity);
+					irq_desc[irq].affinity);
 		}
 		spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
 	}
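Every hunk above follows the same pattern: direct by-value assignment of the irq_desc affinity mask (e.g. irq_desc[irq].affinity = CPU_MASK_ALL) is replaced with cpumask accessor helpers such as cpumask_copy(), cpumask_setall(), cpumask_test_cpu(), cpumask_first() and cpumask_any_and(), which take a struct cpumask pointer. The new arch/arm and arch/blackfin #error blocks note why: once the affinity field becomes a separately allocated mask under CONFIG_CPUMASK_OFFSTACK, it can no longer be copied by struct assignment. The C program below is a minimal, self-contained userspace sketch of that idiom only; it is not kernel code, and struct mask, struct fake_irq_desc and the mask_*() helpers are hypothetical stand-ins for the kernel's cpumask types and functions.

/* Illustrative sketch of "accessor helpers instead of struct assignment". */
#include <stdio.h>

#define NR_CPUS 8

struct mask { unsigned long bits; };		/* stand-in for struct cpumask */

static void mask_copy(struct mask *dst, const struct mask *src)
{
	dst->bits = src->bits;			/* analogue of cpumask_copy() */
}

static void mask_setall(struct mask *m)
{
	m->bits = (1UL << NR_CPUS) - 1;		/* analogue of cpumask_setall() */
}

static int mask_test(int cpu, const struct mask *m)
{
	return (m->bits >> cpu) & 1;		/* analogue of cpumask_test_cpu() */
}

struct fake_irq_desc {
	struct mask affinity;			/* a plain value here; going through
						 * the helpers means callers keep
						 * working if this later becomes a
						 * pointer to an off-stack mask */
};

int main(void)
{
	struct fake_irq_desc desc = { { 0 } };
	struct mask one_cpu = { 1UL << 2 };

	/* Old idiom removed by the patch:  desc.affinity = one_cpu;
	 * New idiom the patch switches to: */
	mask_copy(&desc.affinity, &one_cpu);
	printf("cpu2 set: %d\n", mask_test(2, &desc.affinity));

	mask_setall(&desc.affinity);
	printf("cpu7 set: %d\n", mask_test(7, &desc.affinity));
	return 0;
}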
