author    Rusty Russell <rusty@rustcorp.com.au>  2008-12-13 05:50:26 -0500
committer Rusty Russell <rusty@rustcorp.com.au>  2008-12-13 05:50:26 -0500
commit    0de26520c7cabf36e1de090ea8092f011a6106ce
tree      3d02e509b6315fdfd9cdb8c9e0b9ed0a30cf9384
parent    29c0177e6a4ac094302bed54a1d4bbb6b740a9ef
cpumask: make irq_set_affinity() take a const struct cpumask
Impact: change existing irq_chip API
There is not much point in a gentle transition here: the struct irq_chip's
set_affinity method signature needs to change.
Fortunately this is not widely used code, but the change does hit a few architectures.
Note: In irq_select_affinity() I save a temporary by mangling
irq_desc[irq].affinity directly. Ingo, does this break anything?
(Folded in a fix from KOSAKI Motohiro.)
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
Reviewed-by: Grant Grundler <grundler@parisc-linux.org>
Acked-by: Ingo Molnar <mingo@redhat.com>
Cc: ralf@linux-mips.org
Cc: grundler@parisc-linux.org
Cc: jeremy@xensource.com
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
42 files changed, 171 insertions(+), 161 deletions(-)
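Before the per-file hunks, here is a minimal, hedged sketch of the conversion pattern this patch applies. The example_set_affinity() callback below is hypothetical; only the API shapes it illustrates are taken from the diff itself.

	#include <linux/cpumask.h>
	#include <linux/irq.h>

	/*
	 * Old irq_chip callback shape (mask passed by value):
	 *     void (*set_affinity)(unsigned int irq, cpumask_t dest);
	 * New irq_chip callback shape (pointer to const mask):
	 *     void (*set_affinity)(unsigned int irq, const struct cpumask *dest);
	 */

	/* Hypothetical driver callback converted to the new signature. */
	static void example_set_affinity(unsigned int irq, const struct cpumask *dest)
	{
		/* first_cpu() on a by-value mask becomes cpumask_first() on the pointer. */
		unsigned int cpu = cpumask_first(dest);

		/* Direct struct assignment of the mask becomes cpumask_copy(). */
		cpumask_copy(&irq_desc[irq].affinity, dest);

		/* A real callback would program the interrupt controller to route irq to cpu. */
		(void)cpu;
	}

Callers change in the same spirit, as the hunks below show: stack masks built with cpumask_of_cpu(cpu) become the constant per-CPU pointer cpumask_of(cpu); first_cpu(), cpus_weight() and cpus_and() on local copies become cpumask_first(), cpumask_weight() and cpumask_and() on the passed pointer; and irq_set_affinity(irq, cpumask_of_cpu(cpu)) becomes irq_set_affinity(irq, cpumask_of(cpu)).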
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c index c626a821cdcb..d0f1620007f7 100644 --- a/arch/alpha/kernel/irq.c +++ b/arch/alpha/kernel/irq.c | |||
@@ -55,7 +55,7 @@ int irq_select_affinity(unsigned int irq) | |||
55 | last_cpu = cpu; | 55 | last_cpu = cpu; |
56 | 56 | ||
57 | irq_desc[irq].affinity = cpumask_of_cpu(cpu); | 57 | irq_desc[irq].affinity = cpumask_of_cpu(cpu); |
58 | irq_desc[irq].chip->set_affinity(irq, cpumask_of_cpu(cpu)); | 58 | irq_desc[irq].chip->set_affinity(irq, cpumask_of(cpu)); |
59 | return 0; | 59 | return 0; |
60 | } | 60 | } |
61 | #endif /* CONFIG_SMP */ | 61 | #endif /* CONFIG_SMP */ |
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c index c71b0fd7a61f..ab44c164d9d4 100644 --- a/arch/alpha/kernel/sys_dp264.c +++ b/arch/alpha/kernel/sys_dp264.c | |||
@@ -177,19 +177,19 @@ cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity) | |||
177 | } | 177 | } |
178 | 178 | ||
179 | static void | 179 | static void |
180 | dp264_set_affinity(unsigned int irq, cpumask_t affinity) | 180 | dp264_set_affinity(unsigned int irq, const struct cpumask *affinity) |
181 | { | 181 | { |
182 | spin_lock(&dp264_irq_lock); | 182 | spin_lock(&dp264_irq_lock); |
183 | cpu_set_irq_affinity(irq, affinity); | 183 | cpu_set_irq_affinity(irq, *affinity); |
184 | tsunami_update_irq_hw(cached_irq_mask); | 184 | tsunami_update_irq_hw(cached_irq_mask); |
185 | spin_unlock(&dp264_irq_lock); | 185 | spin_unlock(&dp264_irq_lock); |
186 | } | 186 | } |
187 | 187 | ||
188 | static void | 188 | static void |
189 | clipper_set_affinity(unsigned int irq, cpumask_t affinity) | 189 | clipper_set_affinity(unsigned int irq, const struct cpumask *affinity) |
190 | { | 190 | { |
191 | spin_lock(&dp264_irq_lock); | 191 | spin_lock(&dp264_irq_lock); |
192 | cpu_set_irq_affinity(irq - 16, affinity); | 192 | cpu_set_irq_affinity(irq - 16, *affinity); |
193 | tsunami_update_irq_hw(cached_irq_mask); | 193 | tsunami_update_irq_hw(cached_irq_mask); |
194 | spin_unlock(&dp264_irq_lock); | 194 | spin_unlock(&dp264_irq_lock); |
195 | } | 195 | } |
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c index 52c91ccc1648..27f840a4ad3d 100644 --- a/arch/alpha/kernel/sys_titan.c +++ b/arch/alpha/kernel/sys_titan.c | |||
@@ -158,10 +158,10 @@ titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity) | |||
158 | } | 158 | } |
159 | 159 | ||
160 | static void | 160 | static void |
161 | titan_set_irq_affinity(unsigned int irq, cpumask_t affinity) | 161 | titan_set_irq_affinity(unsigned int irq, const struct cpumask *affinity) |
162 | { | 162 | { |
163 | spin_lock(&titan_irq_lock); | 163 | spin_lock(&titan_irq_lock); |
164 | titan_cpu_set_irq_affinity(irq - 16, affinity); | 164 | titan_cpu_set_irq_affinity(irq - 16, *affinity); |
165 | titan_update_irq_hw(titan_cached_irq_mask); | 165 | titan_update_irq_hw(titan_cached_irq_mask); |
166 | spin_unlock(&titan_irq_lock); | 166 | spin_unlock(&titan_irq_lock); |
167 | } | 167 | } |
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c index 7fc9860a97d7..c6884ba1d5ed 100644 --- a/arch/arm/common/gic.c +++ b/arch/arm/common/gic.c | |||
@@ -109,11 +109,11 @@ static void gic_unmask_irq(unsigned int irq) | |||
109 | } | 109 | } |
110 | 110 | ||
111 | #ifdef CONFIG_SMP | 111 | #ifdef CONFIG_SMP |
112 | static void gic_set_cpu(unsigned int irq, cpumask_t mask_val) | 112 | static void gic_set_cpu(unsigned int irq, const struct cpumask *mask_val) |
113 | { | 113 | { |
114 | void __iomem *reg = gic_dist_base(irq) + GIC_DIST_TARGET + (gic_irq(irq) & ~3); | 114 | void __iomem *reg = gic_dist_base(irq) + GIC_DIST_TARGET + (gic_irq(irq) & ~3); |
115 | unsigned int shift = (irq % 4) * 8; | 115 | unsigned int shift = (irq % 4) * 8; |
116 | unsigned int cpu = first_cpu(mask_val); | 116 | unsigned int cpu = cpumask_first(mask_val); |
117 | u32 val; | 117 | u32 val; |
118 | 118 | ||
119 | spin_lock(&irq_controller_lock); | 119 | spin_lock(&irq_controller_lock); |
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 2f3eb795fa6e..7141cee1fab7 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c | |||
@@ -174,7 +174,7 @@ static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu) | |||
174 | pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu); | 174 | pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu); |
175 | 175 | ||
176 | spin_lock_irq(&desc->lock); | 176 | spin_lock_irq(&desc->lock); |
177 | desc->chip->set_affinity(irq, cpumask_of_cpu(cpu)); | 177 | desc->chip->set_affinity(irq, cpumask_of(cpu)); |
178 | spin_unlock_irq(&desc->lock); | 178 | spin_unlock_irq(&desc->lock); |
179 | } | 179 | } |
180 | 180 | ||
diff --git a/arch/arm/oprofile/op_model_mpcore.c b/arch/arm/oprofile/op_model_mpcore.c index 4de366e8b4c5..6d6bd5899240 100644 --- a/arch/arm/oprofile/op_model_mpcore.c +++ b/arch/arm/oprofile/op_model_mpcore.c | |||
@@ -260,10 +260,10 @@ static void em_stop(void) | |||
260 | static void em_route_irq(int irq, unsigned int cpu) | 260 | static void em_route_irq(int irq, unsigned int cpu) |
261 | { | 261 | { |
262 | struct irq_desc *desc = irq_desc + irq; | 262 | struct irq_desc *desc = irq_desc + irq; |
263 | cpumask_t mask = cpumask_of_cpu(cpu); | 263 | const struct cpumask *mask = cpumask_of(cpu); |
264 | 264 | ||
265 | spin_lock_irq(&desc->lock); | 265 | spin_lock_irq(&desc->lock); |
266 | desc->affinity = mask; | 266 | desc->affinity = *mask; |
267 | desc->chip->set_affinity(irq, mask); | 267 | desc->chip->set_affinity(irq, mask); |
268 | spin_unlock_irq(&desc->lock); | 268 | spin_unlock_irq(&desc->lock); |
269 | } | 269 | } |
diff --git a/arch/cris/arch-v32/kernel/irq.c b/arch/cris/arch-v32/kernel/irq.c index 173c141ac9ba..295131fee710 100644 --- a/arch/cris/arch-v32/kernel/irq.c +++ b/arch/cris/arch-v32/kernel/irq.c | |||
@@ -325,11 +325,11 @@ static void end_crisv32_irq(unsigned int irq) | |||
325 | { | 325 | { |
326 | } | 326 | } |
327 | 327 | ||
328 | void set_affinity_crisv32_irq(unsigned int irq, cpumask_t dest) | 328 | void set_affinity_crisv32_irq(unsigned int irq, const struct cpumask *dest) |
329 | { | 329 | { |
330 | unsigned long flags; | 330 | unsigned long flags; |
331 | spin_lock_irqsave(&irq_lock, flags); | 331 | spin_lock_irqsave(&irq_lock, flags); |
332 | irq_allocations[irq - FIRST_IRQ].mask = dest; | 332 | irq_allocations[irq - FIRST_IRQ].mask = *dest; |
333 | spin_unlock_irqrestore(&irq_lock, flags); | 333 | spin_unlock_irqrestore(&irq_lock, flags); |
334 | } | 334 | } |
335 | 335 | ||
diff --git a/arch/ia64/hp/sim/hpsim_irq.c b/arch/ia64/hp/sim/hpsim_irq.c index c2f58ff364e7..cc0a3182db3c 100644 --- a/arch/ia64/hp/sim/hpsim_irq.c +++ b/arch/ia64/hp/sim/hpsim_irq.c | |||
@@ -22,7 +22,7 @@ hpsim_irq_noop (unsigned int irq) | |||
22 | } | 22 | } |
23 | 23 | ||
24 | static void | 24 | static void |
25 | hpsim_set_affinity_noop (unsigned int a, cpumask_t b) | 25 | hpsim_set_affinity_noop(unsigned int a, const struct cpumask *b) |
26 | { | 26 | { |
27 | } | 27 | } |
28 | 28 | ||
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index 5c4674ae8aea..c8adecd5b416 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c | |||
@@ -330,25 +330,25 @@ unmask_irq (unsigned int irq) | |||
330 | 330 | ||
331 | 331 | ||
332 | static void | 332 | static void |
333 | iosapic_set_affinity (unsigned int irq, cpumask_t mask) | 333 | iosapic_set_affinity(unsigned int irq, const struct cpumask *mask) |
334 | { | 334 | { |
335 | #ifdef CONFIG_SMP | 335 | #ifdef CONFIG_SMP |
336 | u32 high32, low32; | 336 | u32 high32, low32; |
337 | int dest, rte_index; | 337 | int cpu, dest, rte_index; |
338 | int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0; | 338 | int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0; |
339 | struct iosapic_rte_info *rte; | 339 | struct iosapic_rte_info *rte; |
340 | struct iosapic *iosapic; | 340 | struct iosapic *iosapic; |
341 | 341 | ||
342 | irq &= (~IA64_IRQ_REDIRECTED); | 342 | irq &= (~IA64_IRQ_REDIRECTED); |
343 | 343 | ||
344 | cpus_and(mask, mask, cpu_online_map); | 344 | cpu = cpumask_first_and(cpu_online_mask, mask); |
345 | if (cpus_empty(mask)) | 345 | if (cpu >= nr_cpu_ids) |
346 | return; | 346 | return; |
347 | 347 | ||
348 | if (irq_prepare_move(irq, first_cpu(mask))) | 348 | if (irq_prepare_move(irq, cpu)) |
349 | return; | 349 | return; |
350 | 350 | ||
351 | dest = cpu_physical_id(first_cpu(mask)); | 351 | dest = cpu_physical_id(cpu); |
352 | 352 | ||
353 | if (!iosapic_intr_info[irq].count) | 353 | if (!iosapic_intr_info[irq].count) |
354 | return; /* not an IOSAPIC interrupt */ | 354 | return; /* not an IOSAPIC interrupt */ |
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c index 7fd18f54c056..0b6db53fedcf 100644 --- a/arch/ia64/kernel/irq.c +++ b/arch/ia64/kernel/irq.c | |||
@@ -133,7 +133,6 @@ unsigned int vectors_in_migration[NR_IRQS]; | |||
133 | */ | 133 | */ |
134 | static void migrate_irqs(void) | 134 | static void migrate_irqs(void) |
135 | { | 135 | { |
136 | cpumask_t mask; | ||
137 | irq_desc_t *desc; | 136 | irq_desc_t *desc; |
138 | int irq, new_cpu; | 137 | int irq, new_cpu; |
139 | 138 | ||
@@ -152,15 +151,14 @@ static void migrate_irqs(void) | |||
152 | if (desc->status == IRQ_PER_CPU) | 151 | if (desc->status == IRQ_PER_CPU) |
153 | continue; | 152 | continue; |
154 | 153 | ||
155 | cpus_and(mask, irq_desc[irq].affinity, cpu_online_map); | 154 | if (cpumask_any_and(&irq_desc[irq].affinity, cpu_online_mask) |
156 | if (any_online_cpu(mask) == NR_CPUS) { | 155 | >= nr_cpu_ids) { |
157 | /* | 156 | /* |
158 | * Save it for phase 2 processing | 157 | * Save it for phase 2 processing |
159 | */ | 158 | */ |
160 | vectors_in_migration[irq] = irq; | 159 | vectors_in_migration[irq] = irq; |
161 | 160 | ||
162 | new_cpu = any_online_cpu(cpu_online_map); | 161 | new_cpu = any_online_cpu(cpu_online_map); |
163 | mask = cpumask_of_cpu(new_cpu); | ||
164 | 162 | ||
165 | /* | 163 | /* |
166 | * Al three are essential, currently WARN_ON.. maybe panic? | 164 | * Al three are essential, currently WARN_ON.. maybe panic? |
@@ -168,7 +166,8 @@ static void migrate_irqs(void) | |||
168 | if (desc->chip && desc->chip->disable && | 166 | if (desc->chip && desc->chip->disable && |
169 | desc->chip->enable && desc->chip->set_affinity) { | 167 | desc->chip->enable && desc->chip->set_affinity) { |
170 | desc->chip->disable(irq); | 168 | desc->chip->disable(irq); |
171 | desc->chip->set_affinity(irq, mask); | 169 | desc->chip->set_affinity(irq, |
170 | cpumask_of(new_cpu)); | ||
172 | desc->chip->enable(irq); | 171 | desc->chip->enable(irq); |
173 | } else { | 172 | } else { |
174 | WARN_ON((!(desc->chip) || !(desc->chip->disable) || | 173 | WARN_ON((!(desc->chip) || !(desc->chip->disable) || |
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c index 702a09c13238..890339339035 100644 --- a/arch/ia64/kernel/msi_ia64.c +++ b/arch/ia64/kernel/msi_ia64.c | |||
@@ -49,11 +49,12 @@ | |||
49 | static struct irq_chip ia64_msi_chip; | 49 | static struct irq_chip ia64_msi_chip; |
50 | 50 | ||
51 | #ifdef CONFIG_SMP | 51 | #ifdef CONFIG_SMP |
52 | static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask) | 52 | static void ia64_set_msi_irq_affinity(unsigned int irq, |
53 | const cpumask_t *cpu_mask) | ||
53 | { | 54 | { |
54 | struct msi_msg msg; | 55 | struct msi_msg msg; |
55 | u32 addr, data; | 56 | u32 addr, data; |
56 | int cpu = first_cpu(cpu_mask); | 57 | int cpu = first_cpu(*cpu_mask); |
57 | 58 | ||
58 | if (!cpu_online(cpu)) | 59 | if (!cpu_online(cpu)) |
59 | return; | 60 | return; |
@@ -166,12 +167,11 @@ void arch_teardown_msi_irq(unsigned int irq) | |||
166 | 167 | ||
167 | #ifdef CONFIG_DMAR | 168 | #ifdef CONFIG_DMAR |
168 | #ifdef CONFIG_SMP | 169 | #ifdef CONFIG_SMP |
169 | static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask) | 170 | static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) |
170 | { | 171 | { |
171 | struct irq_cfg *cfg = irq_cfg + irq; | 172 | struct irq_cfg *cfg = irq_cfg + irq; |
172 | struct msi_msg msg; | 173 | struct msi_msg msg; |
173 | int cpu = first_cpu(mask); | 174 | int cpu = cpumask_first(mask); |
174 | |||
175 | 175 | ||
176 | if (!cpu_online(cpu)) | 176 | if (!cpu_online(cpu)) |
177 | return; | 177 | return; |
@@ -187,7 +187,7 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask) | |||
187 | msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu)); | 187 | msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu)); |
188 | 188 | ||
189 | dmar_msi_write(irq, &msg); | 189 | dmar_msi_write(irq, &msg); |
190 | irq_desc[irq].affinity = mask; | 190 | irq_desc[irq].affinity = *mask; |
191 | } | 191 | } |
192 | #endif /* CONFIG_SMP */ | 192 | #endif /* CONFIG_SMP */ |
193 | 193 | ||
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 4ede6e571c38..11463994a7d5 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c | |||
@@ -682,7 +682,7 @@ int migrate_platform_irqs(unsigned int cpu) | |||
682 | { | 682 | { |
683 | int new_cpei_cpu; | 683 | int new_cpei_cpu; |
684 | irq_desc_t *desc = NULL; | 684 | irq_desc_t *desc = NULL; |
685 | cpumask_t mask; | 685 | const struct cpumask *mask; |
686 | int retval = 0; | 686 | int retval = 0; |
687 | 687 | ||
688 | /* | 688 | /* |
@@ -695,7 +695,7 @@ int migrate_platform_irqs(unsigned int cpu) | |||
695 | * Now re-target the CPEI to a different processor | 695 | * Now re-target the CPEI to a different processor |
696 | */ | 696 | */ |
697 | new_cpei_cpu = any_online_cpu(cpu_online_map); | 697 | new_cpei_cpu = any_online_cpu(cpu_online_map); |
698 | mask = cpumask_of_cpu(new_cpei_cpu); | 698 | mask = cpumask_of(new_cpei_cpu); |
699 | set_cpei_target_cpu(new_cpei_cpu); | 699 | set_cpei_target_cpu(new_cpei_cpu); |
700 | desc = irq_desc + ia64_cpe_irq; | 700 | desc = irq_desc + ia64_cpe_irq; |
701 | /* | 701 | /* |
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c index 0c66dbdd1d72..66fd705e82c0 100644 --- a/arch/ia64/sn/kernel/irq.c +++ b/arch/ia64/sn/kernel/irq.c | |||
@@ -227,14 +227,14 @@ finish_up: | |||
227 | return new_irq_info; | 227 | return new_irq_info; |
228 | } | 228 | } |
229 | 229 | ||
230 | static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask) | 230 | static void sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask) |
231 | { | 231 | { |
232 | struct sn_irq_info *sn_irq_info, *sn_irq_info_safe; | 232 | struct sn_irq_info *sn_irq_info, *sn_irq_info_safe; |
233 | nasid_t nasid; | 233 | nasid_t nasid; |
234 | int slice; | 234 | int slice; |
235 | 235 | ||
236 | nasid = cpuid_to_nasid(first_cpu(mask)); | 236 | nasid = cpuid_to_nasid(cpumask_first(mask)); |
237 | slice = cpuid_to_slice(first_cpu(mask)); | 237 | slice = cpuid_to_slice(cpumask_first(mask)); |
238 | 238 | ||
239 | list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe, | 239 | list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe, |
240 | sn_irq_lh[irq], list) | 240 | sn_irq_lh[irq], list) |
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c index 83f190ffe350..ca553b0429ce 100644 --- a/arch/ia64/sn/kernel/msi_sn.c +++ b/arch/ia64/sn/kernel/msi_sn.c | |||
@@ -151,7 +151,8 @@ int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry) | |||
151 | } | 151 | } |
152 | 152 | ||
153 | #ifdef CONFIG_SMP | 153 | #ifdef CONFIG_SMP |
154 | static void sn_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask) | 154 | static void sn_set_msi_irq_affinity(unsigned int irq, |
155 | const struct cpumask *cpu_mask) | ||
155 | { | 156 | { |
156 | struct msi_msg msg; | 157 | struct msi_msg msg; |
157 | int slice; | 158 | int slice; |
@@ -164,7 +165,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask) | |||
164 | struct sn_pcibus_provider *provider; | 165 | struct sn_pcibus_provider *provider; |
165 | unsigned int cpu; | 166 | unsigned int cpu; |
166 | 167 | ||
167 | cpu = first_cpu(cpu_mask); | 168 | cpu = cpumask_first(cpu_mask); |
168 | sn_irq_info = sn_msi_info[irq].sn_irq_info; | 169 | sn_irq_info = sn_msi_info[irq].sn_irq_info; |
169 | if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0) | 170 | if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0) |
170 | return; | 171 | return; |
@@ -204,7 +205,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask) | |||
204 | msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff); | 205 | msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff); |
205 | 206 | ||
206 | write_msi_msg(irq, &msg); | 207 | write_msi_msg(irq, &msg); |
207 | irq_desc[irq].affinity = cpu_mask; | 208 | irq_desc[irq].affinity = *cpu_mask; |
208 | } | 209 | } |
209 | #endif /* CONFIG_SMP */ | 210 | #endif /* CONFIG_SMP */ |
210 | 211 | ||
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h index a58f0eecc68f..abc62aa744ac 100644 --- a/arch/mips/include/asm/irq.h +++ b/arch/mips/include/asm/irq.h | |||
@@ -49,7 +49,8 @@ static inline void smtc_im_ack_irq(unsigned int irq) | |||
49 | #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF | 49 | #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF |
50 | #include <linux/cpumask.h> | 50 | #include <linux/cpumask.h> |
51 | 51 | ||
52 | extern void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity); | 52 | extern void plat_set_irq_affinity(unsigned int irq, |
53 | const struct cpumask *affinity); | ||
53 | extern void smtc_forward_irq(unsigned int irq); | 54 | extern void smtc_forward_irq(unsigned int irq); |
54 | 55 | ||
55 | /* | 56 | /* |
diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c index 0a57f86945f1..d7e21bc8cd21 100644 --- a/arch/mips/kernel/cevt-bcm1480.c +++ b/arch/mips/kernel/cevt-bcm1480.c | |||
@@ -148,6 +148,6 @@ void __cpuinit sb1480_clockevent_init(void) | |||
148 | action->name = name; | 148 | action->name = name; |
149 | action->dev_id = cd; | 149 | action->dev_id = cd; |
150 | 150 | ||
151 | irq_set_affinity(irq, cpumask_of_cpu(cpu)); | 151 | irq_set_affinity(irq, cpumask_of(cpu)); |
152 | setup_irq(irq, action); | 152 | setup_irq(irq, action); |
153 | } | 153 | } |
diff --git a/arch/mips/kernel/cevt-sb1250.c b/arch/mips/kernel/cevt-sb1250.c index 63ac3ad462bc..0f188cd46e03 100644 --- a/arch/mips/kernel/cevt-sb1250.c +++ b/arch/mips/kernel/cevt-sb1250.c | |||
@@ -147,6 +147,6 @@ void __cpuinit sb1250_clockevent_init(void) | |||
147 | action->name = name; | 147 | action->name = name; |
148 | action->dev_id = cd; | 148 | action->dev_id = cd; |
149 | 149 | ||
150 | irq_set_affinity(irq, cpumask_of_cpu(cpu)); | 150 | irq_set_affinity(irq, cpumask_of(cpu)); |
151 | setup_irq(irq, action); | 151 | setup_irq(irq, action); |
152 | } | 152 | } |
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c index f0a4bb19e096..494a49a317e9 100644 --- a/arch/mips/kernel/irq-gic.c +++ b/arch/mips/kernel/irq-gic.c | |||
@@ -155,7 +155,7 @@ static void gic_unmask_irq(unsigned int irq) | |||
155 | 155 | ||
156 | static DEFINE_SPINLOCK(gic_lock); | 156 | static DEFINE_SPINLOCK(gic_lock); |
157 | 157 | ||
158 | static void gic_set_affinity(unsigned int irq, cpumask_t cpumask) | 158 | static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask) |
159 | { | 159 | { |
160 | cpumask_t tmp = CPU_MASK_NONE; | 160 | cpumask_t tmp = CPU_MASK_NONE; |
161 | unsigned long flags; | 161 | unsigned long flags; |
@@ -164,7 +164,7 @@ static void gic_set_affinity(unsigned int irq, cpumask_t cpumask) | |||
164 | pr_debug(KERN_DEBUG "%s called\n", __func__); | 164 | pr_debug(KERN_DEBUG "%s called\n", __func__); |
165 | irq -= _irqbase; | 165 | irq -= _irqbase; |
166 | 166 | ||
167 | cpus_and(tmp, cpumask, cpu_online_map); | 167 | cpumask_and(&tmp, cpumask, cpu_online_mask); |
168 | if (cpus_empty(tmp)) | 168 | if (cpus_empty(tmp)) |
169 | return; | 169 | return; |
170 | 170 | ||
@@ -187,7 +187,7 @@ static void gic_set_affinity(unsigned int irq, cpumask_t cpumask) | |||
187 | set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask); | 187 | set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask); |
188 | 188 | ||
189 | } | 189 | } |
190 | irq_desc[irq].affinity = cpumask; | 190 | irq_desc[irq].affinity = *cpumask; |
191 | spin_unlock_irqrestore(&gic_lock, flags); | 191 | spin_unlock_irqrestore(&gic_lock, flags); |
192 | 192 | ||
193 | } | 193 | } |
diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c index f84a46a8ae6e..aabd7274507b 100644 --- a/arch/mips/mti-malta/malta-smtc.c +++ b/arch/mips/mti-malta/malta-smtc.c | |||
@@ -114,9 +114,9 @@ struct plat_smp_ops msmtc_smp_ops = { | |||
114 | */ | 114 | */ |
115 | 115 | ||
116 | 116 | ||
117 | void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity) | 117 | void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity) |
118 | { | 118 | { |
119 | cpumask_t tmask = affinity; | 119 | cpumask_t tmask = *affinity; |
120 | int cpu = 0; | 120 | int cpu = 0; |
121 | void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff); | 121 | void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff); |
122 | 122 | ||
@@ -139,7 +139,7 @@ void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity) | |||
139 | * be made to forward to an offline "CPU". | 139 | * be made to forward to an offline "CPU". |
140 | */ | 140 | */ |
141 | 141 | ||
142 | for_each_cpu_mask(cpu, affinity) { | 142 | for_each_cpu(cpu, affinity) { |
143 | if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu)) | 143 | if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu)) |
144 | cpu_clear(cpu, tmask); | 144 | cpu_clear(cpu, tmask); |
145 | } | 145 | } |
diff --git a/arch/mips/sibyte/bcm1480/irq.c b/arch/mips/sibyte/bcm1480/irq.c index a35818ed4263..12b465d404df 100644 --- a/arch/mips/sibyte/bcm1480/irq.c +++ b/arch/mips/sibyte/bcm1480/irq.c | |||
@@ -50,7 +50,7 @@ static void enable_bcm1480_irq(unsigned int irq); | |||
50 | static void disable_bcm1480_irq(unsigned int irq); | 50 | static void disable_bcm1480_irq(unsigned int irq); |
51 | static void ack_bcm1480_irq(unsigned int irq); | 51 | static void ack_bcm1480_irq(unsigned int irq); |
52 | #ifdef CONFIG_SMP | 52 | #ifdef CONFIG_SMP |
53 | static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask); | 53 | static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask); |
54 | #endif | 54 | #endif |
55 | 55 | ||
56 | #ifdef CONFIG_PCI | 56 | #ifdef CONFIG_PCI |
@@ -109,7 +109,7 @@ void bcm1480_unmask_irq(int cpu, int irq) | |||
109 | } | 109 | } |
110 | 110 | ||
111 | #ifdef CONFIG_SMP | 111 | #ifdef CONFIG_SMP |
112 | static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask) | 112 | static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask) |
113 | { | 113 | { |
114 | int i = 0, old_cpu, cpu, int_on, k; | 114 | int i = 0, old_cpu, cpu, int_on, k; |
115 | u64 cur_ints; | 115 | u64 cur_ints; |
@@ -117,11 +117,11 @@ static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask) | |||
117 | unsigned long flags; | 117 | unsigned long flags; |
118 | unsigned int irq_dirty; | 118 | unsigned int irq_dirty; |
119 | 119 | ||
120 | if (cpus_weight(mask) != 1) { | 120 | if (cpumask_weight(mask) != 1) { |
121 | printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq); | 121 | printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq); |
122 | return; | 122 | return; |
123 | } | 123 | } |
124 | i = first_cpu(mask); | 124 | i = cpumask_first(mask); |
125 | 125 | ||
126 | /* Convert logical CPU to physical CPU */ | 126 | /* Convert logical CPU to physical CPU */ |
127 | cpu = cpu_logical_map(i); | 127 | cpu = cpu_logical_map(i); |
diff --git a/arch/mips/sibyte/sb1250/irq.c b/arch/mips/sibyte/sb1250/irq.c index a5158483986e..808ac2959b8c 100644 --- a/arch/mips/sibyte/sb1250/irq.c +++ b/arch/mips/sibyte/sb1250/irq.c | |||
@@ -50,7 +50,7 @@ static void enable_sb1250_irq(unsigned int irq); | |||
50 | static void disable_sb1250_irq(unsigned int irq); | 50 | static void disable_sb1250_irq(unsigned int irq); |
51 | static void ack_sb1250_irq(unsigned int irq); | 51 | static void ack_sb1250_irq(unsigned int irq); |
52 | #ifdef CONFIG_SMP | 52 | #ifdef CONFIG_SMP |
53 | static void sb1250_set_affinity(unsigned int irq, cpumask_t mask); | 53 | static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask); |
54 | #endif | 54 | #endif |
55 | 55 | ||
56 | #ifdef CONFIG_SIBYTE_HAS_LDT | 56 | #ifdef CONFIG_SIBYTE_HAS_LDT |
@@ -103,16 +103,16 @@ void sb1250_unmask_irq(int cpu, int irq) | |||
103 | } | 103 | } |
104 | 104 | ||
105 | #ifdef CONFIG_SMP | 105 | #ifdef CONFIG_SMP |
106 | static void sb1250_set_affinity(unsigned int irq, cpumask_t mask) | 106 | static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask) |
107 | { | 107 | { |
108 | int i = 0, old_cpu, cpu, int_on; | 108 | int i = 0, old_cpu, cpu, int_on; |
109 | u64 cur_ints; | 109 | u64 cur_ints; |
110 | struct irq_desc *desc = irq_desc + irq; | 110 | struct irq_desc *desc = irq_desc + irq; |
111 | unsigned long flags; | 111 | unsigned long flags; |
112 | 112 | ||
113 | i = first_cpu(mask); | 113 | i = cpumask_first(mask); |
114 | 114 | ||
115 | if (cpus_weight(mask) > 1) { | 115 | if (cpumask_weight(mask) > 1) { |
116 | printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq); | 116 | printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq); |
117 | return; | 117 | return; |
118 | } | 118 | } |
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index 23ef950df008..4cea935e2f99 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c | |||
@@ -131,12 +131,12 @@ int cpu_check_affinity(unsigned int irq, cpumask_t *dest) | |||
131 | return 0; | 131 | return 0; |
132 | } | 132 | } |
133 | 133 | ||
134 | static void cpu_set_affinity_irq(unsigned int irq, cpumask_t dest) | 134 | static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest) |
135 | { | 135 | { |
136 | if (cpu_check_affinity(irq, &dest)) | 136 | if (cpu_check_affinity(irq, dest)) |
137 | return; | 137 | return; |
138 | 138 | ||
139 | irq_desc[irq].affinity = dest; | 139 | irq_desc[irq].affinity = *dest; |
140 | } | 140 | } |
141 | #endif | 141 | #endif |
142 | 142 | ||
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index ac222d0ab12e..23b8b5e36f98 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c | |||
@@ -237,7 +237,7 @@ void fixup_irqs(cpumask_t map) | |||
237 | mask = map; | 237 | mask = map; |
238 | } | 238 | } |
239 | if (irq_desc[irq].chip->set_affinity) | 239 | if (irq_desc[irq].chip->set_affinity) |
240 | irq_desc[irq].chip->set_affinity(irq, mask); | 240 | irq_desc[irq].chip->set_affinity(irq, &mask); |
241 | else if (irq_desc[irq].action && !(warned++)) | 241 | else if (irq_desc[irq].action && !(warned++)) |
242 | printk("Cannot set affinity for irq %i\n", irq); | 242 | printk("Cannot set affinity for irq %i\n", irq); |
243 | } | 243 | } |
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index 64d24310ce7e..424b335a71c8 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c | |||
@@ -332,7 +332,7 @@ static void xics_eoi_lpar(unsigned int virq) | |||
332 | lpar_xirr_info_set((0xff << 24) | irq); | 332 | lpar_xirr_info_set((0xff << 24) | irq); |
333 | } | 333 | } |
334 | 334 | ||
335 | static void xics_set_affinity(unsigned int virq, cpumask_t cpumask) | 335 | static void xics_set_affinity(unsigned int virq, const struct cpumask *cpumask) |
336 | { | 336 | { |
337 | unsigned int irq; | 337 | unsigned int irq; |
338 | int status; | 338 | int status; |
@@ -358,7 +358,7 @@ static void xics_set_affinity(unsigned int virq, cpumask_t cpumask) | |||
358 | irq_server = get_irq_server(virq, 1); | 358 | irq_server = get_irq_server(virq, 1); |
359 | if (irq_server == -1) { | 359 | if (irq_server == -1) { |
360 | char cpulist[128]; | 360 | char cpulist[128]; |
361 | cpumask_scnprintf(cpulist, sizeof(cpulist), &cpumask); | 361 | cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask); |
362 | printk(KERN_WARNING | 362 | printk(KERN_WARNING |
363 | "%s: No online cpus in the mask %s for irq %d\n", | 363 | "%s: No online cpus in the mask %s for irq %d\n", |
364 | __func__, cpulist, virq); | 364 | __func__, cpulist, virq); |
@@ -845,7 +845,7 @@ void xics_migrate_irqs_away(void) | |||
845 | 845 | ||
846 | /* Reset affinity to all cpus */ | 846 | /* Reset affinity to all cpus */ |
847 | irq_desc[virq].affinity = CPU_MASK_ALL; | 847 | irq_desc[virq].affinity = CPU_MASK_ALL; |
848 | desc->chip->set_affinity(virq, CPU_MASK_ALL); | 848 | desc->chip->set_affinity(virq, cpu_all_mask); |
849 | unlock: | 849 | unlock: |
850 | spin_unlock_irqrestore(&desc->lock, flags); | 850 | spin_unlock_irqrestore(&desc->lock, flags); |
851 | } | 851 | } |
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 1890fb085cde..5d7f9f0c93c3 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c | |||
@@ -817,7 +817,7 @@ static void mpic_end_ipi(unsigned int irq) | |||
817 | 817 | ||
818 | #endif /* CONFIG_SMP */ | 818 | #endif /* CONFIG_SMP */ |
819 | 819 | ||
820 | void mpic_set_affinity(unsigned int irq, cpumask_t cpumask) | 820 | void mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask) |
821 | { | 821 | { |
822 | struct mpic *mpic = mpic_from_irq(irq); | 822 | struct mpic *mpic = mpic_from_irq(irq); |
823 | unsigned int src = mpic_irq_to_hw(irq); | 823 | unsigned int src = mpic_irq_to_hw(irq); |
@@ -829,7 +829,7 @@ void mpic_set_affinity(unsigned int irq, cpumask_t cpumask) | |||
829 | } else { | 829 | } else { |
830 | cpumask_t tmp; | 830 | cpumask_t tmp; |
831 | 831 | ||
832 | cpus_and(tmp, cpumask, cpu_online_map); | 832 | cpumask_and(&tmp, cpumask, cpu_online_mask); |
833 | 833 | ||
834 | mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), | 834 | mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), |
835 | mpic_physmask(cpus_addr(tmp)[0])); | 835 | mpic_physmask(cpus_addr(tmp)[0])); |
diff --git a/arch/powerpc/sysdev/mpic.h b/arch/powerpc/sysdev/mpic.h index 6209c62a426d..3cef2af10f42 100644 --- a/arch/powerpc/sysdev/mpic.h +++ b/arch/powerpc/sysdev/mpic.h | |||
@@ -36,6 +36,6 @@ static inline int mpic_pasemi_msi_init(struct mpic *mpic) | |||
36 | 36 | ||
37 | extern int mpic_set_irq_type(unsigned int virq, unsigned int flow_type); | 37 | extern int mpic_set_irq_type(unsigned int virq, unsigned int flow_type); |
38 | extern void mpic_set_vector(unsigned int virq, unsigned int vector); | 38 | extern void mpic_set_vector(unsigned int virq, unsigned int vector); |
39 | extern void mpic_set_affinity(unsigned int irq, cpumask_t cpumask); | 39 | extern void mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask); |
40 | 40 | ||
41 | #endif /* _POWERPC_SYSDEV_MPIC_H */ | 41 | #endif /* _POWERPC_SYSDEV_MPIC_H */ |
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index 52fc836f464d..4aaf18e83c8c 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c | |||
@@ -312,7 +312,8 @@ static void sun4u_irq_enable(unsigned int virt_irq) | |||
312 | } | 312 | } |
313 | } | 313 | } |
314 | 314 | ||
315 | static void sun4u_set_affinity(unsigned int virt_irq, cpumask_t mask) | 315 | static void sun4u_set_affinity(unsigned int virt_irq, |
316 | const struct cpumask *mask) | ||
316 | { | 317 | { |
317 | sun4u_irq_enable(virt_irq); | 318 | sun4u_irq_enable(virt_irq); |
318 | } | 319 | } |
@@ -362,7 +363,8 @@ static void sun4v_irq_enable(unsigned int virt_irq) | |||
362 | ino, err); | 363 | ino, err); |
363 | } | 364 | } |
364 | 365 | ||
365 | static void sun4v_set_affinity(unsigned int virt_irq, cpumask_t mask) | 366 | static void sun4v_set_affinity(unsigned int virt_irq, |
367 | const struct cpumask *mask) | ||
366 | { | 368 | { |
367 | unsigned int ino = virt_irq_table[virt_irq].dev_ino; | 369 | unsigned int ino = virt_irq_table[virt_irq].dev_ino; |
368 | unsigned long cpuid = irq_choose_cpu(virt_irq); | 370 | unsigned long cpuid = irq_choose_cpu(virt_irq); |
@@ -429,7 +431,8 @@ static void sun4v_virq_enable(unsigned int virt_irq) | |||
429 | dev_handle, dev_ino, err); | 431 | dev_handle, dev_ino, err); |
430 | } | 432 | } |
431 | 433 | ||
432 | static void sun4v_virt_set_affinity(unsigned int virt_irq, cpumask_t mask) | 434 | static void sun4v_virt_set_affinity(unsigned int virt_irq, |
435 | const struct cpumask *mask) | ||
433 | { | 436 | { |
434 | unsigned long cpuid, dev_handle, dev_ino; | 437 | unsigned long cpuid, dev_handle, dev_ino; |
435 | int err; | 438 | int err; |
@@ -788,7 +791,7 @@ void fixup_irqs(void) | |||
788 | !(irq_desc[irq].status & IRQ_PER_CPU)) { | 791 | !(irq_desc[irq].status & IRQ_PER_CPU)) { |
789 | if (irq_desc[irq].chip->set_affinity) | 792 | if (irq_desc[irq].chip->set_affinity) |
790 | irq_desc[irq].chip->set_affinity(irq, | 793 | irq_desc[irq].chip->set_affinity(irq, |
791 | irq_desc[irq].affinity); | 794 | &irq_desc[irq].affinity); |
792 | } | 795 | } |
793 | spin_unlock_irqrestore(&irq_desc[irq].lock, flags); | 796 | spin_unlock_irqrestore(&irq_desc[irq].lock, flags); |
794 | } | 797 | } |
diff --git a/arch/sparc64/kernel/of_device.c b/arch/sparc64/kernel/of_device.c index 0f616ae3246c..df2efb7fc14c 100644 --- a/arch/sparc64/kernel/of_device.c +++ b/arch/sparc64/kernel/of_device.c | |||
@@ -780,7 +780,7 @@ out: | |||
780 | if (nid != -1) { | 780 | if (nid != -1) { |
781 | cpumask_t numa_mask = node_to_cpumask(nid); | 781 | cpumask_t numa_mask = node_to_cpumask(nid); |
782 | 782 | ||
783 | irq_set_affinity(irq, numa_mask); | 783 | irq_set_affinity(irq, &numa_mask); |
784 | } | 784 | } |
785 | 785 | ||
786 | return irq; | 786 | return irq; |
diff --git a/arch/sparc64/kernel/pci_msi.c b/arch/sparc64/kernel/pci_msi.c index 2e680f34f727..0d0cd815e83e 100644 --- a/arch/sparc64/kernel/pci_msi.c +++ b/arch/sparc64/kernel/pci_msi.c | |||
@@ -288,7 +288,7 @@ static int bringup_one_msi_queue(struct pci_pbm_info *pbm, | |||
288 | if (nid != -1) { | 288 | if (nid != -1) { |
289 | cpumask_t numa_mask = node_to_cpumask(nid); | 289 | cpumask_t numa_mask = node_to_cpumask(nid); |
290 | 290 | ||
291 | irq_set_affinity(irq, numa_mask); | 291 | irq_set_affinity(irq, &numa_mask); |
292 | } | 292 | } |
293 | err = request_irq(irq, sparc64_msiq_interrupt, 0, | 293 | err = request_irq(irq, sparc64_msiq_interrupt, 0, |
294 | "MSIQ", | 294 | "MSIQ", |
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 067d8de913f6..940f25851e1e 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c | |||
@@ -301,7 +301,7 @@ static void hpet_set_mode(enum clock_event_mode mode, | |||
301 | struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt); | 301 | struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt); |
302 | hpet_setup_msi_irq(hdev->irq); | 302 | hpet_setup_msi_irq(hdev->irq); |
303 | disable_irq(hdev->irq); | 303 | disable_irq(hdev->irq); |
304 | irq_set_affinity(hdev->irq, cpumask_of_cpu(hdev->cpu)); | 304 | irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu)); |
305 | enable_irq(hdev->irq); | 305 | enable_irq(hdev->irq); |
306 | } | 306 | } |
307 | break; | 307 | break; |
@@ -449,7 +449,7 @@ static int hpet_setup_irq(struct hpet_dev *dev) | |||
449 | return -1; | 449 | return -1; |
450 | 450 | ||
451 | disable_irq(dev->irq); | 451 | disable_irq(dev->irq); |
452 | irq_set_affinity(dev->irq, cpumask_of_cpu(dev->cpu)); | 452 | irq_set_affinity(dev->irq, cpumask_of(dev->cpu)); |
453 | enable_irq(dev->irq); | 453 | enable_irq(dev->irq); |
454 | 454 | ||
455 | printk(KERN_DEBUG "hpet: %s irq %d for MSI\n", | 455 | printk(KERN_DEBUG "hpet: %s irq %d for MSI\n", |
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 9043251210fb..1184210e6d0c 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c | |||
@@ -361,7 +361,8 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector) | |||
361 | 361 | ||
362 | static int assign_irq_vector(int irq, cpumask_t mask); | 362 | static int assign_irq_vector(int irq, cpumask_t mask); |
363 | 363 | ||
364 | static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask) | 364 | static void set_ioapic_affinity_irq(unsigned int irq, |
365 | const struct cpumask *mask) | ||
365 | { | 366 | { |
366 | struct irq_cfg *cfg; | 367 | struct irq_cfg *cfg; |
367 | unsigned long flags; | 368 | unsigned long flags; |
@@ -369,15 +370,14 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask) | |||
369 | cpumask_t tmp; | 370 | cpumask_t tmp; |
370 | struct irq_desc *desc; | 371 | struct irq_desc *desc; |
371 | 372 | ||
372 | cpus_and(tmp, mask, cpu_online_map); | 373 | if (!cpumask_intersects(mask, cpu_online_mask)) |
373 | if (cpus_empty(tmp)) | ||
374 | return; | 374 | return; |
375 | 375 | ||
376 | cfg = irq_cfg(irq); | 376 | cfg = irq_cfg(irq); |
377 | if (assign_irq_vector(irq, mask)) | 377 | if (assign_irq_vector(irq, *mask)) |
378 | return; | 378 | return; |
379 | 379 | ||
380 | cpus_and(tmp, cfg->domain, mask); | 380 | cpumask_and(&tmp, &cfg->domain, mask); |
381 | dest = cpu_mask_to_apicid(tmp); | 381 | dest = cpu_mask_to_apicid(tmp); |
382 | /* | 382 | /* |
383 | * Only the high 8 bits are valid. | 383 | * Only the high 8 bits are valid. |
@@ -387,7 +387,7 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask) | |||
387 | desc = irq_to_desc(irq); | 387 | desc = irq_to_desc(irq); |
388 | spin_lock_irqsave(&ioapic_lock, flags); | 388 | spin_lock_irqsave(&ioapic_lock, flags); |
389 | __target_IO_APIC_irq(irq, dest, cfg->vector); | 389 | __target_IO_APIC_irq(irq, dest, cfg->vector); |
390 | desc->affinity = mask; | 390 | cpumask_copy(&desc->affinity, mask); |
391 | spin_unlock_irqrestore(&ioapic_lock, flags); | 391 | spin_unlock_irqrestore(&ioapic_lock, flags); |
392 | } | 392 | } |
393 | #endif /* CONFIG_SMP */ | 393 | #endif /* CONFIG_SMP */ |
@@ -2189,7 +2189,7 @@ static void ir_irq_migration(struct work_struct *work) | |||
2189 | continue; | 2189 | continue; |
2190 | } | 2190 | } |
2191 | 2191 | ||
2192 | desc->chip->set_affinity(irq, desc->pending_mask); | 2192 | desc->chip->set_affinity(irq, &desc->pending_mask); |
2193 | spin_unlock_irqrestore(&desc->lock, flags); | 2193 | spin_unlock_irqrestore(&desc->lock, flags); |
2194 | } | 2194 | } |
2195 | } | 2195 | } |
@@ -2198,18 +2198,19 @@ static void ir_irq_migration(struct work_struct *work) | |||
2198 | /* | 2198 | /* |
2199 | * Migrates the IRQ destination in the process context. | 2199 | * Migrates the IRQ destination in the process context. |
2200 | */ | 2200 | */ |
2201 | static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask) | 2201 | static void set_ir_ioapic_affinity_irq(unsigned int irq, |
2202 | const struct cpumask *mask) | ||
2202 | { | 2203 | { |
2203 | struct irq_desc *desc = irq_to_desc(irq); | 2204 | struct irq_desc *desc = irq_to_desc(irq); |
2204 | 2205 | ||
2205 | if (desc->status & IRQ_LEVEL) { | 2206 | if (desc->status & IRQ_LEVEL) { |
2206 | desc->status |= IRQ_MOVE_PENDING; | 2207 | desc->status |= IRQ_MOVE_PENDING; |
2207 | desc->pending_mask = mask; | 2208 | cpumask_copy(&desc->pending_mask, mask); |
2208 | migrate_irq_remapped_level(irq); | 2209 | migrate_irq_remapped_level(irq); |
2209 | return; | 2210 | return; |
2210 | } | 2211 | } |
2211 | 2212 | ||
2212 | migrate_ioapic_irq(irq, mask); | 2213 | migrate_ioapic_irq(irq, *mask); |
2213 | } | 2214 | } |
2214 | #endif | 2215 | #endif |
2215 | 2216 | ||
@@ -3027,7 +3028,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms | |||
3027 | } | 3028 | } |
3028 | 3029 | ||
3029 | #ifdef CONFIG_SMP | 3030 | #ifdef CONFIG_SMP |
3030 | static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask) | 3031 | static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) |
3031 | { | 3032 | { |
3032 | struct irq_cfg *cfg; | 3033 | struct irq_cfg *cfg; |
3033 | struct msi_msg msg; | 3034 | struct msi_msg msg; |
@@ -3035,15 +3036,14 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask) | |||
3035 | cpumask_t tmp; | 3036 | cpumask_t tmp; |
3036 | struct irq_desc *desc; | 3037 | struct irq_desc *desc; |
3037 | 3038 | ||
3038 | cpus_and(tmp, mask, cpu_online_map); | 3039 | if (!cpumask_intersects(mask, cpu_online_mask)) |
3039 | if (cpus_empty(tmp)) | ||
3040 | return; | 3040 | return; |
3041 | 3041 | ||
3042 | if (assign_irq_vector(irq, mask)) | 3042 | if (assign_irq_vector(irq, *mask)) |
3043 | return; | 3043 | return; |
3044 | 3044 | ||
3045 | cfg = irq_cfg(irq); | 3045 | cfg = irq_cfg(irq); |
3046 | cpus_and(tmp, cfg->domain, mask); | 3046 | cpumask_and(&tmp, &cfg->domain, mask); |
3047 | dest = cpu_mask_to_apicid(tmp); | 3047 | dest = cpu_mask_to_apicid(tmp); |
3048 | 3048 | ||
3049 | read_msi_msg(irq, &msg); | 3049 | read_msi_msg(irq, &msg); |
@@ -3055,7 +3055,7 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask) | |||
3055 | 3055 | ||
3056 | write_msi_msg(irq, &msg); | 3056 | write_msi_msg(irq, &msg); |
3057 | desc = irq_to_desc(irq); | 3057 | desc = irq_to_desc(irq); |
3058 | desc->affinity = mask; | 3058 | cpumask_copy(&desc->affinity, mask); |
3059 | } | 3059 | } |
3060 | 3060 | ||
3061 | #ifdef CONFIG_INTR_REMAP | 3061 | #ifdef CONFIG_INTR_REMAP |
@@ -3063,7 +3063,8 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask) | |||
3063 | * Migrate the MSI irq to another cpumask. This migration is | 3063 | * Migrate the MSI irq to another cpumask. This migration is |
3064 | * done in the process context using interrupt-remapping hardware. | 3064 | * done in the process context using interrupt-remapping hardware. |
3065 | */ | 3065 | */ |
3066 | static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask) | 3066 | static void ir_set_msi_irq_affinity(unsigned int irq, |
3067 | const struct cpumask *mask) | ||
3067 | { | 3068 | { |
3068 | struct irq_cfg *cfg; | 3069 | struct irq_cfg *cfg; |
3069 | unsigned int dest; | 3070 | unsigned int dest; |
@@ -3071,18 +3072,17 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask) | |||
3071 | struct irte irte; | 3072 | struct irte irte; |
3072 | struct irq_desc *desc; | 3073 | struct irq_desc *desc; |
3073 | 3074 | ||
3074 | cpus_and(tmp, mask, cpu_online_map); | 3075 | if (!cpumask_intersects(mask, cpu_online_mask)) |
3075 | if (cpus_empty(tmp)) | ||
3076 | return; | 3076 | return; |
3077 | 3077 | ||
3078 | if (get_irte(irq, &irte)) | 3078 | if (get_irte(irq, &irte)) |
3079 | return; | 3079 | return; |
3080 | 3080 | ||
3081 | if (assign_irq_vector(irq, mask)) | 3081 | if (assign_irq_vector(irq, *mask)) |
3082 | return; | 3082 | return; |
3083 | 3083 | ||
3084 | cfg = irq_cfg(irq); | 3084 | cfg = irq_cfg(irq); |
3085 | cpus_and(tmp, cfg->domain, mask); | 3085 | cpumask_and(&tmp, &cfg->domain, mask); |
3086 | dest = cpu_mask_to_apicid(tmp); | 3086 | dest = cpu_mask_to_apicid(tmp); |
3087 | 3087 | ||
3088 | irte.vector = cfg->vector; | 3088 | irte.vector = cfg->vector; |
@@ -3106,7 +3106,7 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask) | |||
3106 | } | 3106 | } |
3107 | 3107 | ||
3108 | desc = irq_to_desc(irq); | 3108 | desc = irq_to_desc(irq); |
3109 | desc->affinity = mask; | 3109 | cpumask_copy(&desc->affinity, mask); |
3110 | } | 3110 | } |
3111 | #endif | 3111 | #endif |
3112 | #endif /* CONFIG_SMP */ | 3112 | #endif /* CONFIG_SMP */ |
@@ -3308,7 +3308,7 @@ void arch_teardown_msi_irq(unsigned int irq) | |||
3308 | 3308 | ||
3309 | #ifdef CONFIG_DMAR | 3309 | #ifdef CONFIG_DMAR |
3310 | #ifdef CONFIG_SMP | 3310 | #ifdef CONFIG_SMP |
3311 | static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask) | 3311 | static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) |
3312 | { | 3312 | { |
3313 | struct irq_cfg *cfg; | 3313 | struct irq_cfg *cfg; |
3314 | struct msi_msg msg; | 3314 | struct msi_msg msg; |
@@ -3316,15 +3316,14 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask) | |||
3316 | cpumask_t tmp; | 3316 | cpumask_t tmp; |
3317 | struct irq_desc *desc; | 3317 | struct irq_desc *desc; |
3318 | 3318 | ||
3319 | cpus_and(tmp, mask, cpu_online_map); | 3319 | if (!cpumask_intersects(mask, cpu_online_mask)) |
3320 | if (cpus_empty(tmp)) | ||
3321 | return; | 3320 | return; |
3322 | 3321 | ||
3323 | if (assign_irq_vector(irq, mask)) | 3322 | if (assign_irq_vector(irq, *mask)) |
3324 | return; | 3323 | return; |
3325 | 3324 | ||
3326 | cfg = irq_cfg(irq); | 3325 | cfg = irq_cfg(irq); |
3327 | cpus_and(tmp, cfg->domain, mask); | 3326 | cpumask_and(&tmp, &cfg->domain, mask); |
3328 | dest = cpu_mask_to_apicid(tmp); | 3327 | dest = cpu_mask_to_apicid(tmp); |
3329 | 3328 | ||
3330 | dmar_msi_read(irq, &msg); | 3329 | dmar_msi_read(irq, &msg); |
@@ -3336,7 +3335,7 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask) | |||
3336 | 3335 | ||
3337 | dmar_msi_write(irq, &msg); | 3336 | dmar_msi_write(irq, &msg); |
3338 | desc = irq_to_desc(irq); | 3337 | desc = irq_to_desc(irq); |
3339 | desc->affinity = mask; | 3338 | cpumask_copy(&desc->affinity, mask); |
3340 | } | 3339 | } |
3341 | #endif /* CONFIG_SMP */ | 3340 | #endif /* CONFIG_SMP */ |
3342 | 3341 | ||
@@ -3369,7 +3368,7 @@ int arch_setup_dmar_msi(unsigned int irq) | |||
3369 | #ifdef CONFIG_HPET_TIMER | 3368 | #ifdef CONFIG_HPET_TIMER |
3370 | 3369 | ||
3371 | #ifdef CONFIG_SMP | 3370 | #ifdef CONFIG_SMP |
3372 | static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask) | 3371 | static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) |
3373 | { | 3372 | { |
3374 | struct irq_cfg *cfg; | 3373 | struct irq_cfg *cfg; |
3375 | struct irq_desc *desc; | 3374 | struct irq_desc *desc; |
@@ -3377,15 +3376,14 @@ static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask) | |||
3377 | unsigned int dest; | 3376 | unsigned int dest; |
3378 | cpumask_t tmp; | 3377 | cpumask_t tmp; |
3379 | 3378 | ||
3380 | cpus_and(tmp, mask, cpu_online_map); | 3379 | if (!cpumask_intersects(mask, cpu_online_mask)) |
3381 | if (cpus_empty(tmp)) | ||
3382 | return; | 3380 | return; |
3383 | 3381 | ||
3384 | if (assign_irq_vector(irq, mask)) | 3382 | if (assign_irq_vector(irq, *mask)) |
3385 | return; | 3383 | return; |
3386 | 3384 | ||
3387 | cfg = irq_cfg(irq); | 3385 | cfg = irq_cfg(irq); |
3388 | cpus_and(tmp, cfg->domain, mask); | 3386 | cpumask_and(&tmp, &cfg->domain, mask); |
3389 | dest = cpu_mask_to_apicid(tmp); | 3387 | dest = cpu_mask_to_apicid(tmp); |
3390 | 3388 | ||
3391 | hpet_msi_read(irq, &msg); | 3389 | hpet_msi_read(irq, &msg); |
@@ -3397,7 +3395,7 @@ static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask) | |||
3397 | 3395 | ||
3398 | hpet_msi_write(irq, &msg); | 3396 | hpet_msi_write(irq, &msg); |
3399 | desc = irq_to_desc(irq); | 3397 | desc = irq_to_desc(irq); |
3400 | desc->affinity = mask; | 3398 | cpumask_copy(&desc->affinity, mask); |
3401 | } | 3399 | } |
3402 | #endif /* CONFIG_SMP */ | 3400 | #endif /* CONFIG_SMP */ |
3403 | 3401 | ||
@@ -3451,27 +3449,26 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector) | |||
3451 | write_ht_irq_msg(irq, &msg); | 3449 | write_ht_irq_msg(irq, &msg); |
3452 | } | 3450 | } |
3453 | 3451 | ||
3454 | static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask) | 3452 | static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask) |
3455 | { | 3453 | { |
3456 | struct irq_cfg *cfg; | 3454 | struct irq_cfg *cfg; |
3457 | unsigned int dest; | 3455 | unsigned int dest; |
3458 | cpumask_t tmp; | 3456 | cpumask_t tmp; |
3459 | struct irq_desc *desc; | 3457 | struct irq_desc *desc; |
3460 | 3458 | ||
3461 | cpus_and(tmp, mask, cpu_online_map); | 3459 | if (!cpumask_intersects(mask, cpu_online_mask)) |
3462 | if (cpus_empty(tmp)) | ||
3463 | return; | 3460 | return; |
3464 | 3461 | ||
3465 | if (assign_irq_vector(irq, mask)) | 3462 | if (assign_irq_vector(irq, *mask)) |
3466 | return; | 3463 | return; |
3467 | 3464 | ||
3468 | cfg = irq_cfg(irq); | 3465 | cfg = irq_cfg(irq); |
3469 | cpus_and(tmp, cfg->domain, mask); | 3466 | cpumask_and(&tmp, &cfg->domain, mask); |
3470 | dest = cpu_mask_to_apicid(tmp); | 3467 | dest = cpu_mask_to_apicid(tmp); |
3471 | 3468 | ||
3472 | target_ht_irq(irq, dest, cfg->vector); | 3469 | target_ht_irq(irq, dest, cfg->vector); |
3473 | desc = irq_to_desc(irq); | 3470 | desc = irq_to_desc(irq); |
3474 | desc->affinity = mask; | 3471 | cpumask_copy(&desc->affinity, mask); |
3475 | } | 3472 | } |
3476 | #endif | 3473 | #endif |
3477 | 3474 | ||
@@ -3794,10 +3791,10 @@ void __init setup_ioapic_dest(void) | |||
3794 | 3791 | ||
3795 | #ifdef CONFIG_INTR_REMAP | 3792 | #ifdef CONFIG_INTR_REMAP |
3796 | if (intr_remapping_enabled) | 3793 | if (intr_remapping_enabled) |
3797 | set_ir_ioapic_affinity_irq(irq, mask); | 3794 | set_ir_ioapic_affinity_irq(irq, &mask); |
3798 | else | 3795 | else |
3799 | #endif | 3796 | #endif |
3800 | set_ioapic_affinity_irq(irq, mask); | 3797 | set_ioapic_affinity_irq(irq, &mask); |
3801 | } | 3798 | } |
3802 | 3799 | ||
3803 | } | 3800 | } |
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index a51382672de0..87870a49be4e 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c | |||
@@ -251,7 +251,7 @@ void fixup_irqs(cpumask_t map) | |||
251 | mask = map; | 251 | mask = map; |
252 | } | 252 | } |
253 | if (desc->chip->set_affinity) | 253 | if (desc->chip->set_affinity) |
254 | desc->chip->set_affinity(irq, mask); | 254 | desc->chip->set_affinity(irq, &mask); |
255 | else if (desc->action && !(warned++)) | 255 | else if (desc->action && !(warned++)) |
256 | printk("Cannot set affinity for irq %i\n", irq); | 256 | printk("Cannot set affinity for irq %i\n", irq); |
257 | } | 257 | } |
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 60eb84eb77a0..7d37f847544d 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c | |||
@@ -116,7 +116,7 @@ void fixup_irqs(cpumask_t map) | |||
116 | desc->chip->mask(irq); | 116 | desc->chip->mask(irq); |
117 | 117 | ||
118 | if (desc->chip->set_affinity) | 118 | if (desc->chip->set_affinity) |
119 | desc->chip->set_affinity(irq, mask); | 119 | desc->chip->set_affinity(irq, &mask); |
120 | else if (!(warned++)) | 120 | else if (!(warned++)) |
121 | set_affinity = 0; | 121 | set_affinity = 0; |
122 | 122 | ||
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c index 7beffcab2745..9dedbbd218c3 100644 --- a/drivers/parisc/iosapic.c +++ b/drivers/parisc/iosapic.c | |||
@@ -704,16 +704,17 @@ static unsigned int iosapic_startup_irq(unsigned int irq) | |||
704 | } | 704 | } |
705 | 705 | ||
706 | #ifdef CONFIG_SMP | 706 | #ifdef CONFIG_SMP |
707 | static void iosapic_set_affinity_irq(unsigned int irq, cpumask_t dest) | 707 | static void iosapic_set_affinity_irq(unsigned int irq, |
708 | const struct cpumask *dest) | ||
708 | { | 709 | { |
709 | struct vector_info *vi = iosapic_get_vector(irq); | 710 | struct vector_info *vi = iosapic_get_vector(irq); |
710 | u32 d0, d1, dummy_d0; | 711 | u32 d0, d1, dummy_d0; |
711 | unsigned long flags; | 712 | unsigned long flags; |
712 | 713 | ||
713 | if (cpu_check_affinity(irq, &dest)) | 714 | if (cpu_check_affinity(irq, dest)) |
714 | return; | 715 | return; |
715 | 716 | ||
716 | vi->txn_addr = txn_affinity_addr(irq, first_cpu(dest)); | 717 | vi->txn_addr = txn_affinity_addr(irq, cpumask_first(dest)); |
717 | 718 | ||
718 | spin_lock_irqsave(&iosapic_lock, flags); | 719 | spin_lock_irqsave(&iosapic_lock, flags); |
719 | /* d1 contains the destination CPU, so only want to set that | 720 | /* d1 contains the destination CPU, so only want to set that |
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 1e3b934a4cf7..eba5ec5b020e 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
@@ -579,7 +579,7 @@ void rebind_evtchn_irq(int evtchn, int irq) | |||
579 | spin_unlock(&irq_mapping_update_lock); | 579 | spin_unlock(&irq_mapping_update_lock); |
580 | 580 | ||
581 | /* new event channels are always bound to cpu 0 */ | 581 | /* new event channels are always bound to cpu 0 */ |
582 | irq_set_affinity(irq, cpumask_of_cpu(0)); | 582 | irq_set_affinity(irq, cpumask_of(0)); |
583 | 583 | ||
584 | /* Unmask the event channel. */ | 584 | /* Unmask the event channel. */ |
585 | enable_irq(irq); | 585 | enable_irq(irq); |
@@ -608,9 +608,9 @@ static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu) | |||
608 | } | 608 | } |
609 | 609 | ||
610 | 610 | ||
611 | static void set_affinity_irq(unsigned irq, cpumask_t dest) | 611 | static void set_affinity_irq(unsigned irq, const struct cpumask *dest) |
612 | { | 612 | { |
613 | unsigned tcpu = first_cpu(dest); | 613 | unsigned tcpu = cpumask_first(dest); |
614 | rebind_irq_to_cpu(irq, tcpu); | 614 | rebind_irq_to_cpu(irq, tcpu); |
615 | } | 615 | } |
616 | 616 | ||
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index f58a0cf8929a..48e63934fabe 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
@@ -109,13 +109,13 @@ extern void enable_irq(unsigned int irq); | |||
109 | 109 | ||
110 | extern cpumask_t irq_default_affinity; | 110 | extern cpumask_t irq_default_affinity; |
111 | 111 | ||
112 | extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask); | 112 | extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask); |
113 | extern int irq_can_set_affinity(unsigned int irq); | 113 | extern int irq_can_set_affinity(unsigned int irq); |
114 | extern int irq_select_affinity(unsigned int irq); | 114 | extern int irq_select_affinity(unsigned int irq); |
115 | 115 | ||
116 | #else /* CONFIG_SMP */ | 116 | #else /* CONFIG_SMP */ |
117 | 117 | ||
118 | static inline int irq_set_affinity(unsigned int irq, cpumask_t cpumask) | 118 | static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m) |
119 | { | 119 | { |
120 | return -EINVAL; | 120 | return -EINVAL; |
121 | } | 121 | } |
diff --git a/include/linux/irq.h b/include/linux/irq.h index 3dddfa703ebd..ab70fd604d3a 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
@@ -113,7 +113,8 @@ struct irq_chip { | |||
113 | void (*eoi)(unsigned int irq); | 113 | void (*eoi)(unsigned int irq); |
114 | 114 | ||
115 | void (*end)(unsigned int irq); | 115 | void (*end)(unsigned int irq); |
116 | void (*set_affinity)(unsigned int irq, cpumask_t dest); | 116 | void (*set_affinity)(unsigned int irq, |
117 | const struct cpumask *dest); | ||
117 | int (*retrigger)(unsigned int irq); | 118 | int (*retrigger)(unsigned int irq); |
118 | int (*set_type)(unsigned int irq, unsigned int flow_type); | 119 | int (*set_type)(unsigned int irq, unsigned int flow_type); |
119 | int (*set_wake)(unsigned int irq, unsigned int on); | 120 | int (*set_wake)(unsigned int irq, unsigned int on); |
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 10b5092e9bfe..58d8e31daa49 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
@@ -45,7 +45,7 @@ void dynamic_irq_init(unsigned int irq) | |||
45 | desc->irq_count = 0; | 45 | desc->irq_count = 0; |
46 | desc->irqs_unhandled = 0; | 46 | desc->irqs_unhandled = 0; |
47 | #ifdef CONFIG_SMP | 47 | #ifdef CONFIG_SMP |
48 | cpus_setall(desc->affinity); | 48 | cpumask_setall(&desc->affinity); |
49 | #endif | 49 | #endif |
50 | spin_unlock_irqrestore(&desc->lock, flags); | 50 | spin_unlock_irqrestore(&desc->lock, flags); |
51 | } | 51 | } |
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 801addda3c43..10ad2f87ed9a 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -79,7 +79,7 @@ int irq_can_set_affinity(unsigned int irq) | |||
79 | * @cpumask: cpumask | 79 | * @cpumask: cpumask |
80 | * | 80 | * |
81 | */ | 81 | */ |
82 | int irq_set_affinity(unsigned int irq, cpumask_t cpumask) | 82 | int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) |
83 | { | 83 | { |
84 | struct irq_desc *desc = irq_to_desc(irq); | 84 | struct irq_desc *desc = irq_to_desc(irq); |
85 | unsigned long flags; | 85 | unsigned long flags; |
@@ -91,14 +91,14 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask) | |||
91 | 91 | ||
92 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 92 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
93 | if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) { | 93 | if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) { |
94 | desc->affinity = cpumask; | 94 | cpumask_copy(&desc->affinity, cpumask); |
95 | desc->chip->set_affinity(irq, cpumask); | 95 | desc->chip->set_affinity(irq, cpumask); |
96 | } else { | 96 | } else { |
97 | desc->status |= IRQ_MOVE_PENDING; | 97 | desc->status |= IRQ_MOVE_PENDING; |
98 | desc->pending_mask = cpumask; | 98 | cpumask_copy(&desc->pending_mask, cpumask); |
99 | } | 99 | } |
100 | #else | 100 | #else |
101 | desc->affinity = cpumask; | 101 | cpumask_copy(&desc->affinity, cpumask); |
102 | desc->chip->set_affinity(irq, cpumask); | 102 | desc->chip->set_affinity(irq, cpumask); |
103 | #endif | 103 | #endif |
104 | desc->status |= IRQ_AFFINITY_SET; | 104 | desc->status |= IRQ_AFFINITY_SET; |
@@ -112,26 +112,24 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask) | |||
112 | */ | 112 | */ |
113 | int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc) | 113 | int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc) |
114 | { | 114 | { |
115 | cpumask_t mask; | ||
116 | |||
117 | if (!irq_can_set_affinity(irq)) | 115 | if (!irq_can_set_affinity(irq)) |
118 | return 0; | 116 | return 0; |
119 | 117 | ||
120 | cpus_and(mask, cpu_online_map, irq_default_affinity); | ||
121 | |||
122 | /* | 118 | /* |
123 | * Preserve an userspace affinity setup, but make sure that | 119 | * Preserve an userspace affinity setup, but make sure that |
124 | * one of the targets is online. | 120 | * one of the targets is online. |
125 | */ | 121 | */ |
126 | if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { | 122 | if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { |
127 | if (cpus_intersects(desc->affinity, cpu_online_map)) | 123 | if (cpumask_any_and(&desc->affinity, cpu_online_mask) |
128 | mask = desc->affinity; | 124 | < nr_cpu_ids) |
125 | goto set_affinity; | ||
129 | else | 126 | else |
130 | desc->status &= ~IRQ_AFFINITY_SET; | 127 | desc->status &= ~IRQ_AFFINITY_SET; |
131 | } | 128 | } |
132 | 129 | ||
133 | desc->affinity = mask; | 130 | cpumask_and(&desc->affinity, cpu_online_mask, &irq_default_affinity); |
134 | desc->chip->set_affinity(irq, mask); | 131 | set_affinity: |
132 | desc->chip->set_affinity(irq, &desc->affinity); | ||
135 | 133 | ||
136 | return 0; | 134 | return 0; |
137 | } | 135 | } |
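The manage.c rework drops the on-stack cpumask_t by leaning on two pointer-based idioms: cpumask_any_and(a, b) < nr_cpu_ids answers "do these masks intersect?" without building a temporary, and cpumask_and() writes its result straight into desc->affinity. A small illustrative helper for the first idiom (the helper name is invented):

#include <linux/cpumask.h>
#include <linux/types.h>

/*
 * Does @wanted contain at least one online CPU?  Equivalent to the old
 * cpus_intersects(wanted, cpu_online_map), but takes pointers and
 * needs no temporary mask.
 */
static bool example_mask_has_online_cpu(const struct cpumask *wanted)
{
	return cpumask_any_and(wanted, cpu_online_mask) < nr_cpu_ids;
}

Where only a yes/no answer is needed, cpumask_intersects() (used in the kernel/irq/proc.c hunk below) reads more directly; the cpumask_any_and() form additionally tells the caller which CPU matched.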
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c index 9db681d95814..bd72329e630c 100644 --- a/kernel/irq/migration.c +++ b/kernel/irq/migration.c | |||
@@ -4,7 +4,6 @@ | |||
4 | void move_masked_irq(int irq) | 4 | void move_masked_irq(int irq) |
5 | { | 5 | { |
6 | struct irq_desc *desc = irq_to_desc(irq); | 6 | struct irq_desc *desc = irq_to_desc(irq); |
7 | cpumask_t tmp; | ||
8 | 7 | ||
9 | if (likely(!(desc->status & IRQ_MOVE_PENDING))) | 8 | if (likely(!(desc->status & IRQ_MOVE_PENDING))) |
10 | return; | 9 | return; |
@@ -19,7 +18,7 @@ void move_masked_irq(int irq) | |||
19 | 18 | ||
20 | desc->status &= ~IRQ_MOVE_PENDING; | 19 | desc->status &= ~IRQ_MOVE_PENDING; |
21 | 20 | ||
22 | if (unlikely(cpus_empty(desc->pending_mask))) | 21 | if (unlikely(cpumask_empty(&desc->pending_mask))) |
23 | return; | 22 | return; |
24 | 23 | ||
25 | if (!desc->chip->set_affinity) | 24 | if (!desc->chip->set_affinity) |
@@ -27,8 +26,6 @@ void move_masked_irq(int irq) | |||
27 | 26 | ||
28 | assert_spin_locked(&desc->lock); | 27 | assert_spin_locked(&desc->lock); |
29 | 28 | ||
30 | cpus_and(tmp, desc->pending_mask, cpu_online_map); | ||
31 | |||
32 | /* | 29 | /* |
33 | * If there was a valid mask to work with, please | 30 | * If there was a valid mask to work with, please |
34 | * do the disable, re-program, enable sequence. | 31 | * do the disable, re-program, enable sequence. |
@@ -41,10 +38,13 @@ void move_masked_irq(int irq) | |||
41 | * For correct operation this depends on the caller | 38 | * For correct operation this depends on the caller |
42 | * masking the irqs. | 39 | * masking the irqs. |
43 | */ | 40 | */ |
44 | if (likely(!cpus_empty(tmp))) { | 41 | if (likely(cpumask_any_and(&desc->pending_mask, cpu_online_mask) |
45 | desc->chip->set_affinity(irq,tmp); | 42 | < nr_cpu_ids)) { |
43 | cpumask_and(&desc->affinity, | ||
44 | &desc->pending_mask, cpu_online_mask); | ||
45 | desc->chip->set_affinity(irq, &desc->affinity); | ||
46 | } | 46 | } |
47 | cpus_clear(desc->pending_mask); | 47 | cpumask_clear(&desc->pending_mask); |
48 | } | 48 | } |
49 | 49 | ||
50 | void move_native_irq(int irq) | 50 | void move_native_irq(int irq) |
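Read as a whole, the reworked move_masked_irq() computes the online intersection directly into desc->affinity instead of an on-stack temporary. Roughly, the resulting flow looks like this (a simplified sketch: locking, status flags and config dependencies are elided):

#include <linux/cpumask.h>
#include <linux/irq.h>

static void example_apply_pending_affinity(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || cpumask_empty(&desc->pending_mask))
		return;

	if (!desc->chip->set_affinity)
		return;

	/* Only reprogram if the pending mask still names an online CPU. */
	if (cpumask_any_and(&desc->pending_mask, cpu_online_mask)
	    < nr_cpu_ids) {
		cpumask_and(&desc->affinity,
			    &desc->pending_mask, cpu_online_mask);
		desc->chip->set_affinity(irq, &desc->affinity);
	}

	cpumask_clear(&desc->pending_mask);
}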
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index f293349d49d0..8e91c9762520 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c | |||
@@ -40,33 +40,42 @@ static ssize_t irq_affinity_proc_write(struct file *file, | |||
40 | const char __user *buffer, size_t count, loff_t *pos) | 40 | const char __user *buffer, size_t count, loff_t *pos) |
41 | { | 41 | { |
42 | unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data; | 42 | unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data; |
43 | cpumask_t new_value; | 43 | cpumask_var_t new_value; |
44 | int err; | 44 | int err; |
45 | 45 | ||
46 | if (!irq_to_desc(irq)->chip->set_affinity || no_irq_affinity || | 46 | if (!irq_to_desc(irq)->chip->set_affinity || no_irq_affinity || |
47 | irq_balancing_disabled(irq)) | 47 | irq_balancing_disabled(irq)) |
48 | return -EIO; | 48 | return -EIO; |
49 | 49 | ||
50 | err = cpumask_parse_user(buffer, count, &new_value); | 50 | if (!alloc_cpumask_var(&new_value, GFP_KERNEL)) |
51 | return -ENOMEM; | ||
52 | |||
53 | err = cpumask_parse_user(buffer, count, new_value); | ||
51 | if (err) | 54 | if (err) |
52 | return err; | 55 | goto free_cpumask; |
53 | 56 | ||
54 | if (!is_affinity_mask_valid(new_value)) | 57 | if (!is_affinity_mask_valid(*new_value)) { |
55 | return -EINVAL; | 58 | err = -EINVAL; |
59 | goto free_cpumask; | ||
60 | } | ||
56 | 61 | ||
57 | /* | 62 | /* |
58 | * Do not allow disabling IRQs completely - it's a too easy | 63 | * Do not allow disabling IRQs completely - it's a too easy |
59 | * way to make the system unusable accidentally :-) At least | 64 | * way to make the system unusable accidentally :-) At least |
60 | * one online CPU still has to be targeted. | 65 | * one online CPU still has to be targeted. |
61 | */ | 66 | */ |
62 | if (!cpus_intersects(new_value, cpu_online_map)) | 67 | if (!cpumask_intersects(new_value, cpu_online_mask)) { |
63 | /* Special case for empty set - allow the architecture | 68 | /* Special case for empty set - allow the architecture |
64 | code to set default SMP affinity. */ | 69 | code to set default SMP affinity. */ |
65 | return irq_select_affinity_usr(irq) ? -EINVAL : count; | 70 | err = irq_select_affinity_usr(irq) ? -EINVAL : count; |
66 | 71 | } else { | |
67 | irq_set_affinity(irq, new_value); | 72 | irq_set_affinity(irq, new_value); |
73 | err = count; | ||
74 | } | ||
68 | 75 | ||
69 | return count; | 76 | free_cpumask: |
77 | free_cpumask_var(new_value); | ||
78 | return err; | ||
70 | } | 79 | } |
71 | 80 | ||
72 | static int irq_affinity_proc_open(struct inode *inode, struct file *file) | 81 | static int irq_affinity_proc_open(struct inode *inode, struct file *file) |
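The proc handler now keeps the user-supplied mask in a cpumask_var_t, which is heap-allocated when CONFIG_CPUMASK_OFFSTACK is enabled and must always be released with free_cpumask_var(); hence the single exit label. A stripped-down sketch of the same allocate/parse/use/free shape (the example_* functions are invented):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/types.h>

/* Hypothetical consumer of the parsed mask. */
static int example_use_mask(const struct cpumask *mask)
{
	return cpumask_empty(mask) ? -EINVAL : 0;
}

static int example_parse_user_mask(const char __user *buf, size_t count)
{
	cpumask_var_t mask;
	int err;

	/* May allocate; always check, even when the mask fits on stack. */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buf, count, mask);
	if (!err)
		err = example_use_mask(mask);

	free_cpumask_var(mask);		/* one exit path frees in all cases */
	return err;
}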
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index df12434b43ca..ab65d217583f 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c | |||
@@ -136,7 +136,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast) | |||
136 | */ | 136 | */ |
137 | static void tick_setup_device(struct tick_device *td, | 137 | static void tick_setup_device(struct tick_device *td, |
138 | struct clock_event_device *newdev, int cpu, | 138 | struct clock_event_device *newdev, int cpu, |
139 | const cpumask_t *cpumask) | 139 | const struct cpumask *cpumask) |
140 | { | 140 | { |
141 | ktime_t next_event; | 141 | ktime_t next_event; |
142 | void (*handler)(struct clock_event_device *) = NULL; | 142 | void (*handler)(struct clock_event_device *) = NULL; |
@@ -171,8 +171,8 @@ static void tick_setup_device(struct tick_device *td, | |||
171 | * When the device is not per cpu, pin the interrupt to the | 171 | * When the device is not per cpu, pin the interrupt to the |
172 | * current cpu: | 172 | * current cpu: |
173 | */ | 173 | */ |
174 | if (!cpus_equal(newdev->cpumask, *cpumask)) | 174 | if (!cpumask_equal(&newdev->cpumask, cpumask)) |
175 | irq_set_affinity(newdev->irq, *cpumask); | 175 | irq_set_affinity(newdev->irq, cpumask); |
176 | 176 | ||
177 | /* | 177 | /* |
178 | * When global broadcasting is active, check if the current | 178 | * When global broadcasting is active, check if the current |
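The tick code shows the receiving end of the conversion: internal helpers take const struct cpumask * rather than const cpumask_t * or a by-value copy, compare with cpumask_equal(), and pass the pointer through to irq_set_affinity() unchanged. A closing illustrative sketch, with the device structure and field names invented:

#include <linux/cpumask.h>
#include <linux/interrupt.h>

/* Hypothetical per-device state with an embedded affinity mask. */
struct example_event_device {
	unsigned int	irq;
	struct cpumask	cpumask;	/* CPUs this device may serve */
};

/* Retarget the device irq to @cpumask unless it already matches. */
static void example_setup_device(struct example_event_device *dev,
				 const struct cpumask *cpumask)
{
	if (!cpumask_equal(&dev->cpumask, cpumask))
		irq_set_affinity(dev->irq, cpumask);
}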