| author | Mike Travis <travis@sgi.com> | 2009-01-12 18:27:13 -0500 |
|---|---|---|
| committer | Mike Travis <travis@sgi.com> | 2009-01-12 18:27:13 -0500 |
| commit | e65e49d0f3714f4a6a42f6f6a19926ba33fcda75 | |
| tree | 8b805b51f41c980ceb79f8fad0e56dac428c7c37 | /arch/arm |
| parent | 28e08861b9afab4168b758fb7b95aa7a4da0f668 | |
irq: update all arches for new irq_desc
Impact: cleanup, update to new cpumask API
irq_desc.affinity and irq_desc.pending_mask are now cpumask_var_t's,
so access to them should go through the new cpumask API.
Signed-off-by: Mike Travis <travis@sgi.com>
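For context, a minimal sketch of the access-pattern change this series makes. It is kernel-style C assuming a kernel build environment and <linux/cpumask.h>; example_pick_online_cpu() is an illustrative helper, not code from this patch:

```c
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/*
 * Old style (affinity was a struct cpumask embedded in irq_desc):
 *
 *     desc->affinity = CPU_MASK_ALL;
 *     newcpu = any_online_cpu(desc->affinity);
 *
 * New style: cpumask_var_t may be a pointer (CONFIG_CPUMASK_OFFSTACK=y),
 * so the mask is allocated once and then touched only through the
 * cpumask_*() accessors.
 */
static int example_pick_online_cpu(unsigned int preferred_cpu)
{
	cpumask_var_t affinity;
	unsigned int newcpu;

	/* Real allocation only with CONFIG_CPUMASK_OFFSTACK=y; otherwise a no-op. */
	if (!alloc_cpumask_var(&affinity, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(affinity, cpumask_of(preferred_cpu));	/* was: desc->affinity = *mask  */
	newcpu = cpumask_any_and(affinity, cpu_online_mask);	/* was: any_online_cpu()        */
	if (newcpu >= nr_cpu_ids) {				/* was: newcpu == NR_CPUS       */
		cpumask_setall(affinity);			/* was: cpus_setall()/CPU_MASK_ALL */
		newcpu = cpumask_any_and(affinity, cpu_online_mask);
	}

	free_cpumask_var(affinity);
	return newcpu;
}
```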
Diffstat (limited to 'arch/arm')

| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | arch/arm/kernel/irq.c | 18 |
| -rw-r--r-- | arch/arm/oprofile/op_model_mpcore.c | 2 |

2 files changed, 13 insertions, 7 deletions
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 7141cee1fab7..4bb723eadad1 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -104,6 +104,11 @@ static struct irq_desc bad_irq_desc = {
 	.lock = SPIN_LOCK_UNLOCKED
 };
 
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* We are not allocating bad_irq_desc.affinity or .pending_mask */
+#error "ARM architecture does not support CONFIG_CPUMASK_OFFSTACK."
+#endif
+
 /*
  * do_IRQ handles all hardware IRQ's. Decoded IRQs should not
  * come via this function. Instead, they should provide their
@@ -161,7 +166,7 @@ void __init init_IRQ(void)
 		irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_NOPROBE;
 
 #ifdef CONFIG_SMP
-	bad_irq_desc.affinity = CPU_MASK_ALL;
+	cpumask_setall(bad_irq_desc.affinity);
 	bad_irq_desc.cpu = smp_processor_id();
 #endif
 	init_arch_irq();
@@ -191,15 +196,16 @@ void migrate_irqs(void)
 		struct irq_desc *desc = irq_desc + i;
 
 		if (desc->cpu == cpu) {
-			unsigned int newcpu = any_online_cpu(desc->affinity);
-
-			if (newcpu == NR_CPUS) {
+			unsigned int newcpu = cpumask_any_and(desc->affinity,
+							      cpu_online_mask);
+			if (newcpu >= nr_cpu_ids) {
 				if (printk_ratelimit())
 					printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
 					       i, cpu);
 
-				cpus_setall(desc->affinity);
-				newcpu = any_online_cpu(desc->affinity);
+				cpumask_setall(desc->affinity);
+				newcpu = cpumask_any_and(desc->affinity,
+							 cpu_online_mask);
 			}
 
 			route_irq(desc, i, newcpu);
diff --git a/arch/arm/oprofile/op_model_mpcore.c b/arch/arm/oprofile/op_model_mpcore.c
index 6d6bd5899240..853d42bb8682 100644
--- a/arch/arm/oprofile/op_model_mpcore.c
+++ b/arch/arm/oprofile/op_model_mpcore.c
@@ -263,7 +263,7 @@ static void em_route_irq(int irq, unsigned int cpu)
 	const struct cpumask *mask = cpumask_of(cpu);
 
 	spin_lock_irq(&desc->lock);
-	desc->affinity = *mask;
+	cpumask_copy(desc->affinity, mask);
 	desc->chip->set_affinity(irq, mask);
 	spin_unlock_irq(&desc->lock);
 }
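Regarding the #error added to arch/arm/kernel/irq.c above: with CONFIG_CPUMASK_OFFSTACK=y, cpumask_var_t is a real pointer, so a statically defined descriptor such as bad_irq_desc would need its masks allocated before first use. A hedged sketch of what that would involve; alloc_bad_irq_desc_masks() is a hypothetical helper, not something this patch adds, and it assumes CONFIG_SMP so the affinity field exists:

```c
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/irq.h>

/*
 * Hypothetical helper: what a static irq_desc would need at boot if ARM
 * did support CONFIG_CPUMASK_OFFSTACK, since its affinity/pending_mask
 * would then start out as NULL pointers rather than embedded cpumasks.
 */
static int __init alloc_bad_irq_desc_masks(struct irq_desc *desc)
{
	if (!alloc_cpumask_var(&desc->affinity, GFP_KERNEL))
		return -ENOMEM;
#ifdef CONFIG_GENERIC_PENDING_IRQ
	/* pending_mask only exists when pending-IRQ migration is configured. */
	if (!alloc_cpumask_var(&desc->pending_mask, GFP_KERNEL)) {
		free_cpumask_var(desc->affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}
```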
