| author | Suresh Siddha <suresh.b.siddha@intel.com> | 2009-10-26 18:24:31 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2009-11-02 09:56:34 -0500 |
| commit | 7a7732bc0f7c46f217dbec723f25366b6285cc42 (patch) | |
| tree | 6238e0cf1d22c1939558b693e9a51f2703099639 /arch/x86/kernel/irq.c | |
| parent | 6f9b41006af1bc489030f84ee247abc0df1edccd (diff) | |
x86: Unify fixup_irqs() for 32-bit and 64-bit kernels
There is no reason to have different fixup_irqs() for 32-bit and
64-bit kernels. Unify by using the superior 64-bit version for
both the kernels.
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Gary Hade <garyhade@us.ibm.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
LKML-Reference: <20091026230001.562512739@sbs-t61.sc.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/irq.c')
-rw-r--r-- | arch/x86/kernel/irq.c | 59 |
1 file changed, 59 insertions, 0 deletions
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 391206199515..3ea66556e5e1 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -276,3 +276,62 @@ void smp_generic_interrupt(struct pt_regs *regs)
 }
 
 EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
+
+#ifdef CONFIG_HOTPLUG_CPU
+/* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
+void fixup_irqs(void)
+{
+	unsigned int irq;
+	static int warned;
+	struct irq_desc *desc;
+
+	for_each_irq_desc(irq, desc) {
+		int break_affinity = 0;
+		int set_affinity = 1;
+		const struct cpumask *affinity;
+
+		if (!desc)
+			continue;
+		if (irq == 2)
+			continue;
+
+		/* interrupt's are disabled at this point */
+		spin_lock(&desc->lock);
+
+		affinity = desc->affinity;
+		if (!irq_has_action(irq) ||
+		    cpumask_equal(affinity, cpu_online_mask)) {
+			spin_unlock(&desc->lock);
+			continue;
+		}
+
+		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+			break_affinity = 1;
+			affinity = cpu_all_mask;
+		}
+
+		if (desc->chip->mask)
+			desc->chip->mask(irq);
+
+		if (desc->chip->set_affinity)
+			desc->chip->set_affinity(irq, affinity);
+		else if (!(warned++))
+			set_affinity = 0;
+
+		if (desc->chip->unmask)
+			desc->chip->unmask(irq);
+
+		spin_unlock(&desc->lock);
+
+		if (break_affinity && set_affinity)
+			printk("Broke affinity for irq %i\n", irq);
+		else if (!set_affinity)
+			printk("Cannot set affinity for irq %i\n", irq);
+	}
+
+	/* That doesn't seem sufficient.  Give it 1ms. */
+	local_irq_enable();
+	mdelay(1);
+	local_irq_disable();
+}
+#endif
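
The heart of the hunk is the break-affinity decision: if an IRQ's affinity mask no longer intersects cpu_online_mask, the descriptor is retargeted at cpu_all_mask and the change is logged. Below is a minimal user-space sketch of that decision only, assuming nothing from the kernel: cpumasks are approximated with plain bitmasks, the irq descriptor is reduced to an affinity field, and the names (irq_model, fixup_one) are illustrative rather than kernel APIs.

```c
#include <stdio.h>

/*
 * User-space model only: a "cpumask" is a plain unsigned long bitmask and
 * the irq descriptor is reduced to an affinity field.
 */
struct irq_model {
	int irq;
	unsigned long affinity;		/* bit N set => IRQ may target CPU N */
};

/*
 * Mirror of the hunk's decision: if the IRQ no longer targets any online
 * CPU, break affinity by retargeting it at every possible CPU.
 */
static void fixup_one(struct irq_model *d, unsigned long online_mask,
		      unsigned long all_mask)
{
	if ((d->affinity & online_mask) == 0) {
		d->affinity = all_mask;
		printf("Broke affinity for irq %d\n", d->irq);
	}
}

int main(void)
{
	unsigned long all = 0xf;		/* CPUs 0-3 exist          */
	unsigned long online = 0x7;		/* CPU 3 was just offlined */
	struct irq_model a = { 16, 0x8 };	/* bound only to CPU 3     */
	struct irq_model b = { 17, 0x6 };	/* bound to CPUs 1-2       */

	fixup_one(&a, online, all);	/* prints: Broke affinity for irq 16 */
	fixup_one(&b, online, all);	/* still hits an online CPU: no-op   */
	return 0;
}
```

In the real function the retargeting is of course delegated to the irqchip (desc->chip->mask / set_affinity / unmask, all under desc->lock), and the closing local_irq_enable(); mdelay(1); local_irq_disable() sequence apparently gives already-asserted interrupts a brief window to be serviced before the CPU is finally taken down.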