| author | Ashok Raj <ashok.raj@intel.com> | 2005-06-25 17:55:00 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-06-25 19:24:30 -0400 |
| commit | 76e4f660d9f4c6d1bb473f72be2988c35eaca948 (patch) | |
| tree | 3a0b3ae0cc12d6f0d96bf1f3b564f8470e0f72ae /arch/x86_64/kernel/irq.c | |
| parent | e6982c671c560da4a0bc5c908cbcbec12bd5991d (diff) | |
[PATCH] x86_64: CPU hotplug support
Experimental CPU hotplug patch for x86_64
-----------------------------------------
This supports logical CPU online and offline.
- Test with maxcpus=1, then bring the other CPUs online to verify that the init
  code is all cleaned up. CONFIG_SCHED_SMT works as well.
- Idle threads are forked on demand from keventd threads for clean startup.
TBD:
1. Not tested on a real NUMA machine (tested with numa=fake=2)
2. Handle ACPI pieces for physical hotplug support.
Signed-off-by: Ashok Raj <ashok.raj@intel.com>
Acked-by: Andi Kleen <ak@muc.de>
Acked-by: Zwane Mwaikambo <zwane@arm.linux.org.uk>
Signed-off-by: Shaohua.li <shaohua.li@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
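
As an aside on the "logical CPU online and offline" that the changelog mentions: with CONFIG_HOTPLUG_CPU enabled, CPUs are taken down and brought back through the per-CPU sysfs control file. A minimal userspace sketch follows; the choice of CPU 1 is an illustrative assumption, and the program needs root to write the sysfs file.

```c
/*
 * Illustrative sketch only: drives the standard sysfs CPU hotplug control
 * (/sys/devices/system/cpu/cpuN/online).  CPU 1 is an arbitrary choice and
 * the kernel must be built with CONFIG_HOTPLUG_CPU.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static int set_cpu_online(int cpu, int online)
{
	char path[64];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%d/online", cpu);
	f = fopen(path, "w");
	if (!f) {
		fprintf(stderr, "open %s: %s\n", path, strerror(errno));
		return -1;
	}
	/* Writing "0" offlines the CPU, "1" brings it back online. */
	fprintf(f, "%d\n", online);
	fclose(f);
	return 0;
}

int main(void)
{
	/* Offline CPU 1, then bring it back, exercising the hotplug path. */
	if (set_cpu_online(1, 0) == 0)
		set_cpu_online(1, 1);
	return 0;
}
```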
Diffstat (limited to 'arch/x86_64/kernel/irq.c')
-rw-r--r-- | arch/x86_64/kernel/irq.c | 29 |
1 file changed, 29 insertions(+), 0 deletions(-)
diff --git a/arch/x86_64/kernel/irq.c b/arch/x86_64/kernel/irq.c
index 62b112e4deb..cc3fb85f514 100644
--- a/arch/x86_64/kernel/irq.c
+++ b/arch/x86_64/kernel/irq.c
@@ -14,6 +14,7 @@
 #include <linux/interrupt.h>
 #include <linux/seq_file.h>
 #include <linux/module.h>
+#include <linux/delay.h>
 #include <asm/uaccess.h>
 #include <asm/io_apic.h>
 
@@ -106,3 +107,31 @@ asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
 	return 1;
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+void fixup_irqs(cpumask_t map)
+{
+	unsigned int irq;
+	static int warned;
+
+	for (irq = 0; irq < NR_IRQS; irq++) {
+		cpumask_t mask;
+		if (irq == 2)
+			continue;
+
+		cpus_and(mask, irq_affinity[irq], map);
+		if (any_online_cpu(mask) == NR_CPUS) {
+			printk("Breaking affinity for irq %i\n", irq);
+			mask = map;
+		}
+		if (irq_desc[irq].handler->set_affinity)
+			irq_desc[irq].handler->set_affinity(irq, mask);
+		else if (irq_desc[irq].action && !(warned++))
+			printk("Cannot set affinity for irq %i\n", irq);
+	}
+
+	/* That doesn't seem sufficient.  Give it 1ms. */
+	local_irq_enable();
+	mdelay(1);
+	local_irq_disable();
+}
+#endif
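
For orientation, fixup_irqs() is meant to be called on the CPU-offline path with the mask of CPUs that will remain online, so that any interrupt whose affinity pointed only at the departing CPU is re-targeted before it goes away. The fragment below is a hypothetical caller sketch, not part of this patch; the __cpu_disable()-style wrapper and its body are assumptions for illustration, using the same 2.6-era cpumask helpers as the diff.

```c
/*
 * Hypothetical caller sketch (assumption, not part of this patch): the CPU
 * being taken down removes itself from the online map and hands the
 * remaining mask to fixup_irqs(), which moves any IRQ bound only to it.
 */
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	cpumask_t map = cpu_online_map;

	cpu_clear(cpu, map);	/* CPUs that will stay online */
	fixup_irqs(map);	/* break affinity to the dying CPU */
	return 0;
}
```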