Diffstat (limited to 'arch/x86')
-rw-r--r--   arch/x86/kernel/smpboot.c      85
-rw-r--r--   arch/x86/kernel/smpboot_32.c   67
-rw-r--r--   arch/x86/kernel/smpboot_64.c   79
3 files changed, 85 insertions, 146 deletions
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 644e60969f90..c35cd319d1ed 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -2,6 +2,13 @@
 #include <linux/smp.h>
 #include <linux/module.h>
 #include <linux/sched.h>
+#include <linux/percpu.h>
+
+#include <asm/nmi.h>
+#include <asm/irq.h>
+#include <asm/smp.h>
+#include <asm/cpu.h>
+#include <asm/numa.h>
 
 /* Number of siblings per CPU package */
 int smp_num_siblings = 1;
@@ -181,5 +188,83 @@ __init void prefill_possible_map(void)
 	for (i = 0; i < possible; i++)
 		cpu_set(i, cpu_possible_map);
 }
+
+static void __ref remove_cpu_from_maps(int cpu)
+{
+	cpu_clear(cpu, cpu_online_map);
+#ifdef CONFIG_X86_64
+	cpu_clear(cpu, cpu_callout_map);
+	cpu_clear(cpu, cpu_callin_map);
+	/* was set by cpu_init() */
+	clear_bit(cpu, (unsigned long *)&cpu_initialized);
+	clear_node_cpumask(cpu);
+#endif
+}
+
+int __cpu_disable(void)
+{
+	int cpu = smp_processor_id();
+
+	/*
+	 * Perhaps use cpufreq to drop frequency, but that could go
+	 * into generic code.
+	 *
+	 * We won't take down the boot processor on i386 due to some
+	 * interrupts only being able to be serviced by the BSP.
+	 * Especially so if we're not using an IOAPIC	-zwane
+	 */
+	if (cpu == 0)
+		return -EBUSY;
+
+	if (nmi_watchdog == NMI_LOCAL_APIC)
+		stop_apic_nmi_watchdog(NULL);
+	clear_local_APIC();
+
+	/*
+	 * HACK:
+	 * Allow any queued timer interrupts to get serviced
+	 * This is only a temporary solution until we cleanup
+	 * fixup_irqs as we do for IA64.
+	 */
+	local_irq_enable();
+	mdelay(1);
+
+	local_irq_disable();
+	remove_siblinginfo(cpu);
+
+	/* It's now safe to remove this processor from the online map */
+	remove_cpu_from_maps(cpu);
+	fixup_irqs(cpu_online_map);
+	return 0;
+}
+
+void __cpu_die(unsigned int cpu)
+{
+	/* We don't do anything here: idle task is faking death itself. */
+	unsigned int i;
+
+	for (i = 0; i < 10; i++) {
+		/* They ack this in play_dead by setting CPU_DEAD */
+		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
+			printk(KERN_INFO "CPU %d is now offline\n", cpu);
+			if (1 == num_online_cpus())
+				alternatives_smp_switch(0);
+			return;
+		}
+		msleep(100);
+	}
+	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
+}
+#else /* ... !CONFIG_HOTPLUG_CPU */
+int __cpu_disable(void)
+{
+	return -ENOSYS;
+}
+
+void __cpu_die(unsigned int cpu)
+{
+	/* We said "no" in __cpu_disable */
+	BUG();
+}
 #endif
 
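The __cpu_die() added above relies on a handshake with the CPU being removed: the dying CPU's idle task ends up in play_dead() and acknowledges by storing CPU_DEAD into its per-CPU cpu_state, and the controlling CPU polls for that store, ten times at 100 ms intervals. The following is a minimal user-space sketch of that pattern, not kernel code; cpu_state, the CPU_DEAD value, and the thread standing in for play_dead() are all inventions of the sketch. Build with: cc -pthread die_sketch.c

/*
 * User-space sketch of the __cpu_die()/play_dead() handshake.
 * Everything here is a stand-in: the kernel uses per_cpu(cpu_state, cpu),
 * its own CPU_DEAD constant and msleep(); a real implementation would
 * also use proper atomics/barriers rather than a volatile flag.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define CPU_DEAD 4			/* placeholder value for the sketch */

static volatile int cpu_state;		/* stands in for per_cpu(cpu_state, cpu) */

/* Stands in for the dying CPU running play_dead() from its idle loop. */
static void *fake_play_dead(void *arg)
{
	(void)arg;
	usleep(250 * 1000);		/* pretend teardown takes a while */
	cpu_state = CPU_DEAD;		/* the ack that __cpu_die() waits for */
	return NULL;
}

int main(void)
{
	pthread_t dying;
	unsigned int i;

	pthread_create(&dying, NULL, fake_play_dead, NULL);

	/* Same bounded poll as __cpu_die(): ten tries, 100 ms apart. */
	for (i = 0; i < 10; i++) {
		if (cpu_state == CPU_DEAD) {
			printf("CPU is now offline after %u polls\n", i);
			pthread_join(dying, NULL);
			return 0;
		}
		usleep(100 * 1000);
	}
	printf("CPU didn't die...\n");
	pthread_join(dying, NULL);
	return 1;
}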
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index 3d21c663aa76..00b1b59cd560 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -1040,73 +1040,6 @@ void __init native_smp_prepare_boot_cpu(void)
 	__get_cpu_var(cpu_state) = CPU_ONLINE;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-static void __ref remove_cpu_from_maps(int cpu)
-{
-	cpu_clear(cpu, cpu_online_map);
-}
-
-int __cpu_disable(void)
-{
-	cpumask_t map = cpu_online_map;
-	int cpu = smp_processor_id();
-
-	/*
-	 * Perhaps use cpufreq to drop frequency, but that could go
-	 * into generic code.
-	 *
-	 * We won't take down the boot processor on i386 due to some
-	 * interrupts only being able to be serviced by the BSP.
-	 * Especially so if we're not using an IOAPIC	-zwane
-	 */
-	if (cpu == 0)
-		return -EBUSY;
-	if (nmi_watchdog == NMI_LOCAL_APIC)
-		stop_apic_nmi_watchdog(NULL);
-	clear_local_APIC();
-	/* Allow any queued timer interrupts to get serviced */
-	local_irq_enable();
-	mdelay(1);
-	local_irq_disable();
-
-	remove_siblinginfo(cpu);
-
-	remove_cpu_from_maps(cpu);
-	fixup_irqs(map);
-
-	return 0;
-}
-
-void __cpu_die(unsigned int cpu)
-{
-	/* We don't do anything here: idle task is faking death itself. */
-	unsigned int i;
-
-	for (i = 0; i < 10; i++) {
-		/* They ack this in play_dead by setting CPU_DEAD */
-		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
-			printk ("CPU %d is now offline\n", cpu);
-			if (1 == num_online_cpus())
-				alternatives_smp_switch(0);
-			return;
-		}
-		msleep(100);
-	}
-	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
-}
-#else /* ... !CONFIG_HOTPLUG_CPU */
-int __cpu_disable(void)
-{
-	return -ENOSYS;
-}
-
-void __cpu_die(unsigned int cpu)
-{
-	/* We said "no" in __cpu_disable */
-	BUG();
-}
-#endif /* CONFIG_HOTPLUG_CPU */
-
 int __cpuinit native_cpu_up(unsigned int cpu)
 {
 	unsigned long flags;
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index 6509d3c1b3df..0c67e5ae9c9d 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -836,82 +836,3 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
 	setup_ioapic_dest();
 	check_nmi_watchdog();
 }
-
-#ifdef CONFIG_HOTPLUG_CPU
-static void __ref remove_cpu_from_maps(int cpu)
-{
-	cpu_clear(cpu, cpu_online_map);
-	cpu_clear(cpu, cpu_callout_map);
-	cpu_clear(cpu, cpu_callin_map);
-	clear_bit(cpu, (unsigned long *)&cpu_initialized); /* was set by cpu_init() */
-	clear_node_cpumask(cpu);
-}
-
-int __cpu_disable(void)
-{
-	int cpu = smp_processor_id();
-
-	/*
-	 * Perhaps use cpufreq to drop frequency, but that could go
-	 * into generic code.
-	 *
-	 * We won't take down the boot processor on i386 due to some
-	 * interrupts only being able to be serviced by the BSP.
-	 * Especially so if we're not using an IOAPIC	-zwane
-	 */
-	if (cpu == 0)
-		return -EBUSY;
-
-	if (nmi_watchdog == NMI_LOCAL_APIC)
-		stop_apic_nmi_watchdog(NULL);
-	clear_local_APIC();
-
-	/*
-	 * HACK:
-	 * Allow any queued timer interrupts to get serviced
-	 * This is only a temporary solution until we cleanup
-	 * fixup_irqs as we do for IA64.
-	 */
-	local_irq_enable();
-	mdelay(1);
-
-	local_irq_disable();
-	remove_siblinginfo(cpu);
-
-	/* It's now safe to remove this processor from the online map */
-	remove_cpu_from_maps(cpu);
-	fixup_irqs(cpu_online_map);
-	return 0;
-}
-
-void __cpu_die(unsigned int cpu)
-{
-	/* We don't do anything here: idle task is faking death itself. */
-	unsigned int i;
-
-	for (i = 0; i < 10; i++) {
-		/* They ack this in play_dead by setting CPU_DEAD */
-		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
-			printk ("CPU %d is now offline\n", cpu);
-			if (1 == num_online_cpus())
-				alternatives_smp_switch(0);
-			return;
-		}
-		msleep(100);
-	}
-	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
-}
-
-#else /* ... !CONFIG_HOTPLUG_CPU */
-
-int __cpu_disable(void)
-{
-	return -ENOSYS;
-}
-
-void __cpu_die(unsigned int cpu)
-{
-	/* We said "no" in __cpu_disable */
-	BUG();
-}
-#endif /* CONFIG_HOTPLUG_CPU */
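For orientation, the sketch below shows the order in which the generic CPU hotplug path roughly drives the arch hooks that this patch consolidates: __cpu_disable() runs on the CPU being removed, the idle task on that CPU then fakes its own death in play_dead() and sets CPU_DEAD, and the controlling CPU finally calls __cpu_die() to wait for that acknowledgement. Every function body here is a stub invented for the sketch; only the calling order is the point, and the summary of the generic path (kernel/cpu.c, stop_machine) is a simplification, not a quote of that code.

/* Compile-and-run sketch of the calling order only; all bodies are stubs. */
#include <stdio.h>

static int cpu_disable_hook(int cpu)	/* stands in for __cpu_disable() */
{
	printf("cpu %d: cleared from cpu_online_map, IRQs rerouted\n", cpu);
	return 0;
}

static void cpu_die_hook(int cpu)	/* stands in for __cpu_die() */
{
	printf("cpu %d: controller saw CPU_DEAD, offline complete\n", cpu);
}

static int cpu_down_sim(int cpu)
{
	/*
	 * 1. The generic code has the dying CPU run __cpu_disable(),
	 *    which pulls it out of the online map and reroutes its
	 *    interrupts.
	 */
	if (cpu_disable_hook(cpu))
		return -1;

	/*
	 * 2. The dying CPU's idle task notices it is offline, enters
	 *    play_dead() and stores CPU_DEAD into its per-CPU cpu_state.
	 *
	 * 3. The controlling CPU calls __cpu_die(), which polls for that
	 *    CPU_DEAD acknowledgement (the msleep(100) loop above).
	 */
	cpu_die_hook(cpu);
	return 0;
}

int main(void)
{
	return cpu_down_sim(1);
}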