Diffstat (limited to 'arch/x86_64/kernel/smpboot.c')

 arch/x86_64/kernel/smpboot.c | 84 ++++++++++++++++++++----------------------
 1 file changed, 40 insertions(+), 44 deletions(-)

diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index e773a794ec45..6e4807d64d46 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -113,24 +113,6 @@ struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
 #define set_idle_for_cpu(x,p)	(idle_thread_array[(x)] = (p))
 
 /*
- * cpu_possible_map should be static, it cannot change as cpu's
- * are onlined, or offlined. The reason is per-cpu data-structures
- * are allocated by some modules at init time, and dont expect to
- * do this dynamically on cpu arrival/departure.
- * cpu_present_map on the other hand can change dynamically.
- * In case when cpu_hotplug is not compiled, then we resort to current
- * behaviour, which is cpu_possible == cpu_present.
- * If cpu-hotplug is supported, then we need to preallocate for all
- * those NR_CPUS, hence cpu_possible_map represents entire NR_CPUS range.
- * - Ashok Raj
- */
-#ifdef CONFIG_HOTPLUG_CPU
-#define fixup_cpu_possible_map(x)	cpu_set((x), cpu_possible_map)
-#else
-#define fixup_cpu_possible_map(x)
-#endif
-
-/*
  * Currently trivial. Write the real->protected mode
  * bootstrap into the page concerned. The caller
  * has made sure it's suitably aligned.
@@ -229,9 +211,6 @@ static __cpuinit void sync_master(void *arg)
 {
 	unsigned long flags, i;
 
-	if (smp_processor_id() != 0)
-		return;
-
 	go[MASTER] = 0;
 
 	local_irq_save(flags);
@@ -280,7 +259,7 @@ get_delta(long *rt, long *master)
 	return tcenter - best_tm;
 }
 
-static __cpuinit void sync_tsc(void)
+static __cpuinit void sync_tsc(unsigned int master)
 {
 	int i, done = 0;
 	long delta, adj, adjust_latency = 0;
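
This hunk threads the sync target through sync_tsc() instead of hard-coding the boot CPU. The get_delta() helper it sits next to estimates the slave-to-master TSC offset from the tightest round trip it observes. Below is a compilable sketch of that midpoint estimate; read_local_tsc() and query_master_tsc() are hypothetical stand-ins for rdtscll() and the go[]-mediated exchange, not real kernel APIs.

    /* Sketch only: the shape of get_delta()'s estimate. */
    extern unsigned long read_local_tsc(void);
    extern unsigned long query_master_tsc(void);

    static long estimate_offset(int rounds)
    {
            unsigned long t0, tm, t1, rtt;
            unsigned long best_t0 = 0, best_t1 = 0, best_tm = 0;
            unsigned long best_rtt = ~0UL;
            long tcenter;
            int i;

            for (i = 0; i < rounds; i++) {
                    t0 = read_local_tsc();   /* slave clock, before */
                    tm = query_master_tsc(); /* master clock, mid-flight */
                    t1 = read_local_tsc();   /* slave clock, after */

                    rtt = t1 - t0;
                    if (rtt < best_rtt) {    /* keep the tightest round */
                            best_rtt = rtt;
                            best_t0 = t0;
                            best_t1 = t1;
                            best_tm = tm;
                    }
            }

            /* Assume tm was sampled halfway through the round trip;
             * halving before adding avoids overflow. */
            tcenter = best_t0 / 2 + best_t1 / 2;
            return tcenter - best_tm;        /* slave minus master */
    }
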
@@ -294,9 +273,17 @@ static __cpuinit void sync_tsc(void)
 	} t[NUM_ROUNDS] __cpuinitdata;
 #endif
 
+	printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n",
+		smp_processor_id(), master);
+
 	go[MASTER] = 1;
 
-	smp_call_function(sync_master, NULL, 1, 0);
+	/* It is dangerous to broadcast an IPI while cpus are coming
+	 * up, as they may not be ready to accept it.  Since we only
+	 * need to reach the boot cpu, direct the message at it and
+	 * avoid the race.
+	 */
+	smp_call_function_single(master, sync_master, NULL, 1, 0);
 
 	while (go[MASTER])	/* wait for master to be ready */
 		no_cpu_relax();
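
The new single-target call pairs with a simple flag handshake: the slave raises go[MASTER], sync_master() clears it on the master, and the slave spins until it drops. A userspace analogue of that rendezvous (illustrative names only, not kernel APIs):

    #include <pthread.h>
    #include <stdatomic.h>

    static atomic_int go_master = 1;         /* mirrors go[MASTER] = 1 */

    static void *master_side(void *arg)
    {
            /* ...master samples its clock here... */
            atomic_store(&go_master, 0);     /* "master is ready" */
            return NULL;
    }

    static void slave_side(void)
    {
            pthread_t master;

            /* Wake exactly one chosen peer, as smp_call_function_single()
             * does, instead of broadcasting to peers that may not be
             * up yet. */
            pthread_create(&master, NULL, master_side, NULL);

            while (atomic_load(&go_master))
                    ;                        /* wait for master to be ready */

            /* ...both sides now run the timing rounds... */
            pthread_join(master, NULL);
    }
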
@@ -340,16 +327,14 @@ static __cpuinit void sync_tsc(void)
 	printk(KERN_INFO
 	       "CPU %d: synchronized TSC with CPU %u (last diff %ld cycles, "
 	       "maxerr %lu cycles)\n",
-	       smp_processor_id(), boot_cpu_id, delta, rt);
+	       smp_processor_id(), master, delta, rt);
 }
 
 static void __cpuinit tsc_sync_wait(void)
 {
 	if (notscsync || !cpu_has_tsc)
 		return;
-	printk(KERN_INFO "CPU %d: Syncing TSC to CPU %u.\n", smp_processor_id(),
-	       boot_cpu_id);
-	sync_tsc();
+	sync_tsc(boot_cpu_id);
 }
 
 static __init int notscsync_setup(char *s)
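
For context, notscsync_setup(), whose signature closes this hunk, is the handler behind the `notscsync` command-line switch that tsc_sync_wait() tests above. Only the signature is visible in the diff, so the body below is a plausible reconstruction of the usual __setup() pattern rather than the file's exact code:

    static int notscsync;                    /* checked by tsc_sync_wait() */

    static __init int notscsync_setup(char *s)
    {
            notscsync = 1;                   /* "notscsync" on the kernel
                                              * command line skips TSC sync */
            return 0;
    }
    __setup("notscsync", notscsync_setup);
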
@@ -773,8 +758,9 @@ do_rest:
 	initial_code = start_secondary;
 	clear_ti_thread_flag(c_idle.idle->thread_info, TIF_FORK);
 
-	printk(KERN_INFO "Booting processor %d/%d rip %lx rsp %lx\n", cpu, apicid,
-	       start_rip, init_rsp);
+	printk(KERN_INFO "Booting processor %d/%d APIC 0x%x\n", cpu,
+		cpus_weight(cpu_present_map),
+		apicid);
 
 	/*
 	 * This grunge runs the startup process for
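
The reworked message prints "cpu/total" plus the APIC ID rather than raw rip/rsp values; the total comes from cpus_weight(), a population count over the cpumask bitmap. A userspace analogue over a plain word, for illustration:

    /* Illustrative popcount, the operation behind cpus_weight(). */
    static int mask_weight(unsigned long mask)
    {
            int n = 0;

            while (mask) {
                    mask &= mask - 1;        /* clear lowest set bit */
                    n++;
            }
            return n;
    }
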
@@ -924,6 +910,27 @@ static __init void enforce_max_cpus(unsigned max_cpus)
 	}
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * cpu_possible_map should be static, it cannot change as cpu's
+ * are onlined, or offlined. The reason is per-cpu data-structures
+ * are allocated by some modules at init time, and dont expect to
+ * do this dynamically on cpu arrival/departure.
+ * cpu_present_map on the other hand can change dynamically.
+ * In case when cpu_hotplug is not compiled, then we resort to current
+ * behaviour, which is cpu_possible == cpu_present.
+ * If cpu-hotplug is supported, then we need to preallocate for all
+ * those NR_CPUS, hence cpu_possible_map represents entire NR_CPUS range.
+ * - Ashok Raj
+ */
+static void prefill_possible_map(void)
+{
+	int i;
+	for (i = 0; i < NR_CPUS; i++)
+		cpu_set(i, cpu_possible_map);
+}
+#endif
+
 /*
  * Various sanity checks.
  */
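
The moved comment is worth taking literally: init-time code sizes per-cpu state over every possible CPU and never revisits it, so with hotplug every NR_CPUS slot must be marked possible up front. A hypothetical, non-kernel-exact module init showing the allocation pattern that would break if a CPU became possible only on arrival:

    struct my_stats {                        /* hypothetical per-cpu data */
            unsigned long events;
    };
    static struct my_stats *stats[NR_CPUS];

    static int __init my_module_init(void)
    {
            int i;

            for (i = 0; i < NR_CPUS; i++) {
                    if (!cpu_possible(i))
                            continue;        /* this slot is never
                                              * allocated again later */
                    stats[i] = kmalloc(sizeof(*stats[i]), GFP_KERNEL);
                    if (!stats[i])
                            return -ENOMEM;
            }
            return 0;
    }
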
@@ -987,25 +994,15 @@ static int __init smp_sanity_check(unsigned max_cpus)
  */
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-	int i;
-
 	nmi_watchdog_default();
 	current_cpu_data = boot_cpu_data;
 	current_thread_info()->cpu = 0;  /* needed? */
 
 	enforce_max_cpus(max_cpus);
 
-	/*
-	 * Fill in cpu_present_mask
-	 */
-	for (i = 0; i < NR_CPUS; i++) {
-		int apicid = cpu_present_to_apicid(i);
-		if (physid_isset(apicid, phys_cpu_present_map)) {
-			cpu_set(i, cpu_present_map);
-			cpu_set(i, cpu_possible_map);
-		}
-		fixup_cpu_possible_map(i);
-	}
+#ifdef CONFIG_HOTPLUG_CPU
+	prefill_possible_map();
+#endif
 
 	if (smp_sanity_check(max_cpus) < 0) {
 		printk(KERN_INFO "SMP disabled\n");
@@ -1189,8 +1186,7 @@ void __cpu_die(unsigned int cpu)
 			printk ("CPU %d is now offline\n", cpu);
 			return;
 		}
-		current->state = TASK_UNINTERRUPTIBLE;
-		schedule_timeout(HZ/10);
+		msleep(100);
 	}
 	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
 }
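
The last hunk replaces the open-coded sleep with msleep(100), the same ~100 ms uninterruptible wait (HZ/10 jiffies), but msleep() sets the task state itself and resubmits any remaining timeout if the sleep ends early. Roughly, as in the generic timer code of this era:

    void msleep(unsigned int msecs)
    {
            unsigned long timeout = msecs_to_jiffies(msecs) + 1;

            while (timeout) {
                    set_current_state(TASK_UNINTERRUPTIBLE);
                    timeout = schedule_timeout(timeout);
            }
    }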