Diffstat (limited to 'arch/x86/kernel/smpboot.c')
-rw-r--r--	arch/x86/kernel/smpboot.c	86
1 file changed, 85 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 69c17965f48d..a36ae2785c48 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -17,6 +17,7 @@
 #include <asm/tlbflush.h>
 #include <asm/mtrr.h>
 #include <asm/nmi.h>
+#include <asm/vmi.h>
 #include <linux/mc146818rtc.h>
 
 #include <mach_apic.h>
@@ -229,6 +230,90 @@ void __cpuinit smp_callin(void)
 	cpu_set(cpuid, cpu_callin_map);
 }
 
+/*
+ * Activate a secondary processor.
+ */
+void __cpuinit start_secondary(void *unused)
+{
+	/*
+	 * Don't put *anything* before cpu_init(); SMP booting is so
+	 * fragile that we want to limit the things done here to the
+	 * most necessary things.
+	 */
+#ifdef CONFIG_VMI
+	vmi_bringup();
+#endif
+	cpu_init();
+	preempt_disable();
+	smp_callin();
+
+	/* otherwise gcc will move up smp_processor_id() before cpu_init() */
+	barrier();
+	/*
+	 * Check TSC synchronization with the boot processor (BP):
+	 */
+	check_tsc_sync_target();
+
+	if (nmi_watchdog == NMI_IO_APIC) {
+		disable_8259A_irq(0);
+		enable_NMI_through_LVT0();
+		enable_8259A_irq(0);
+	}
+
+	/* This must be done before setting cpu_online_map */
+	set_cpu_sibling_map(raw_smp_processor_id());
+	wmb();
+
+	/*
+	 * We need to hold call_lock so that there is no window
+	 * between the time smp_call_function() counts the number
+	 * of its IPI recipients and the time it determines which
+	 * CPUs actually receive the IPI. Holding the lock also
+	 * keeps this CPU out of any smp_call_function() that is
+	 * already in progress.
+	 */
+	lock_ipi_call_lock();
+#ifdef CONFIG_X86_64
+	spin_lock(&vector_lock);
+
+	/* Set up the per-CPU irq handling data structures */
+	__setup_vector_irq(smp_processor_id());
+	/*
+	 * Allow the master to continue.
+	 */
+	spin_unlock(&vector_lock);
+#endif
+	cpu_set(smp_processor_id(), cpu_online_map);
+	unlock_ipi_call_lock();
+	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
+
+	setup_secondary_clock();
+
+	wmb();
+	cpu_idle();
+}
+
+#ifdef CONFIG_X86_32
+/*
+ * Everything has been set up for the secondary
+ * CPUs - they just need to reload everything
+ * from the task structure.
+ * This function must not return.
+ */
+void __devinit initialize_secondary(void)
+{
+	/*
+	 * We don't actually need to load the full TSS,
+	 * basically just the stack pointer and the ip.
+	 */
+
+	asm volatile(
+		"movl %0,%%esp\n\t"
+		"jmp *%1"
+		:
+		:"m" (current->thread.sp), "m" (current->thread.ip));
+}
+#endif
+
 static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
 {
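
Note: the call_lock comment in the hunk above describes an ordering guarantee that can be modeled in user space. The sketch below (a simplified pthreads model, not kernel code; every name in it is illustrative) shows why taking the same lock around both the recipient count and the online-bit update keeps the sender's view consistent:

    /* Toy model of the lock_ipi_call_lock() ordering in start_secondary().
     * Build with: gcc -pthread model.c */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t call_lock = PTHREAD_MUTEX_INITIALIZER;
    static int online_mask;            /* stand-in for cpu_online_map */

    /* Sender side: count recipients and "send" under one lock hold,
     * so the set of online CPUs cannot change in between. */
    static void *smp_call_function_model(void *arg)
    {
    	pthread_mutex_lock(&call_lock);
    	int recipients = __builtin_popcount(online_mask);
    	printf("IPI sent to %d CPU(s)\n", recipients);
    	pthread_mutex_unlock(&call_lock);
    	return NULL;
    }

    /* Secondary-CPU side: mirror of the hot path above - the online
     * bit may only flip while no cross-call is being assembled. */
    static void *start_secondary_model(void *arg)
    {
    	int cpu = *(int *)arg;
    	pthread_mutex_lock(&call_lock);    /* lock_ipi_call_lock()   */
    	online_mask |= 1 << cpu;           /* cpu_set(cpu, online)   */
    	pthread_mutex_unlock(&call_lock);  /* unlock_ipi_call_lock() */
    	return NULL;
    }

    int main(void)
    {
    	pthread_t sender, secondary;
    	int cpu = 1;

    	online_mask = 1 << 0;              /* boot CPU already online */
    	pthread_create(&sender, NULL, smp_call_function_model, NULL);
    	pthread_create(&secondary, NULL, start_secondary_model, &cpu);
    	pthread_join(sender, NULL);
    	pthread_join(secondary, NULL);
    	return 0;
    }

In the kernel the same idea is carried by lock_ipi_call_lock()/unlock_ipi_call_lock() around cpu_set(..., cpu_online_map): a CPU can only appear in the sender's view while no cross-call is in flight.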
@@ -533,7 +618,6 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
 }
 #endif /* WAKE_SECONDARY_VIA_NMI */
 
-extern void start_secondary(void *unused);
 #ifdef WAKE_SECONDARY_VIA_INIT
 static int __devinit
 wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
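
Footnote on the X86_32 initialize_secondary() hunk: the inline asm simply loads a saved stack pointer and jumps to a saved instruction pointer taken from the idle task's thread structure. A rough user-space analogue of that "reload sp/ip and never return" pattern (purely illustrative; it assumes nothing about the kernel's thread struct) is setjmp()/longjmp():

    #include <setjmp.h>
    #include <stdio.h>

    /* Stands in for thread.sp/thread.ip: a saved register context. */
    static jmp_buf saved_context;

    /* Analogue of initialize_secondary(): resume at the saved ip on
     * the saved sp. Like the kernel function, it must not return. */
    static void resume_saved_context(void)
    {
    	longjmp(saved_context, 1);
    }

    int main(void)
    {
    	if (setjmp(saved_context) == 0) {  /* record sp/ip */
    		resume_saved_context();    /* never returns */
    	}
    	printf("running at the saved ip, on the saved stack\n");
    	return 0;
    }

The real code needs even less machinery: at that point the new CPU has no state worth saving, so it unconditionally abandons its setup stack and jumps to the entry point recorded for the idle thread.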