Diffstat (limited to 'arch/x86/kernel/smpboot_64.c')
-rw-r--r-- | arch/x86/kernel/smpboot_64.c | 63
1 file changed, 0 insertions(+), 63 deletions(-)
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index 71f13b15bd89..60cd8cf1b073 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -71,69 +71,6 @@ int smp_threads_ready;
 /* State of each CPU */
 DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
-extern void smp_callin(void);
-/*
- * Setup code on secondary processor (after comming out of the trampoline)
- */
-void __cpuinit start_secondary(void)
-{
-	/*
-	 * Dont put anything before smp_callin(), SMP
-	 * booting is too fragile that we want to limit the
-	 * things done here to the most necessary things.
-	 */
-	cpu_init();
-	preempt_disable();
-	smp_callin();
-
-	/* otherwise gcc will move up the smp_processor_id before the cpu_init */
-	barrier();
-
-	/*
-	 * Check TSC sync first:
-	 */
-	check_tsc_sync_target();
-
-	if (nmi_watchdog == NMI_IO_APIC) {
-		disable_8259A_irq(0);
-		enable_NMI_through_LVT0();
-		enable_8259A_irq(0);
-	}
-
-	/*
-	 * The sibling maps must be set before turing the online map on for
-	 * this cpu
-	 */
-	set_cpu_sibling_map(smp_processor_id());
-
-	/*
-	 * We need to hold call_lock, so there is no inconsistency
-	 * between the time smp_call_function() determines number of
-	 * IPI recipients, and the time when the determination is made
-	 * for which cpus receive the IPI in genapic_flat.c. Holding this
-	 * lock helps us to not include this cpu in a currently in progress
-	 * smp_call_function().
-	 */
-	lock_ipi_call_lock();
-	spin_lock(&vector_lock);
-
-	/* Setup the per cpu irq handling data structures */
-	__setup_vector_irq(smp_processor_id());
-	/*
-	 * Allow the master to continue.
-	 */
-	spin_unlock(&vector_lock);
-	cpu_set(smp_processor_id(), cpu_online_map);
-	unlock_ipi_call_lock();
-
-	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
-
-	setup_secondary_clock();
-
-	wmb();
-	cpu_idle();
-}
-
 cycles_t cacheflush_time;
 unsigned long cache_decay_ticks;
 
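For reference, and not part of the commit itself: the block removed above encodes a specific bring-up ordering for a secondary CPU (initialise per-CPU state, synchronise with the boot CPU, publish topology and IRQ vectors, and only then appear in cpu_online_map). The standalone C sketch below restates that ordering with stubbed-out helpers; every function name in it is a stand-in rather than the real kernel symbol, so it can be compiled and read outside the kernel tree.

/*
 * Minimal standalone sketch (not from the kernel tree) of the ordering
 * the removed start_secondary() enforced.  The helpers below are stubs
 * that only print what the corresponding kernel step does; the ordering
 * and the reasons for it are taken from the removed code's comments.
 */
#include <stdio.h>

static void cpu_init_stub(void)          { puts("per-CPU state initialised"); }
static void smp_callin_stub(void)        { puts("callin: synchronised with the boot CPU"); }
static void tsc_sync_check_stub(void)    { puts("TSC checked against the boot CPU"); }
static void sibling_map_stub(int cpu)    { printf("sibling map set for cpu %d\n", cpu); }
static void vector_setup_stub(int cpu)   { printf("per-CPU IRQ vectors set for cpu %d\n", cpu); }
static void mark_online_stub(int cpu)    { printf("cpu %d now visible as online\n", cpu); }

static void start_secondary_sketch(int cpu)
{
	/* Nothing may run before the init/callin pair: the CPU is not
	 * fully set up yet and the boot CPU is still waiting for it. */
	cpu_init_stub();
	smp_callin_stub();

	/* The TSC is checked before the CPU does any real work. */
	tsc_sync_check_stub();

	/* Sibling (topology) maps must be valid before other CPUs can
	 * see this one as online. */
	sibling_map_stub(cpu);

	/* In the real code, vector setup and the online transition sit
	 * under ipi_call_lock/vector_lock, so an in-flight
	 * smp_call_function() never targets a CPU that was not counted
	 * when the IPI recipient mask was built. */
	vector_setup_stub(cpu);
	mark_online_stub(cpu);
}

int main(void)
{
	start_secondary_sketch(1);
	return 0;
}

The point the removed comments make is that the online-map update is the publication step: everything another CPU might consult (sibling maps, IRQ vector data) has to be in place, under the same lock used by smp_call_function(), before the new CPU becomes visible.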