aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kernel
diff options
context:
space:
mode:
authorGlauber de Oliveira Costa <gcosta@redhat.com>2008-03-19 13:26:00 -0400
committerIngo Molnar <mingo@elte.hu>2008-04-17 11:41:03 -0400
commitbbc2ff6a91a4eef8030018cd389bb12352d11b34 (patch)
tree89d26560f180c6abe6bc700a74c63cd7d6279618 /arch/x86/kernel
parentcb3c8b9003f15efa4a750a32d2d602d40cc45d5a (diff)
x86: integrate start_secondary
It now looks the same between architectures, so we merge it in smpboot.c. Minor differences go inside an ifdef Signed-off-by: Glauber Costa <gcosta@redhat.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--arch/x86/kernel/smpboot.c86
-rw-r--r--arch/x86/kernel/smpboot_32.c75
-rw-r--r--arch/x86/kernel/smpboot_64.c63
3 files changed, 85 insertions, 139 deletions
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 69c17965f48d..a36ae2785c48 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -17,6 +17,7 @@
17#include <asm/tlbflush.h> 17#include <asm/tlbflush.h>
18#include <asm/mtrr.h> 18#include <asm/mtrr.h>
19#include <asm/nmi.h> 19#include <asm/nmi.h>
20#include <asm/vmi.h>
20#include <linux/mc146818rtc.h> 21#include <linux/mc146818rtc.h>
21 22
22#include <mach_apic.h> 23#include <mach_apic.h>
@@ -229,6 +230,90 @@ void __cpuinit smp_callin(void)
229 cpu_set(cpuid, cpu_callin_map); 230 cpu_set(cpuid, cpu_callin_map);
230} 231}
231 232
233/*
234 * Activate a secondary processor.
235 */
236void __cpuinit start_secondary(void *unused)
237{
238 /*
239 * Don't put *anything* before cpu_init(), SMP booting is too
240 * fragile that we want to limit the things done here to the
241 * most necessary things.
242 */
243#ifdef CONFIG_VMI
244 vmi_bringup();
245#endif
246 cpu_init();
247 preempt_disable();
248 smp_callin();
249
250 /* otherwise gcc will move up smp_processor_id before the cpu_init */
251 barrier();
252 /*
253 * Check TSC synchronization with the BP:
254 */
255 check_tsc_sync_target();
256
257 if (nmi_watchdog == NMI_IO_APIC) {
258 disable_8259A_irq(0);
259 enable_NMI_through_LVT0();
260 enable_8259A_irq(0);
261 }
262
263 /* This must be done before setting cpu_online_map */
264 set_cpu_sibling_map(raw_smp_processor_id());
265 wmb();
266
267 /*
268 * We need to hold call_lock, so there is no inconsistency
269 * between the time smp_call_function() determines number of
270 * IPI recipients, and the time when the determination is made
271 * for which cpus receive the IPI. Holding this
272 * lock helps us to not include this cpu in a currently in progress
273 * smp_call_function().
274 */
275 lock_ipi_call_lock();
276#ifdef CONFIG_X86_64
277 spin_lock(&vector_lock);
278
279 /* Setup the per cpu irq handling data structures */
280 __setup_vector_irq(smp_processor_id());
281 /*
282 * Allow the master to continue.
283 */
284 spin_unlock(&vector_lock);
285#endif
286 cpu_set(smp_processor_id(), cpu_online_map);
287 unlock_ipi_call_lock();
288 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
289
290 setup_secondary_clock();
291
292 wmb();
293 cpu_idle();
294}
295
296#ifdef CONFIG_X86_32
297/*
298 * Everything has been set up for the secondary
299 * CPUs - they just need to reload everything
300 * from the task structure
301 * This function must not return.
302 */
303void __devinit initialize_secondary(void)
304{
305 /*
306 * We don't actually need to load the full TSS,
307 * basically just the stack pointer and the ip.
308 */
309
310 asm volatile(
311 "movl %0,%%esp\n\t"
312 "jmp *%1"
313 :
314 :"m" (current->thread.sp), "m" (current->thread.ip));
315}
316#endif
232 317
233static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c) 318static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
234{ 319{
@@ -533,7 +618,6 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
533} 618}
534#endif /* WAKE_SECONDARY_VIA_NMI */ 619#endif /* WAKE_SECONDARY_VIA_NMI */
535 620
536extern void start_secondary(void *unused);
537#ifdef WAKE_SECONDARY_VIA_INIT 621#ifdef WAKE_SECONDARY_VIA_INIT
538static int __devinit 622static int __devinit
539wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) 623wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index e82eeb2fdfef..77b045cfebd4 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -80,81 +80,6 @@ extern void unmap_cpu_to_logical_apicid(int cpu);
80/* State of each CPU. */ 80/* State of each CPU. */
81DEFINE_PER_CPU(int, cpu_state) = { 0 }; 81DEFINE_PER_CPU(int, cpu_state) = { 0 };
82 82
83extern void smp_callin(void);
84
85/*
86 * Activate a secondary processor.
87 */
88void __cpuinit start_secondary(void *unused)
89{
90 /*
91 * Don't put *anything* before cpu_init(), SMP booting is too
92 * fragile that we want to limit the things done here to the
93 * most necessary things.
94 */
95#ifdef CONFIG_VMI
96 vmi_bringup();
97#endif
98 cpu_init();
99 preempt_disable();
100 smp_callin();
101
102 /* otherwise gcc will move up smp_processor_id before the cpu_init */
103 barrier();
104 /*
105 * Check TSC synchronization with the BP:
106 */
107 check_tsc_sync_target();
108
109 if (nmi_watchdog == NMI_IO_APIC) {
110 disable_8259A_irq(0);
111 enable_NMI_through_LVT0();
112 enable_8259A_irq(0);
113 }
114
115 /* This must be done before setting cpu_online_map */
116 set_cpu_sibling_map(raw_smp_processor_id());
117 wmb();
118
119 /*
120 * We need to hold call_lock, so there is no inconsistency
121 * between the time smp_call_function() determines number of
122 * IPI recipients, and the time when the determination is made
123 * for which cpus receive the IPI. Holding this
124 * lock helps us to not include this cpu in a currently in progress
125 * smp_call_function().
126 */
127 lock_ipi_call_lock();
128 cpu_set(smp_processor_id(), cpu_online_map);
129 unlock_ipi_call_lock();
130 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
131
132 setup_secondary_clock();
133
134 wmb();
135 cpu_idle();
136}
137
138/*
139 * Everything has been set up for the secondary
140 * CPUs - they just need to reload everything
141 * from the task structure
142 * This function must not return.
143 */
144void __devinit initialize_secondary(void)
145{
146 /*
147 * We don't actually need to load the full TSS,
148 * basically just the stack pointer and the ip.
149 */
150
151 asm volatile(
152 "movl %0,%%esp\n\t"
153 "jmp *%1"
154 :
155 :"m" (current->thread.sp),"m" (current->thread.ip));
156}
157
158#ifdef CONFIG_HOTPLUG_CPU 83#ifdef CONFIG_HOTPLUG_CPU
159void cpu_exit_clear(void) 84void cpu_exit_clear(void)
160{ 85{
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index 71f13b15bd89..60cd8cf1b073 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -71,69 +71,6 @@ int smp_threads_ready;
71/* State of each CPU */ 71/* State of each CPU */
72DEFINE_PER_CPU(int, cpu_state) = { 0 }; 72DEFINE_PER_CPU(int, cpu_state) = { 0 };
73 73
74extern void smp_callin(void);
75/*
76 * Setup code on secondary processor (after coming out of the trampoline)
77 */
78void __cpuinit start_secondary(void)
79{
80 /*
81 * Dont put anything before smp_callin(), SMP
82 * booting is too fragile that we want to limit the
83 * things done here to the most necessary things.
84 */
85 cpu_init();
86 preempt_disable();
87 smp_callin();
88
89 /* otherwise gcc will move up the smp_processor_id before the cpu_init */
90 barrier();
91
92 /*
93 * Check TSC sync first:
94 */
95 check_tsc_sync_target();
96
97 if (nmi_watchdog == NMI_IO_APIC) {
98 disable_8259A_irq(0);
99 enable_NMI_through_LVT0();
100 enable_8259A_irq(0);
101 }
102
103 /*
104 * The sibling maps must be set before turning the online map on for
105 * this cpu
106 */
107 set_cpu_sibling_map(smp_processor_id());
108
109 /*
110 * We need to hold call_lock, so there is no inconsistency
111 * between the time smp_call_function() determines number of
112 * IPI recipients, and the time when the determination is made
113 * for which cpus receive the IPI in genapic_flat.c. Holding this
114 * lock helps us to not include this cpu in a currently in progress
115 * smp_call_function().
116 */
117 lock_ipi_call_lock();
118 spin_lock(&vector_lock);
119
120 /* Setup the per cpu irq handling data structures */
121 __setup_vector_irq(smp_processor_id());
122 /*
123 * Allow the master to continue.
124 */
125 spin_unlock(&vector_lock);
126 cpu_set(smp_processor_id(), cpu_online_map);
127 unlock_ipi_call_lock();
128
129 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
130
131 setup_secondary_clock();
132
133 wmb();
134 cpu_idle();
135}
136
137cycles_t cacheflush_time; 74cycles_t cacheflush_time;
138unsigned long cache_decay_ticks; 75unsigned long cache_decay_ticks;
139 76