author     Glauber Costa <gcosta@redhat.com>    2008-03-03 12:13:12 -0500
committer  Ingo Molnar <mingo@elte.hu>          2008-04-17 11:40:57 -0400
commit     91718e8d13c23bfe0aa6fa6b730c5c33ee9771bf
tree       6ed8d645bfdc0bdcf6b93e3ac9b618f879b3bf8b /arch
parent     da522b07293756b9cb4e2c570454f95b8e79e189
x86: unify setup_trampoline
setup_trampoline() looks very similar between architectures, and this patch unifies them. The i386 version allocates bootmem memory, while the x86_64 version uses a fixed address.

In this patch, we initialize the global trampoline_base to the x86_64 version, and the i386 allocation can later override it.

Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
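For readers skimming the diff below, the resulting shape is: one global trampoline_base that defaults to the fixed x86_64 buffer at __va(SMP_TRAMPOLINE_BASE), one shared setup_trampoline() that copies the real-mode blob into it, and an i386-only smp_alloc_memory() that swaps in a low bootmem page before any copy happens. The following stand-alone C sketch models that pattern only; the blob, the fixed page and the allocator are illustrative stand-ins for trampoline_data/trampoline_end, SMP_TRAMPOLINE_BASE and alloc_bootmem_low_pages(), not kernel APIs.

/*
 * Minimal user-space model of the unified trampoline setup; all names
 * below are stand-ins for the kernel symbols named in the lead-in.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for the real-mode bootstrap blob (trampoline_data..trampoline_end). */
static unsigned char trampoline_data[64] = { 0xfa, 0xfc };	/* cli; cld; ... */
#define trampoline_end	(trampoline_data + sizeof(trampoline_data))

/* Stand-in for the fixed page at __va(SMP_TRAMPOLINE_BASE) used on x86_64. */
static unsigned char fixed_trampoline_page[4096];

/* Default to the fixed page; the 32-bit path may override this early on. */
static unsigned char *trampoline_base = fixed_trampoline_page;

/* Shared helper: copy the blob and return its address (physical, in the kernel). */
static unsigned long setup_trampoline(void)
{
	memcpy(trampoline_base, trampoline_data,
	       trampoline_end - trampoline_data);
	return (unsigned long)trampoline_base;
}

#ifdef MODEL_X86_32
/* Models smp_alloc_memory(): replace the default with a freshly allocated page. */
static void smp_alloc_memory(void)
{
	trampoline_base = aligned_alloc(4096, 4096);	/* stand-in for bootmem */
	if (!trampoline_base)
		abort();
}
#endif

int main(void)
{
#ifdef MODEL_X86_32
	smp_alloc_memory();
#endif
	printf("trampoline copied to %#lx\n", setup_trampoline());
	return 0;
}

The design point the sketch illustrates: the shared setup_trampoline() never needs to know which allocation strategy produced trampoline_base, and because the global is initialized to the fixed address, the x86_64 path needs no extra setup call at all.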
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/smpboot.c     33
-rw-r--r--  arch/x86/kernel/smpboot_32.c  29
-rw-r--r--  arch/x86/kernel/smpboot_64.c  14
3 files changed, 33 insertions, 43 deletions
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 34c31178041b..b13b9d55f9ce 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -3,6 +3,7 @@
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/percpu.h>
+#include <linux/bootmem.h>
 
 #include <asm/nmi.h>
 #include <asm/irq.h>
@@ -38,6 +39,9 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 EXPORT_PER_CPU_SYMBOL(cpu_info);
 
+/* ready for x86_64, no harm for x86, since it will overwrite after alloc */
+unsigned char *trampoline_base = __va(SMP_TRAMPOLINE_BASE);
+
 /* representing cpus for which sibling maps can be computed */
 static cpumask_t cpu_sibling_setup_map;
 
@@ -117,6 +121,35 @@ cpumask_t cpu_coregroup_map(int cpu)
 	return c->llc_shared_map;
 }
 
+/*
+ * Currently trivial. Write the real->protected mode
+ * bootstrap into the page concerned. The caller
+ * has made sure it's suitably aligned.
+ */
+
+unsigned long __cpuinit setup_trampoline(void)
+{
+	memcpy(trampoline_base, trampoline_data,
+	       trampoline_end - trampoline_data);
+	return virt_to_phys(trampoline_base);
+}
+
+#ifdef CONFIG_X86_32
+/*
+ * We are called very early to get the low memory for the
+ * SMP bootup trampoline page.
+ */
+void __init smp_alloc_memory(void)
+{
+	trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE);
+	/*
+	 * Has to be in very low memory so we can execute
+	 * real-mode AP code.
+	 */
+	if (__pa(trampoline_base) >= 0x9F000)
+		BUG();
+}
+#endif
 
 #ifdef CONFIG_HOTPLUG_CPU
 void remove_siblinginfo(int cpu)
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index a21f25418b3e..ee826594aa03 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -73,41 +73,12 @@ EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);
 
 u8 apicid_2_node[MAX_APICID];
 
-static unsigned char *trampoline_base;
-
 static void map_cpu_to_logical_apicid(void);
 
 /* State of each CPU. */
 DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
 /*
- * Currently trivial. Write the real->protected mode
- * bootstrap into the page concerned. The caller
- * has made sure it's suitably aligned.
- */
-
-static unsigned long __cpuinit setup_trampoline(void)
-{
-	memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
-	return virt_to_phys(trampoline_base);
-}
-
-/*
- * We are called very early to get the low memory for the
- * SMP bootup trampoline page.
- */
-void __init smp_alloc_memory(void)
-{
-	trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE);
-	/*
-	 * Has to be in very low memory so we can execute
-	 * real-mode AP code.
-	 */
-	if (__pa(trampoline_base) >= 0x9F000)
-		BUG();
-}
-
-/*
  * The bootstrap kernel entry code has set these up. Save them for
  * a given CPU
  */
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index 2cc1b8b0601c..9f4935e70e72 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -85,20 +85,6 @@ struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
 #define set_idle_for_cpu(x,p) (idle_thread_array[(x)] = (p))
 #endif
 
-
-/*
- * Currently trivial. Write the real->protected mode
- * bootstrap into the page concerned. The caller
- * has made sure it's suitably aligned.
- */
-
-static unsigned long __cpuinit setup_trampoline(void)
-{
-	void *tramp = __va(SMP_TRAMPOLINE_BASE);
-	memcpy(tramp, trampoline_data, trampoline_end - trampoline_data);
-	return virt_to_phys(tramp);
-}
-
 /*
  * The bootstrap kernel entry code has set these up. Save them for
  * a given CPU