diff options
author | Tejun Heo <tj@kernel.org> | 2009-01-13 06:41:35 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-01-16 08:19:58 -0500 |
commit | 9939ddaff52787b2a7c1adf1b2afc95421aa0884 (patch) | |
tree | 6e7266d065914e19c3c3f4b4e475f09b9669fa51 /arch/x86/kernel/setup_percpu.c | |
parent | 1a51e3a0aed18767cf2762e95456ecfeb0bca5e6 (diff) |
x86: merge 64 and 32 SMP percpu handling
Now that pda is allocated as part of percpu, percpu doesn't need to be
accessed through pda. Unify x86_64 SMP percpu access with x86_32 SMP
one. Other than the segment register, operand size and the base of
percpu symbols, they behave identically now.
This patch replaces now unnecessary pda->data_offset with a dummy
field which is necessary to keep stack_canary at its place. This
patch also moves per_cpu_offset initialization out of init_gdt() into
setup_per_cpu_areas(). Note that this change also necessitates
explicit per_cpu_offset initializations in voyager_smp.c.
With this change, x86_OP_percpu() operations are as efficient on x86_64 as on
x86_32 and also x86_64 can use assembly PER_CPU macros.
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/setup_percpu.c')
-rw-r--r-- | arch/x86/kernel/setup_percpu.c | 15 |
1 file changed, 8 insertions, 7 deletions
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 63d462802272..be1ff34db112 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c | |||
@@ -125,14 +125,14 @@ static void __init setup_per_cpu_maps(void) | |||
125 | #endif | 125 | #endif |
126 | } | 126 | } |
127 | 127 | ||
128 | #ifdef CONFIG_X86_32 | 128 | #ifdef CONFIG_X86_64 |
129 | /* | 129 | unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = { |
130 | * Great future not-so-futuristic plan: make i386 and x86_64 do it | 130 | [0] = (unsigned long)__per_cpu_load, |
131 | * the same way | 131 | }; |
132 | */ | 132 | #else |
133 | unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; | 133 | unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; |
134 | EXPORT_SYMBOL(__per_cpu_offset); | ||
135 | #endif | 134 | #endif |
135 | EXPORT_SYMBOL(__per_cpu_offset); | ||
136 | 136 | ||
137 | /* | 137 | /* |
138 | * Great future plan: | 138 | * Great future plan: |
@@ -178,6 +178,7 @@ void __init setup_per_cpu_areas(void) | |||
178 | #endif | 178 | #endif |
179 | 179 | ||
180 | memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start); | 180 | memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start); |
181 | per_cpu_offset(cpu) = ptr - __per_cpu_start; | ||
181 | #ifdef CONFIG_X86_64 | 182 | #ifdef CONFIG_X86_64 |
182 | cpu_pda(cpu) = (void *)ptr; | 183 | cpu_pda(cpu) = (void *)ptr; |
183 | 184 | ||
@@ -190,7 +191,7 @@ void __init setup_per_cpu_areas(void) | |||
190 | else | 191 | else |
191 | memset(cpu_pda(cpu), 0, sizeof(*cpu_pda(cpu))); | 192 | memset(cpu_pda(cpu), 0, sizeof(*cpu_pda(cpu))); |
192 | #endif | 193 | #endif |
193 | per_cpu_offset(cpu) = ptr - __per_cpu_start; | 194 | per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu); |
194 | 195 | ||
195 | DBG("PERCPU: cpu %4d %p\n", cpu, ptr); | 196 | DBG("PERCPU: cpu %4d %p\n", cpu, ptr); |
196 | } | 197 | } |