author		Tejun Heo <tj@kernel.org>	2009-01-13 06:41:35 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-01-16 08:19:58 -0500
commit		9939ddaff52787b2a7c1adf1b2afc95421aa0884 (patch)
tree		6e7266d065914e19c3c3f4b4e475f09b9669fa51 /arch/x86/kernel
parent		1a51e3a0aed18767cf2762e95456ecfeb0bca5e6 (diff)
x86: merge 64 and 32 SMP percpu handling
Now that the pda is allocated as part of the percpu area, percpu variables
don't need to be accessed through the pda. Unify x86_64 SMP percpu access
with the x86_32 SMP one. Other than the segment register, operand size and
the base of percpu symbols, they behave identically now.

This patch replaces the now-unnecessary pda->data_offset with a dummy
field, which is needed to keep stack_canary at its place. It also moves
per_cpu_offset initialization out of init_gdt() into
setup_per_cpu_areas(). Note that this change also necessitates explicit
per_cpu_offset initializations in voyager_smp.c.

With this change, x86_OP_percpu()'s are as efficient on x86_64 as on
x86_32, and x86_64 can also use the assembly PER_CPU macros.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
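For readers new to the x86 percpu machinery, the point of the unification is that a percpu access on either architecture now compiles to a single segment-relative instruction. A minimal sketch of the idea in C, assuming the x86_OP_percpu() conventions of this series (helper names here are illustrative and simplified to one operand size; the real macros in asm/percpu.h are more general):

/*
 * Hedged sketch, not the kernel's exact code: both arches read a
 * percpu variable with one segment-relative mov; only the segment
 * register (%fs on 32-bit, %gs on 64-bit) and the operand size differ.
 */
#ifdef CONFIG_X86_64
#define __percpu_seg_str	"%%gs:"
#else
#define __percpu_seg_str	"%%fs:"
#endif

#define x86_read_percpu_sketch(var)			\
({							\
	unsigned long __ret;				\
	asm("mov " __percpu_seg_str "%1, %0"		\
	    : "=r" (__ret)				\
	    : "m" (per_cpu__##var));			\
	__ret;						\
})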
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--	arch/x86/kernel/asm-offsets_64.c	 1
-rw-r--r--	arch/x86/kernel/entry_64.S		 7
-rw-r--r--	arch/x86/kernel/head64.c		 2
-rw-r--r--	arch/x86/kernel/setup_percpu.c		15
-rw-r--r--	arch/x86/kernel/smpcommon.c		 3
5 files changed, 13 insertions(+), 15 deletions(-)
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index f8d1b047ef4f..f4cc81bfbf89 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -55,7 +55,6 @@ int main(void)
 	ENTRY(irqcount);
 	ENTRY(cpunumber);
 	ENTRY(irqstackptr);
-	ENTRY(data_offset);
 	DEFINE(pda_size, sizeof(struct x8664_pda));
 	BLANK();
 #undef ENTRY
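The ENTRY(data_offset) removed above is what generated the pda_data_offset assembler constant that entry_64.S used to consume; with the field gone from struct x8664_pda, the constant goes with it. For context, this is how asm-offsets constants are produced: DEFINE() below is the standard kbuild idiom from include/linux/kbuild.h, and ENTRY() is this file's local shorthand (reproduced for illustration; details may differ slightly):

/* The "->" marker lines are scraped out of the compiler's assembly
 * output to generate asm-offsets.h at build time. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

/* Local shorthand in asm-offsets_64.c for pda field offsets: */
#define ENTRY(entry) \
	DEFINE(pda_ ## entry, offsetof(struct x8664_pda, entry))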
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index e28c7a987793..4833f3a19650 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -52,6 +52,7 @@
 #include <asm/irqflags.h>
 #include <asm/paravirt.h>
 #include <asm/ftrace.h>
+#include <asm/percpu.h>

 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
 #include <linux/elf-em.h>
@@ -1072,10 +1073,10 @@ ENTRY(\sym)
 	TRACE_IRQS_OFF
 	movq %rsp,%rdi		/* pt_regs pointer */
 	xorl %esi,%esi		/* no error code */
-	movq %gs:pda_data_offset, %rbp
-	subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
+	PER_CPU(init_tss, %rbp)
+	subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
 	call \do_sym
-	addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
+	addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
 	jmp paranoid_exit	/* %ebx: no swapgs flag */
 	CFI_ENDPROC
 END(\sym)
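PER_CPU(init_tss, %rbp) above replaces the two-step sequence of loading pda_data_offset and then indexing per_cpu__init_tss by hand: the macro loads the current CPU's percpu offset and leaves the variable's base address in the register. In the SMP case it expands roughly as follows (a sketch based on asm/percpu.h of this series; __percpu_mov_op/__percpu_seg are movq/gs on 64-bit and movl/fs on 32-bit):

/* Sketch of the assembler-side macro (SMP case). per_cpu__this_cpu_off
 * holds each CPU's own offset, seeded in setup_per_cpu_areas(). */
#define PER_CPU(var, reg)						\
	__percpu_mov_op %__percpu_seg:per_cpu__this_cpu_off, reg;	\
	lea per_cpu__##var(reg), reg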
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 1a311293f733..e99b661a97f4 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -38,8 +38,6 @@ void __init x86_64_init_pda(void)
 #else
 	cpu_pda(0) = &_boot_cpu_pda;
 #endif
-	cpu_pda(0)->data_offset =
-		(unsigned long)(__per_cpu_load - __per_cpu_start);
 	pda_init(0);
 }

diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 63d462802272..be1ff34db112 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -125,14 +125,14 @@ static void __init setup_per_cpu_maps(void)
 #endif
 }

-#ifdef CONFIG_X86_32
-/*
- * Great future not-so-futuristic plan: make i386 and x86_64 do it
- * the same way
- */
+#ifdef CONFIG_X86_64
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
+	[0] = (unsigned long)__per_cpu_load,
+};
+#else
 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(__per_cpu_offset);
 #endif
+EXPORT_SYMBOL(__per_cpu_offset);

 /*
  * Great future plan:
@@ -178,6 +178,7 @@ void __init setup_per_cpu_areas(void)
 #endif

 	memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start);
+	per_cpu_offset(cpu) = ptr - __per_cpu_start;
 #ifdef CONFIG_X86_64
 	cpu_pda(cpu) = (void *)ptr;

@@ -190,7 +191,7 @@ void __init setup_per_cpu_areas(void)
 	else
 		memset(cpu_pda(cpu), 0, sizeof(*cpu_pda(cpu)));
 #endif
-	per_cpu_offset(cpu) = ptr - __per_cpu_start;
+	per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);

 	DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
 	}
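Two details above are worth calling out. First, per_cpu_offset(cpu) is now recorded immediately after the percpu image is copied, and each CPU's this_cpu_off slot is seeded from it, which is what makes the PER_CPU() segment-relative load work on both architectures. Second, the static x86_64 initializer [0] = (unsigned long)__per_cpu_load keeps boot-CPU percpu accesses working before setup_per_cpu_areas() runs; since the preceding patches in this series made x86_64 percpu symbols zero-based on SMP, the boot CPU's offset is simply the load address. For illustration, once the offsets are populated, a remote CPU's copy of a variable is reached like this (a hypothetical helper, not the kernel's generic per_cpu() definition):

/* Sketch: the address of var in cpu's area is the percpu symbol's
 * address plus that cpu's offset. Helper name is illustrative only. */
#define per_cpu_ptr_sketch(var, cpu)					\
	((typeof(per_cpu__##var) *)((unsigned long)&per_cpu__##var +	\
				    __per_cpu_offset[cpu]))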
diff --git a/arch/x86/kernel/smpcommon.c b/arch/x86/kernel/smpcommon.c
index 397e309839dd..84395fabc410 100644
--- a/arch/x86/kernel/smpcommon.c
+++ b/arch/x86/kernel/smpcommon.c
@@ -4,10 +4,10 @@
 #include <linux/module.h>
 #include <asm/smp.h>

-#ifdef CONFIG_X86_32
 DEFINE_PER_CPU(unsigned long, this_cpu_off);
 EXPORT_PER_CPU_SYMBOL(this_cpu_off);

+#ifdef CONFIG_X86_32
 /*
  * Initialize the CPU's GDT.  This is either the boot CPU doing itself
  * (still using the master per-cpu area), or a CPU doing it for a
@@ -24,7 +24,6 @@ __cpuinit void init_gdt(int cpu)
 	write_gdt_entry(get_cpu_gdt_table(cpu),
 			GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);

-	per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
 	per_cpu(cpu_number, cpu) = cpu;
 }
 #endif