aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kernel/head_64.S
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2009-01-13 06:41:35 -0500
committerIngo Molnar <mingo@elte.hu>2009-01-16 08:19:14 -0500
commit3e5d8f978435bb9ba4dfe3f4514e65e7885db1a9 (patch)
tree1ce55b2ec16a0bd59a29857e05215960d463a1d8 /arch/x86/kernel/head_64.S
parenta698c823e15149941b0f0281527d0c0d1daf2639 (diff)
x86: make percpu symbols zerobased on SMP
[ Based on original patch from Christoph Lameter and Mike Travis. ] This patch makes percpu symbols zerobased on x86_64 SMP by adding PERCPU_VADDR() to vmlinux.lds.h which helps setting explicit vaddr on the percpu output section and using it in vmlinux_64.lds.S. A new PHDR is added as existing ones cannot contain sections near address zero. PERCPU_VADDR() also adds a new symbol __per_cpu_load which always points to the vaddr of the loaded percpu data.init region. The following adjustments have been made to accommodate the address change. * code to locate percpu gdt_page in head_64.S is updated to add the load address to the gdt_page offset. * __per_cpu_load is used in places where access to the init data area is necessary. * pda->data_offset is initialized soon after C code is entered as zero value doesn't work anymore. This patch is mostly taken from Mike Travis' "x86_64: Base percpu variables at zero" patch. Signed-off-by: Tejun Heo <tj@kernel.org> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/head_64.S')
-rw-r--r--arch/x86/kernel/head_64.S24
1 files changed, 23 insertions, 1 deletions
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 0e275d495563..7ee0363871e8 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -204,6 +204,23 @@ ENTRY(secondary_startup_64)
204 pushq $0 204 pushq $0
205 popfq 205 popfq
206 206
207#ifdef CONFIG_SMP
208 /*
209 * early_gdt_base should point to the gdt_page in static percpu init
210 * data area. Computing this requires two symbols - __per_cpu_load
211 * and per_cpu__gdt_page. As linker can't do no such relocation, do
212 * it by hand. As early_gdt_descr is manipulated by C code for
213 * secondary CPUs, this should be done only once for the boot CPU
214 * when early_gdt_descr_base contains zero.
215 */
216 movq early_gdt_descr_base(%rip), %rax
217 testq %rax, %rax
218 jnz 1f
219 movq $__per_cpu_load, %rax
220 addq $per_cpu__gdt_page, %rax
221 movq %rax, early_gdt_descr_base(%rip)
2221:
223#endif
207 /* 224 /*
208 * We must switch to a new descriptor in kernel space for the GDT 225 * We must switch to a new descriptor in kernel space for the GDT
209 * because soon the kernel won't have access anymore to the userspace 226 * because soon the kernel won't have access anymore to the userspace
@@ -401,7 +418,12 @@ NEXT_PAGE(level2_spare_pgt)
401 .globl early_gdt_descr 418 .globl early_gdt_descr
402early_gdt_descr: 419early_gdt_descr:
403 .word GDT_ENTRIES*8-1 420 .word GDT_ENTRIES*8-1
404 .quad per_cpu__gdt_page 421#ifdef CONFIG_SMP
422early_gdt_descr_base:
423 .quad 0x0000000000000000
424#else
425 .quad per_cpu__gdt_page
426#endif
405 427
406ENTRY(phys_base) 428ENTRY(phys_base)
407 /* This must match the first entry in level2_kernel_pgt */ 429 /* This must match the first entry in level2_kernel_pgt */