author     Tejun Heo <tj@kernel.org>       2009-01-30 02:32:22 -0500
committer  Ingo Molnar <mingo@elte.hu>     2009-01-30 17:27:46 -0500
commit     3ac6cffea4aa18007a454a7442da2855882f403d (patch)
tree       4e784abaa1715728cb8fd2adbce793e30304d3b7 /include/asm-generic
parent     c43e0e46adf79c321ed3fbf0351e1005fb8a2413 (diff)
linker script: use separate simpler definition for PERCPU()
Impact: fix linker screwup on x86_32
Recent x86_64 zerobased patches introduced PERCPU_VADDR() to put
.data.percpu at a predefined address and re-defined PERCPU() in terms
of it. The new macro defined one extra symbol, __per_cpu_load, for the
LMA of the section so that the init data could be accessed. This new
symbol introduced the following problems on x86_32.
1. If __per_cpu_load is defined outside of .data.percpu as an absolute
symbol, relocation generation for relocatable kernel fails due to
absolute relocation.
2. If __per_cpu_load is put inside .data.percpu with absolute address
assignment to work around #1, linker gets confused and under
certain configurations ends up relocating the symbol against
.data.percpu such that the load address gets added on top of
already set load address.
As x86_32 doesn't use predefined address for .data.percpu, there's no
need for it to care about the possibility of __per_cpu_load being
different from __per_cpu_start.
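For illustration only (this fragment is not part of the patch), the GNU ld
behaviour behind problem 1: a symbol assigned outside of any output section
becomes an absolute symbol, while a symbol assigned inside an output section
is defined relative to that section and relocates normally.

    /* Hypothetical linker-script fragment; the symbol names are made up. */
    __abs_sym = .;                  /* outside a section: absolute symbol       */
    .data.percpu : {
            __rel_sym = .;          /* inside the section: section-relative,    */
            *(.data.percpu)         /* relocations resolve against .data.percpu */
    }

PERCPU_VADDR() defines __per_cpu_load in the first position; PERCPU() as
changed by this patch defines it in the second.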
This patch defines PERCPU() separately so that __per_cpu_load is
defined inside .data.percpu and everything is ordinary linking-wise.
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 22
1 file changed, 17 insertions(+), 5 deletions(-)
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 53e21f36a802..5406e70aba86 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -445,10 +445,9 @@
  * section in the linker script will go there too.  @phdr should have
  * a leading colon.
  *
- * This macro defines three symbols, __per_cpu_load, __per_cpu_start
- * and __per_cpu_end.  The first one is the vaddr of loaded percpu
- * init data.  __per_cpu_start equals @vaddr and __per_cpu_end is the
- * end offset.
+ * Note that this macros defines __per_cpu_load as an absolute symbol.
+ * If there is no need to put the percpu section at a predetermined
+ * address, use PERCPU().
  */
 #define PERCPU_VADDR(vaddr, phdr)					\
 	VMLINUX_SYMBOL(__per_cpu_load) = .;				\
@@ -470,7 +469,20 @@
  * Align to @align and outputs output section for percpu area.  This
  * macro doesn't maniuplate @vaddr or @phdr and __per_cpu_load and
  * __per_cpu_start will be identical.
+ *
+ * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
+ * that __per_cpu_load is defined as a relative symbol against
+ * .data.percpu which is required for relocatable x86_32
+ * configuration.
  */
 #define PERCPU(align)							\
 	. = ALIGN(align);						\
-	PERCPU_VADDR( , )
+	.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {		\
+		VMLINUX_SYMBOL(__per_cpu_load) = .;			\
+		VMLINUX_SYMBOL(__per_cpu_start) = .;			\
+		*(.data.percpu.first)					\
+		*(.data.percpu.page_aligned)				\
+		*(.data.percpu)						\
+		*(.data.percpu.shared_aligned)				\
+		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
+	}
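For context, a hedged sketch of how the two macros would be used in an
architecture's vmlinux.lds.S; the invocations below are assumptions for
illustration, not taken from this commit.

    /* Hypothetical use sites in an arch linker script (not part of this diff). */

    /* percpu section at a predetermined address (e.g. a zero-based layout);
     * __per_cpu_load is absolute and may differ from __per_cpu_start: */
    PERCPU_VADDR(0, :percpu)

    /* percpu section at the current location, page aligned, as on x86_32;
     * __per_cpu_load and __per_cpu_start are identical and section-relative: */
    PERCPU(PAGE_SIZE)

The section-relative __per_cpu_load in the second form is what lets relocation
generation for the relocatable x86_32 kernel proceed without tripping over an
absolute symbol.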