author     Ian Campbell <Ian.Campbell@XenSource.com>   2006-09-26 04:52:38 -0400
committer  Andi Kleen <andi@basil.nowhere.org>         2006-09-26 04:52:38 -0400
commit     f2a9e1dec27189a09ff642f2648e49ad9e76607b
tree       0ff8688e89dcbd47fa2bda2cc418820841fdefde /arch/x86_64
parent     26374c7b7dca1ff90607c83d9b82e917119f0456
[PATCH] Put .note.* sections into a PT_NOTE segment
This patch updates the x86_64 linker script to pack any .note.* sections
into a PT_NOTE segment in the output file.
To do this, we tell ld that we need a PT_NOTE segment. This requires
us to start explicitly mapping sections to segments, so we also need
to explicitly create PT_LOAD segments for text and data, and map the
sections to them appropriately. Fortunately, each section will
default to its previous section's segment, so it doesn't take many
changes to vmlinux.lds.S.
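For readers unfamiliar with the ld mechanism involved, here is a minimal,
hypothetical sketch, not the actual vmlinux.lds.S, with purely illustrative
section names:

PHDRS {
        text PT_LOAD FLAGS(5);   /* p_flags bits: PF_R | PF_X */
        data PT_LOAD FLAGS(7);   /* PF_R | PF_W | PF_X */
}
SECTIONS {
        .text   : { *(.text) } :text    /* placed in the "text" PT_LOAD segment */
        .rodata : { *(.rodata) }        /* no :phdr given, so it inherits "text" */
        .data   : { *(.data) } :data    /* switches to the "data" PT_LOAD segment */
        .bss    : { *(.bss) }           /* inherits "data" */
}

Only the first section of each group needs an explicit :segment; the sections
that follow inherit it, which is why the patch below touches so few lines.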
The corresponding change has already been made for i386 in -mm, and I'd like
this patch to join it. The section-to-segment mappings do change, as do the
segment flags, so some time in -mm would be good for that reason as well,
just in case.
In particular, .data and .bss move from the text segment to the data
segment, and .data.cacheline_aligned and .data.read_mostly are put in the
data segment instead of a separate one.
I think it would be possible to exactly match the existing section-to-segment
mapping and flags, but it would be a more intrusive change, and I'm not sure
there is a reason for the existing layout other than it being what you get by
default if you don't explicitly specify something else.
If there is a reason for the existing layout then I will of course make
the more intrusive change. If there is no reason, we could probably drop
the executable or writable flags from some segments, but I don't know how
much attention is paid to them anyway, so it might not be worth the
effort.
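If the flags were ever tightened, the change would presumably be confined to
the FLAGS() values in PHDRS, which take the ELF p_flags bits PF_X = 1,
PF_W = 2, PF_R = 4. A hypothetical example, not part of this patch:

        text PT_LOAD FLAGS(5);   /* R-X: readable and executable, not writable  */
        data PT_LOAD FLAGS(6);   /* RW-: readable and writable, execute dropped */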
The vsyscall-related sections need to go in a different segment from the
normal data segment, so I invented a "user" segment to contain them.
I believe this should appear to the kernel to be just another data segment,
so the flags are set up accordingly.
The notes will be used in the Xen paravirt_ops backend to provide
additional information to the domain builder. I am currently converting the
xen-unstable kernels and tools over to this scheme to support this in the
future.
It has been suggested to me that the notes segment should have flags 0
(i.e. not readable) since it is only used by the loader and is not used
at runtime. For now I went with a readable segment since that is what
the i386 patch uses.
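The two options differ only in the FLAGS() value of the note program header;
this patch uses the readable form, and the flags-0 variant is shown here
purely for comparison:

        note PT_NOTE FLAGS(4);   /* PF_R: readable, matching the i386 patch  */
        note PT_NOTE FLAGS(0);   /* no PF_* bits: consumed by the loader only */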
AK: dropped NOTES addition right now because the needed infrastructure
for that is not merged yet
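For context, the dropped NOTES piece would presumably amount to a section rule
that collects the note input sections into the new segment, along these lines
(hypothetical, not part of the merged patch):

        .notes : { *(.note.*) } :note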
Signed-off-by: Ian Campbell <ian.campbell@xensource.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Diffstat (limited to 'arch/x86_64')
 arch/x86_64/kernel/vmlinux.lds.S | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index 8d5a5149bb3a..2802aa963fa3 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -13,6 +13,12 @@ OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
 OUTPUT_ARCH(i386:x86-64)
 ENTRY(phys_startup_64)
 jiffies_64 = jiffies;
+PHDRS {
+        text PT_LOAD FLAGS(5);  /* R_E */
+        data PT_LOAD FLAGS(7);  /* RWE */
+        user PT_LOAD FLAGS(7);  /* RWE */
+        note PT_NOTE FLAGS(4);  /* R__ */
+}
 SECTIONS
 {
   . = __START_KERNEL;
@@ -31,7 +37,7 @@ SECTIONS
         KPROBES_TEXT
         *(.fixup)
         *(.gnu.warning)
-        } = 0x9090
+        } :text = 0x9090
   /* out-of-line lock text */
   .text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET) { *(.text.lock) }
 
@@ -57,7 +63,7 @@ SECTIONS
   .data : AT(ADDR(.data) - LOAD_OFFSET) {
         *(.data)
         CONSTRUCTORS
-        }
+        } :data
 
   _edata = .;                   /* End of data section */
 
@@ -89,7 +95,7 @@ SECTIONS
 #define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
 
   . = VSYSCALL_ADDR;
-  .vsyscall_0 : AT(VSYSCALL_PHYS_ADDR) { *(.vsyscall_0) }
+  .vsyscall_0 : AT(VSYSCALL_PHYS_ADDR) { *(.vsyscall_0) } :user
   __vsyscall_0 = VSYSCALL_VIRT_ADDR;
 
   . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
@@ -135,7 +141,7 @@ SECTIONS
   . = ALIGN(8192);              /* init_task */
   .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
         *(.data.init_task)
-  }
+  } :data
 
   . = ALIGN(4096);
   .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {