author     Vivek Goyal <vgoyal@in.ibm.com>        2006-12-06 20:14:03 -0500
committer  Andi Kleen <andi@basil.nowhere.org>    2006-12-06 20:14:03 -0500
commit     6569580de7ae367def89b7671029cb97c1965574 (patch)
tree       e20a70b2b3bb71c3197446bc4265a2f458ddee03
parent     67fd44fea274a5033ceb90284683bc44df61df54 (diff)
[PATCH] i386: Distinguish absolute symbols
Ld knows about two kinds of symbols, absolute and section relative. Section relative symbols change value when a section is moved and absolute symbols do not.

Currently in the linker script we have several labels marking the beginning and end of sections that are themselves placed outside of those sections, making them absolute symbols. Having a mixture of absolute and section relative symbols referring to the same data is currently harmless, but it is confusing. This must be done carefully, as newer revs of ld do not place symbols that appear in sections without data; instead ld makes those symbols global :(

My ultimate goal is to build a relocatable kernel. The safest and least intrusive technique is to generate relocation entries so the kernel can be relocated at load time. The only penalty would be an increase in the size of the kernel binary. The problem is that if absolute and relocatable symbols are not properly specified, absolute symbols will be relocated or section relative symbols won't be, which is fatal.

The practical motivation is that when generating kernels that will run from a reserved area for analyzing what caused a kernel panic, it is simpler if you don't need to hard code the physical memory location they will run at, especially for the distributions.

[AK: and merged:]

o Also add a comment so that in the future people are aware of this and avoid introducing absolute symbols.

Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Vivek Goyal <vgoyal@in.ibm.com>
Signed-off-by: Andi Kleen <ak@suse.de>
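To make the distinction concrete, here is a minimal, purely illustrative linker-script sketch; it is not part of the patch, and the .example section and __example_* symbols are invented for this note:

    SECTIONS
    {
            /* Defined between output sections: as the patch description notes,
             * ld treats such a label as an absolute symbol, so its value stays
             * fixed even if the kernel image is relocated at load time. */
            __example_abs = .;

            .example : {
                    /* Defined inside the output section: section relative, so it
                     * is adjusted together with .example when the image moves. */
                    __example_start = .;
                    *(.example)
                    __example_end = .;
            }
    }

On a linked image, nm makes the difference visible: absolute symbols are listed with type 'A', while section-relative ones carry section types such as 'T', 'D' or 'B'.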
-rw-r--r--  arch/i386/kernel/vmlinux.lds.S     | 113
-rw-r--r--  include/asm-generic/vmlinux.lds.h  |  10
2 files changed, 68 insertions(+), 55 deletions(-)
diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S
index c6f84a0322ba..cbd24860fbb7 100644
--- a/arch/i386/kernel/vmlinux.lds.S
+++ b/arch/i386/kernel/vmlinux.lds.S
@@ -1,5 +1,11 @@
 /* ld script to make i386 Linux kernel
  * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>;
+ *
+ * Don't define absolute symbols until and unless you know that symbol
+ * value is should remain constant even if kernel image is relocated
+ * at run time. Absolute symbols are not relocated. If symbol value should
+ * change if kernel is relocated, make the symbol section relative and
+ * put it inside the section definition.
  */
 
 #define LOAD_OFFSET __PAGE_OFFSET
@@ -24,31 +30,32 @@ SECTIONS
   . = __KERNEL_START;
   phys_startup_32 = startup_32 - LOAD_OFFSET;
   /* read-only */
-  _text = .;                    /* Text and read-only data */
   .text : AT(ADDR(.text) - LOAD_OFFSET) {
+        _text = .;              /* Text and read-only data */
         *(.text)
         SCHED_TEXT
         LOCK_TEXT
         KPROBES_TEXT
         *(.fixup)
         *(.gnu.warning)
-        } :text = 0x9090
-
-  _etext = .;                   /* End of text section */
+        _etext = .;             /* End of text section */
+        } :text = 0x9090
 
   . = ALIGN(16);                /* Exception table */
-  __start___ex_table = .;
-  __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { *(__ex_table) }
-  __stop___ex_table = .;
+  __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
+        __start___ex_table = .;
+        *(__ex_table)
+        __stop___ex_table = .;
+  }
 
   RODATA
 
   . = ALIGN(4);
-  __tracedata_start = .;
   .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {
+        __tracedata_start = .;
         *(.tracedata)
+        __tracedata_end = .;
   }
-  __tracedata_end = .;
 
   /* writeable */
   . = ALIGN(4096);
@@ -58,10 +65,12 @@ SECTIONS
   } :data
 
   . = ALIGN(4096);
-  __nosave_begin = .;
-  .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { *(.data.nosave) }
-  . = ALIGN(4096);
-  __nosave_end = .;
+  .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
+        __nosave_begin = .;
+        *(.data.nosave)
+        . = ALIGN(4096);
+        __nosave_end = .;
+  }
 
   . = ALIGN(4096);
   .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
@@ -75,8 +84,10 @@ SECTIONS
 
   /* rarely changed data like cpu maps */
   . = ALIGN(32);
-  .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) { *(.data.read_mostly) }
-  _edata = .;                   /* End of data section */
+  .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
+        *(.data.read_mostly)
+        _edata = .;             /* End of data section */
+  }
 
 #ifdef CONFIG_STACK_UNWIND
   . = ALIGN(4);
@@ -94,54 +105,56 @@ SECTIONS
 
   /* might get freed after init */
   . = ALIGN(4096);
-  __smp_alt_begin = .;
-  __smp_alt_instructions = .;
   .smp_altinstructions : AT(ADDR(.smp_altinstructions) - LOAD_OFFSET) {
+        __smp_alt_begin = .;
+        __smp_alt_instructions = .;
         *(.smp_altinstructions)
+        __smp_alt_instructions_end = .;
   }
-  __smp_alt_instructions_end = .;
   . = ALIGN(4);
-  __smp_locks = .;
   .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
+        __smp_locks = .;
         *(.smp_locks)
+        __smp_locks_end = .;
   }
-  __smp_locks_end = .;
   .smp_altinstr_replacement : AT(ADDR(.smp_altinstr_replacement) - LOAD_OFFSET) {
         *(.smp_altinstr_replacement)
+        __smp_alt_end = .;
   }
   . = ALIGN(4096);
-  __smp_alt_end = .;
 
   /* will be freed after init */
   . = ALIGN(4096);              /* Init code and data */
-  __init_begin = .;
   .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
+        __init_begin = .;
         _sinittext = .;
         *(.init.text)
         _einittext = .;
   }
   .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { *(.init.data) }
   . = ALIGN(16);
-  __setup_start = .;
-  .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { *(.init.setup) }
-  __setup_end = .;
-  __initcall_start = .;
+  .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
+        __setup_start = .;
+        *(.init.setup)
+        __setup_end = .;
+  }
   .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
+        __initcall_start = .;
         INITCALLS
+        __initcall_end = .;
   }
-  __initcall_end = .;
-  __con_initcall_start = .;
   .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
+        __con_initcall_start = .;
         *(.con_initcall.init)
+        __con_initcall_end = .;
   }
-  __con_initcall_end = .;
   SECURITY_INIT
   . = ALIGN(4);
-  __alt_instructions = .;
   .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
+        __alt_instructions = .;
         *(.altinstructions)
+        __alt_instructions_end = .;
   }
-  __alt_instructions_end = .;
   .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
         *(.altinstr_replacement)
   }
@@ -150,32 +163,32 @@ SECTIONS
   .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { *(.exit.text) }
   .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { *(.exit.data) }
   . = ALIGN(4096);
-  __initramfs_start = .;
-  .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { *(.init.ramfs) }
-  __initramfs_end = .;
+  .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
+        __initramfs_start = .;
+        *(.init.ramfs)
+        __initramfs_end = .;
+  }
   . = ALIGN(L1_CACHE_BYTES);
-  __per_cpu_start = .;
-  .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { *(.data.percpu) }
-  __per_cpu_end = .;
+  .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {
+        __per_cpu_start = .;
+        *(.data.percpu)
+        __per_cpu_end = .;
+  }
   . = ALIGN(4096);
-  __init_end = .;
   /* freed after init ends here */
 
-  __bss_start = .;              /* BSS */
-  .bss.page_aligned : AT(ADDR(.bss.page_aligned) - LOAD_OFFSET) {
-        *(.bss.page_aligned)
-  }
   .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
+        __init_end = .;
+        __bss_start = .;        /* BSS */
+        *(.bss.page_aligned)
         *(.bss)
+        . = ALIGN(4);
+        __bss_stop = .;
+        _end = . ;
+        /* This is where the kernel creates the early boot page tables */
+        . = ALIGN(4096);
+        pg0 = . ;
   }
-  . = ALIGN(4);
-  __bss_stop = .;
-
-  _end = . ;
-
-  /* This is where the kernel creates the early boot page tables */
-  . = ALIGN(4096);
-  pg0 = .;
 
   /* Sections to be discarded */
   /DISCARD/ : {
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index e60d6f21fa62..9f4747780dac 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -11,8 +11,8 @@
 
 #define RODATA                                                  \
         . = ALIGN(4096);                                        \
-        __start_rodata = .;                                     \
         .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) {             \
+                VMLINUX_SYMBOL(__start_rodata) = .;             \
                 *(.rodata) *(.rodata.*)                         \
                 *(__vermagic)          /* Kernel version magic */ \
         }                                                       \
@@ -119,17 +119,17 @@
                 *(__ksymtab_strings)                            \
         }                                                       \
                                                                 \
+        /* Unwind data binary search table */                   \
+        EH_FRAME_HDR                                            \
+                                                                \
         /* Built-in module parameters. */                       \
         __param : AT(ADDR(__param) - LOAD_OFFSET) {             \
                 VMLINUX_SYMBOL(__start___param) = .;            \
                 *(__param)                                      \
                 VMLINUX_SYMBOL(__stop___param) = .;             \
+                VMLINUX_SYMBOL(__end_rodata) = .;               \
         }                                                       \
                                                                 \
-        /* Unwind data binary search table */                   \
-        EH_FRAME_HDR                                            \
-                                                                \
-        __end_rodata = .;                                       \
         . = ALIGN(4096);
 
 #define SECURITY_INIT                                           \