about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorSam Ravnborg <sam@ravnborg.org>2009-04-29 03:47:25 -0400
committerIngo Molnar <mingo@elte.hu>2009-04-29 04:20:33 -0400
commite58bdaa8f810332e5c1760ce496b01e07d51642c (patch)
tree701dce42e2d33487fe14674c9d9ae31bf157dccb /arch
parentff6f87e1626e10beef675084c9b5384a9477e3d5 (diff)
x86, vmlinux.lds: unify first part of initdata
32-bit:
- Move definition of __init_begin outside output_section
  because it covers more than one section
- Move ALIGN() for end-of-section inside .smp_locks output section.
  Same effect but the intent is better documented that
  we need both start and end aligned.

64-bit:
- Move ALIGN() outside output section in .init.setup
- Deleted unused __smp_alt_* symbols

None of the above should result in any functional change.

[ Impact: refactor and unify linker script ]

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Cc: Tim Abbott <tabbott@MIT.EDU>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
LKML-Reference: <1240991249-27117-9-git-send-email-sam@ravnborg.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/kernel/vmlinux.lds.S61
-rw-r--r--arch/x86/kernel/vmlinux_32.lds.S60
-rw-r--r--arch/x86/kernel/vmlinux_64.lds.S59
3 files changed, 61 insertions, 119 deletions
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index b3106c2a0373..8b203c4ced9b 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -231,6 +231,67 @@ SECTIONS
231 231
232#endif /* CONFIG_X86_64 */ 232#endif /* CONFIG_X86_64 */
233 233
234 /* init_task */
235 . = ALIGN(THREAD_SIZE);
236 .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
237 *(.data.init_task)
238 }
239#ifdef CONFIG_X86_64
240 :data.init
241#endif
242
243 /*
244 * smp_locks might be freed after init
245 * start/end must be page aligned
246 */
247 . = ALIGN(PAGE_SIZE);
248 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
249 __smp_locks = .;
250 *(.smp_locks)
251 __smp_locks_end = .;
252 . = ALIGN(PAGE_SIZE);
253 }
254
255 /* Init code and data - will be freed after init */
256 . = ALIGN(PAGE_SIZE);
257 __init_begin = .; /* paired with __init_end */
258 .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
259 _sinittext = .;
260 INIT_TEXT
261 _einittext = .;
262 }
263
264 .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
265 INIT_DATA
266 }
267
268 . = ALIGN(16);
269 .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
270 __setup_start = .;
271 *(.init.setup)
272 __setup_end = .;
273 }
274 .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
275 __initcall_start = .;
276 INITCALLS
277 __initcall_end = .;
278 }
279
280 .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
281 __con_initcall_start = .;
282 *(.con_initcall.init)
283 __con_initcall_end = .;
284 }
285
286 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
287 __x86_cpu_dev_start = .;
288 *(.x86_cpu_dev.init)
289 __x86_cpu_dev_end = .;
290 }
291
292 SECURITY_INIT
293
294
234#ifdef CONFIG_X86_32 295#ifdef CONFIG_X86_32
235# include "vmlinux_32.lds.S" 296# include "vmlinux_32.lds.S"
236#else 297#else
diff --git a/arch/x86/kernel/vmlinux_32.lds.S b/arch/x86/kernel/vmlinux_32.lds.S
index 8ade84687b2d..d8ba5394af03 100644
--- a/arch/x86/kernel/vmlinux_32.lds.S
+++ b/arch/x86/kernel/vmlinux_32.lds.S
@@ -1,63 +1,3 @@
1 /* init_task */
2 . = ALIGN(THREAD_SIZE);
3 .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
4 *(.data.init_task)
5 }
6
7 . = ALIGN(PAGE_SIZE);
8 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
9 /* might get freed after init */
10 __smp_locks = .;
11 *(.smp_locks)
12 __smp_locks_end = .;
13 }
14 /* will be freed after init
15 * Following ALIGN() is required to make sure no other data falls on the
16 * same page where __smp_alt_end is pointing as that page might be freed
17 * after boot. Always make sure that ALIGN() directive is present after
18 * the section which contains __smp_alt_end.
19 */
20 . = ALIGN(PAGE_SIZE);
21
22 /* Init code and data - will be freed after init */
23 . = ALIGN(PAGE_SIZE);
24 .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
25 __init_begin = .;
26 _sinittext = .;
27 INIT_TEXT
28 _einittext = .;
29 }
30
31 .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
32 INIT_DATA
33 }
34
35 . = ALIGN(16);
36 .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
37 __setup_start = .;
38 *(.init.setup)
39 __setup_end = .;
40 }
41 .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
42 __initcall_start = .;
43 INITCALLS
44 __initcall_end = .;
45 }
46
47 .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
48 __con_initcall_start = .;
49 *(.con_initcall.init)
50 __con_initcall_end = .;
51 }
52
53 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
54 __x86_cpu_dev_start = .;
55 *(.x86_cpu_dev.init)
56 __x86_cpu_dev_end = .;
57 }
58
59 SECURITY_INIT
60
61 . = ALIGN(4); 1 . = ALIGN(4);
62 .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) { 2 .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
63 __alt_instructions = .; 3 __alt_instructions = .;
diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S
index 013aa0e1dd3a..0e8054e0c5c4 100644
--- a/arch/x86/kernel/vmlinux_64.lds.S
+++ b/arch/x86/kernel/vmlinux_64.lds.S
@@ -1,62 +1,3 @@
1 /* init_task */
2 .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
3 . = ALIGN(THREAD_SIZE);
4 *(.data.init_task)
5 } :data.init
6
7 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
8 /* might get freed after init */
9 . = ALIGN(PAGE_SIZE);
10 __smp_alt_begin = .;
11 __smp_locks = .;
12 *(.smp_locks)
13 __smp_locks_end = .;
14 . = ALIGN(PAGE_SIZE);
15 __smp_alt_end = .;
16 }
17
18 /* Init code and data */
19 . = ALIGN(PAGE_SIZE);
20 __init_begin = .; /* paired with __init_end */
21 .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
22 _sinittext = .;
23 INIT_TEXT
24 _einittext = .;
25 }
26
27 .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
28 __initdata_begin = .;
29 INIT_DATA
30 __initdata_end = .;
31 }
32
33 .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
34 . = ALIGN(16);
35 __setup_start = .;
36 *(.init.setup)
37 __setup_end = .;
38 }
39
40 .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
41 __initcall_start = .;
42 INITCALLS
43 __initcall_end = .;
44 }
45
46 .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
47 __con_initcall_start = .;
48 *(.con_initcall.init)
49 __con_initcall_end = .;
50 }
51
52 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
53 __x86_cpu_dev_start = .;
54 *(.x86_cpu_dev.init)
55 __x86_cpu_dev_end = .;
56 }
57
58 SECURITY_INIT
59
60 . = ALIGN(8); 1 . = ALIGN(8);
61 .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) { 2 .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
62 __parainstructions = .; 3 __parainstructions = .;