path: root/include/asm-generic/vmlinux.lds.h
author		Sam Ravnborg <sam@ravnborg.org>	2009-06-07 14:46:37 -0400
committer	Sam Ravnborg <sam@ravnborg.org>	2009-06-09 17:02:22 -0400
commit		ef53dae8658cf0e93d380983824a661067948d87 (patch)
tree		d5f16da0d7f3353db1233f6399a23d0e187b7d1d /include/asm-generic/vmlinux.lds.h
parent		eedc9d83eaab2d35fb9dd1ec25b765dec964e26c (diff)
Improve vmlinux.lds.h support for arch specific linker scripts
To support alignment of the individual architecture specific linker scripts,
provide a set of general definitions in vmlinux.lds.h. With these definitions
applied, the diverse linker scripts can be reduced in line count and their
readability is improved - IMO.

A sample linker script is included to give the preferred order of the
sections for the architectures that do not have any special requirements.

These definitions are also a first step towards eventual support for
-ffunction-sections. The definitions make it much easier to do a global
renaming of section names - but the main purpose is to clean up the
linker scripts.

Tim Abbott has provided a lot of input to improve the definitions -
all faults are mine.

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Cc: Tim Abbott <tabbott@mit.edu>
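As a rough illustration of the intended cleanup, the fragment below contrasts a hand-written init-text output section (a hypothetical example, not taken from any particular architecture) with the helper introduced by this patch; both produce the same section contents, assuming VMLINUX_SYMBOL() is the usual no-op so the symbols appear unprefixed:

	/* Hand-written form (hypothetical arch linker script) */
	. = ALIGN(PAGE_SIZE);
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
		_sinittext = .;
		INIT_TEXT
		_einittext = .;
	}

	/* Equivalent form using the new helper from this patch */
	INIT_TEXT_SECTION(PAGE_SIZE)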
Diffstat (limited to 'include/asm-generic/vmlinux.lds.h')
 include/asm-generic/vmlinux.lds.h | 231
 1 file changed, 224 insertions(+), 7 deletions(-)
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 3edb11499743..fba42236e942 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -1,4 +1,58 @@
-#include <linux/section-names.h>
+/*
+ * Helper macros to support writing architecture specific
+ * linker scripts.
+ *
+ * A minimal linker script has the following content:
+ * [This is a sample, architectures may have special requirements]
+ *
+ * OUTPUT_FORMAT(...)
+ * OUTPUT_ARCH(...)
+ * ENTRY(...)
+ * SECTIONS
+ * {
+ *	. = START;
+ *	__init_begin = .;
+ *	HEAD_SECTION
+ *	INIT_TEXT_SECTION(PAGE_SIZE)
+ *	INIT_DATA_SECTION(...)
+ *	PERCPU(PAGE_SIZE)
+ *	__init_end = .;
+ *
+ *	_stext = .;
+ *	TEXT_SECTION = 0
+ *	_etext = .;
+ *
+ *	_sdata = .;
+ *	RO_DATA_SECTION(PAGE_SIZE)
+ *	RW_DATA_SECTION(...)
+ *	_edata = .;
+ *
+ *	EXCEPTION_TABLE(...)
+ *	NOTES
+ *
+ *	__bss_start = .;
+ *	BSS_SECTION(0, 0)
+ *	__bss_stop = .;
+ *	_end = .;
+ *
+ *	/DISCARD/ : {
+ *		EXIT_TEXT
+ *		EXIT_DATA
+ *		*(.exitcall.exit)
+ *	}
+ *	STABS_DEBUG
+ *	DWARF_DEBUG
+ * }
+ *
+ * [__init_begin, __init_end] is the init section that may be freed after init
+ * [_stext, _etext] is the text section
+ * [_sdata, _edata] is the data section
+ *
+ * Some of the included output sections have their own set of constants.
+ * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
+ * [__nosave_begin, __nosave_end] for the nosave data
+ */
+#include <linux/section-names.h>
 
 #ifndef LOAD_OFFSET
 #define LOAD_OFFSET 0
@@ -116,7 +170,36 @@
 	FTRACE_EVENTS() \
 	TRACE_SYSCALLS()
 
-#define RO_DATA(align) \
+/*
+ * Data section helpers
+ */
+#define NOSAVE_DATA \
+	. = ALIGN(PAGE_SIZE); \
+	VMLINUX_SYMBOL(__nosave_begin) = .; \
+	*(.data.nosave) \
+	. = ALIGN(PAGE_SIZE); \
+	VMLINUX_SYMBOL(__nosave_end) = .;
+
+#define PAGE_ALIGNED_DATA(page_align) \
+	. = ALIGN(page_align); \
+	*(.data.page_aligned)
+
+#define READ_MOSTLY_DATA(align) \
+	. = ALIGN(align); \
+	*(.data.read_mostly)
+
+#define CACHELINE_ALIGNED_DATA(align) \
+	. = ALIGN(align); \
+	*(.data.cacheline_aligned)
+
+#define INIT_TASK(align) \
+	. = ALIGN(align); \
+	*(.data.init_task)
+
+/*
+ * Read only Data
+ */
+#define RO_DATA_SECTION(align) \
 	. = ALIGN((align)); \
 	.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
 		VMLINUX_SYMBOL(__start_rodata) = .; \
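For an architecture whose .data layout cannot use the combined RW_DATA_SECTION helper added further down in this patch, the building blocks above can presumably be used directly inside its own .data output section. A hedged sketch, mirroring the ordering RW_DATA_SECTION uses; L1_CACHE_BYTES and the other alignment values are placeholders chosen for illustration:

	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
		READ_MOSTLY_DATA(L1_CACHE_BYTES)
		DATA_DATA
		CONSTRUCTORS
		NOSAVE_DATA
		PAGE_ALIGNED_DATA(PAGE_SIZE)
	}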
@@ -270,9 +353,10 @@
 	} \
 	. = ALIGN((align));
 
-/* RODATA provided for backward compatibility.
+/* RODATA & RO_DATA provided for backward compatibility.
  * All archs are supposed to use RO_DATA() */
-#define RODATA RO_DATA(4096)
+#define RODATA RO_DATA_SECTION(4096)
+#define RO_DATA(align) RO_DATA_SECTION(align)
 
 #define SECURITY_INIT \
 	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
@@ -332,6 +416,31 @@
 /* Section used for early init (in .S files) */
 #define HEAD_TEXT *(HEAD_TEXT_SECTION)
 
+#define HEAD_SECTION \
+	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \
+		HEAD_TEXT \
+	}
+
+/*
+ * Exception table
+ */
+#define EXCEPTION_TABLE(align) \
+	. = ALIGN(align); \
+	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
+		VMLINUX_SYMBOL(__start___ex_table) = .; \
+		*(__ex_table) \
+		VMLINUX_SYMBOL(__stop___ex_table) = .; \
+	}
+
+/*
+ * Init task
+ */
+#define INIT_TASK_DATA(align) \
+	. = ALIGN(align); \
+	.data.init_task : { \
+		INIT_TASK(align) \
+	}
+
 /* init and exit section handling */
 #define INIT_DATA \
 	*(.init.data) \
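For illustration, with a 16-byte alignment (an arbitrary value picked here) and VMLINUX_SYMBOL() assumed to be the usual no-op, EXCEPTION_TABLE(16) expands to roughly:

	. = ALIGN(16);
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
		__start___ex_table = .;
		*(__ex_table)
		__stop___ex_table = .;
	}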
@@ -364,9 +473,32 @@
 	CPU_DISCARD(exit.text) \
 	MEM_DISCARD(exit.text)
 
-/* DWARF debug sections.
-   Symbols in the DWARF debugging sections are relative to
-   the beginning of the section so we begin them at 0. */
+/*
+ * bss (Block Started by Symbol) - uninitialized data
+ * zeroed during startup
+ */
+#define SBSS \
+	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
+		*(.sbss) \
+		*(.scommon) \
+	}
+
+#define BSS(bss_align) \
+	. = ALIGN(bss_align); \
+	.bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
+		VMLINUX_SYMBOL(__bss_start) = .; \
+		*(.bss.page_aligned) \
+		*(.dynbss) \
+		*(.bss) \
+		*(COMMON) \
+		VMLINUX_SYMBOL(__bss_stop) = .; \
+	}
+
+/*
+ * DWARF debug sections.
+ * Symbols in the DWARF debugging sections are relative to
+ * the beginning of the section so we begin them at 0.
+ */
 #define DWARF_DEBUG \
 	/* DWARF 1 */ \
 	.debug 0 : { *(.debug) } \
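Combined with the BSS_SECTION wrapper added in the final hunk of this patch, an invocation such as BSS_SECTION(0, 8) (alignment values chosen arbitrarily here) would expand to roughly the following, again assuming VMLINUX_SYMBOL() is a no-op:

	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {
		*(.sbss)
		*(.scommon)
	}
	. = ALIGN(8);
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
		__bss_start = .;
		*(.bss.page_aligned)
		*(.dynbss)
		*(.bss)
		*(COMMON)
		__bss_stop = .;
	}
	. = ALIGN(4);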
@@ -433,6 +565,12 @@
 		VMLINUX_SYMBOL(__stop_notes) = .; \
 	}
 
+#define INIT_SETUP(initsetup_align) \
+	. = ALIGN(initsetup_align); \
+	VMLINUX_SYMBOL(__setup_start) = .; \
+	*(.init.setup) \
+	VMLINUX_SYMBOL(__setup_end) = .;
+
 #define INITCALLS \
 	*(.initcallearly.init) \
 	VMLINUX_SYMBOL(__early_initcall_end) = .; \
@@ -454,6 +592,31 @@
 	*(.initcall7.init) \
 	*(.initcall7s.init)
 
+#define INIT_CALLS \
+	VMLINUX_SYMBOL(__initcall_start) = .; \
+	INITCALLS \
+	VMLINUX_SYMBOL(__initcall_end) = .;
+
+#define CON_INITCALL \
+	VMLINUX_SYMBOL(__con_initcall_start) = .; \
+	*(.con_initcall.init) \
+	VMLINUX_SYMBOL(__con_initcall_end) = .;
+
+#define SECURITY_INITCALL \
+	VMLINUX_SYMBOL(__security_initcall_start) = .; \
+	*(.security_initcall.init) \
+	VMLINUX_SYMBOL(__security_initcall_end) = .;
+
+#ifdef CONFIG_BLK_DEV_INITRD
+#define INIT_RAM_FS \
+	. = ALIGN(PAGE_SIZE); \
+	VMLINUX_SYMBOL(__initramfs_start) = .; \
+	*(.init.ramfs) \
+	VMLINUX_SYMBOL(__initramfs_end) = .;
+#else
+#define INIT_RAM_FS
+#endif
+
 /**
  * PERCPU_VADDR - define output section for percpu area
  * @vaddr: explicit base address (optional)
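These initcall and initramfs pieces are consumed by the INIT_DATA_SECTION helper in the final hunk below; for a hypothetical architecture calling INIT_DATA_SECTION(16) (the 16-byte setup alignment is a placeholder), the resulting .init.data output section would look roughly like:

	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
		INIT_DATA
		INIT_SETUP(16)
		INIT_CALLS
		CON_INITCALL
		SECURITY_INITCALL
		INIT_RAM_FS
	}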
@@ -510,3 +673,57 @@
 		*(.data.percpu.shared_aligned) \
 		VMLINUX_SYMBOL(__per_cpu_end) = .; \
 	}
+
+
+/*
+ * Definition of the high level *_SECTION macros
+ * They will fit only a subset of the architectures
+ */
+
+
+/*
+ * Writeable data.
+ * All sections are combined in a single .data section.
+ * The sections following CONSTRUCTORS are arranged so their
+ * typical alignment matches.
+ * A cacheline is typically/always smaller than a PAGE_SIZE, so
+ * the sections that have this restriction (or similar)
+ * are located before the ones requiring PAGE_SIZE alignment.
+ * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment, which
+ * matches the requirement of PAGE_ALIGNED_DATA.
+ *
+ * Use 0 as page_align if page_aligned data is not used. */
+#define RW_DATA_SECTION(cacheline, nosave, pagealigned, inittask) \
+	. = ALIGN(PAGE_SIZE); \
+	.data : AT(ADDR(.data) - LOAD_OFFSET) { \
+		INIT_TASK(inittask) \
+		CACHELINE_ALIGNED_DATA(cacheline) \
+		READ_MOSTLY_DATA(cacheline) \
+		DATA_DATA \
+		CONSTRUCTORS \
+		NOSAVE_DATA \
+		PAGE_ALIGNED_DATA(pagealigned) \
+	}
+
+#define INIT_TEXT_SECTION(inittext_align) \
+	. = ALIGN(inittext_align); \
+	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \
+		VMLINUX_SYMBOL(_sinittext) = .; \
+		INIT_TEXT \
+		VMLINUX_SYMBOL(_einittext) = .; \
+	}
+
+#define INIT_DATA_SECTION(initsetup_align) \
+	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { \
+		INIT_DATA \
+		INIT_SETUP(initsetup_align) \
+		INIT_CALLS \
+		CON_INITCALL \
+		SECURITY_INITCALL \
+		INIT_RAM_FS \
+	}
+
+#define BSS_SECTION(sbss_align, bss_align) \
+	SBSS \
+	BSS(bss_align) \
+	. = ALIGN(4);
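To close the loop on the sample at the top of the new header comment, here is a hedged instantiation for a hypothetical architecture. The output format, entry symbol, start address, and alignment values are placeholders, PAGE_SIZE and THREAD_SIZE are assumed to come from architecture headers pulled in by the .lds.S preprocessing, and the .text output section is written out by hand because this patch does not add a helper for it:

	OUTPUT_FORMAT("elf32-sample")
	OUTPUT_ARCH(sample)
	ENTRY(_start)
	SECTIONS
	{
		. = 0xC0000000;		/* START placeholder */
		__init_begin = .;
		HEAD_SECTION
		INIT_TEXT_SECTION(PAGE_SIZE)
		INIT_DATA_SECTION(16)
		PERCPU(PAGE_SIZE)
		__init_end = .;

		_stext = .;
		.text : AT(ADDR(.text) - LOAD_OFFSET) {
			TEXT_TEXT
			SCHED_TEXT
			LOCK_TEXT
		}
		_etext = .;

		_sdata = .;
		RO_DATA_SECTION(PAGE_SIZE)
		RW_DATA_SECTION(32, PAGE_SIZE, PAGE_SIZE, THREAD_SIZE)
		_edata = .;

		EXCEPTION_TABLE(16)
		NOTES

		BSS_SECTION(0, 0)
		_end = .;

		/DISCARD/ : {
			EXIT_TEXT
			EXIT_DATA
			*(.exitcall.exit)
		}
		STABS_DEBUG
		DWARF_DEBUG
	}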