Diffstat (limited to 'include/asm-generic/vmlinux.lds.h')
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 252
1 file changed, 241 insertions(+), 11 deletions(-)
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 89853bcd27a6..92b73b6140ff 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -1,4 +1,57 @@
-#include <linux/section-names.h>
+/*
+ * Helper macros to support writing architecture specific
+ * linker scripts.
+ *
+ * A minimal linker script has the following content:
+ * [This is a sample, architectures may have special requirements]
+ *
+ * OUTPUT_FORMAT(...)
+ * OUTPUT_ARCH(...)
+ * ENTRY(...)
+ * SECTIONS
+ * {
+ *	. = START;
+ *	__init_begin = .;
+ *	HEAD_TEXT_SECTION
+ *	INIT_TEXT_SECTION(PAGE_SIZE)
+ *	INIT_DATA_SECTION(...)
+ *	PERCPU(PAGE_SIZE)
+ *	__init_end = .;
+ *
+ *	_stext = .;
+ *	TEXT_SECTION = 0
+ *	_etext = .;
+ *
+ *	_sdata = .;
+ *	RO_DATA_SECTION(PAGE_SIZE)
+ *	RW_DATA_SECTION(...)
+ *	_edata = .;
+ *
+ *	EXCEPTION_TABLE(...)
+ *	NOTES
+ *
+ *	__bss_start = .;
+ *	BSS_SECTION(0, 0)
+ *	__bss_stop = .;
+ *	_end = .;
+ *
+ *	/DISCARD/ : {
+ *		EXIT_TEXT
+ *		EXIT_DATA
+ *		EXIT_CALL
+ *	}
+ *	STABS_DEBUG
+ *	DWARF_DEBUG
+ * }
+ *
+ * [__init_begin, __init_end] is the init section that may be freed after init
+ * [_stext, _etext] is the text section
+ * [_sdata, _edata] is the data section
+ *
+ * Some of the included output sections have their own set of constants.
+ * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
+ *               [__nosave_begin, __nosave_end] for the nosave data
+ */
 
 #ifndef LOAD_OFFSET
 #define LOAD_OFFSET 0
@@ -63,7 +116,7 @@
 #define BRANCH_PROFILE()
 #endif
 
-#ifdef CONFIG_EVENT_TRACER
+#ifdef CONFIG_EVENT_TRACING
 #define FTRACE_EVENTS()	VMLINUX_SYMBOL(__start_ftrace_events) = .;	\
 			*(_ftrace_events)				\
 			VMLINUX_SYMBOL(__stop_ftrace_events) = .;
@@ -116,7 +169,36 @@
 	FTRACE_EVENTS()							\
 	TRACE_SYSCALLS()
 
-#define RO_DATA(align)							\
+/*
+ * Data section helpers
+ */
+#define NOSAVE_DATA							\
+	. = ALIGN(PAGE_SIZE);						\
+	VMLINUX_SYMBOL(__nosave_begin) = .;				\
+	*(.data.nosave)							\
+	. = ALIGN(PAGE_SIZE);						\
+	VMLINUX_SYMBOL(__nosave_end) = .;
+
+#define PAGE_ALIGNED_DATA(page_align)					\
+	. = ALIGN(page_align);						\
+	*(.data.page_aligned)
+
+#define READ_MOSTLY_DATA(align)						\
+	. = ALIGN(align);						\
+	*(.data.read_mostly)
+
+#define CACHELINE_ALIGNED_DATA(align)					\
+	. = ALIGN(align);						\
+	*(.data.cacheline_aligned)
+
+#define INIT_TASK(align)						\
+	. = ALIGN(align);						\
+	*(.data.init_task)
+
+/*
+ * Read only Data
+ */
+#define RO_DATA_SECTION(align)						\
 	. = ALIGN((align));						\
 	.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) {			\
 		VMLINUX_SYMBOL(__start_rodata) = .;			\
@@ -270,9 +352,10 @@
 	}								\
 	. = ALIGN((align));
 
-/* RODATA provided for backward compatibility.
+/* RODATA & RO_DATA provided for backward compatibility.
  * All archs are supposed to use RO_DATA() */
-#define RODATA RO_DATA(4096)
+#define RODATA		RO_DATA_SECTION(4096)
+#define RO_DATA(align)	RO_DATA_SECTION(align)
 
 #define SECURITY_INIT							\
 	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
@@ -330,16 +413,51 @@
 #endif
 
 /* Section used for early init (in .S files) */
-#define HEAD_TEXT  *(HEAD_TEXT_SECTION)
+#define HEAD_TEXT  *(.head.text)
+
+#define HEAD_TEXT_SECTION						\
+	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {		\
+		HEAD_TEXT						\
+	}
+
+/*
+ * Exception table
+ */
+#define EXCEPTION_TABLE(align)						\
+	. = ALIGN(align);						\
+	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {		\
+		VMLINUX_SYMBOL(__start___ex_table) = .;			\
+		*(__ex_table)						\
+		VMLINUX_SYMBOL(__stop___ex_table) = .;			\
+	}
+
+/*
+ * Init task
+ */
+#define INIT_TASK_DATA(align)						\
+	. = ALIGN(align);						\
+	.data.init_task : {						\
+		INIT_TASK						\
+	}
+
+#ifdef CONFIG_CONSTRUCTORS
+#define KERNEL_CTORS()	VMLINUX_SYMBOL(__ctors_start) = .;		\
+			*(.ctors)					\
+			VMLINUX_SYMBOL(__ctors_end) = .;
+#else
+#define KERNEL_CTORS()
+#endif
 
 /* init and exit section handling */
 #define INIT_DATA							\
 	*(.init.data)							\
 	DEV_DISCARD(init.data)						\
-	DEV_DISCARD(init.rodata)					\
 	CPU_DISCARD(init.data)						\
-	CPU_DISCARD(init.rodata)					\
 	MEM_DISCARD(init.data)						\
+	KERNEL_CTORS()							\
+	*(.init.rodata)							\
+	DEV_DISCARD(init.rodata)					\
+	CPU_DISCARD(init.rodata)					\
 	MEM_DISCARD(init.rodata)
 
 #define INIT_TEXT							\
@@ -363,9 +481,35 @@
 	CPU_DISCARD(exit.text)						\
 	MEM_DISCARD(exit.text)
 
-	/* DWARF debug sections.
-	   Symbols in the DWARF debugging sections are relative to
-	   the beginning of the section so we begin them at 0.  */
+#define EXIT_CALL							\
+	*(.exitcall.exit)
+
+/*
+ * bss (Block Started by Symbol) - uninitialized data
+ * zeroed during startup
+ */
+#define SBSS								\
+	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {				\
+		*(.sbss)						\
+		*(.scommon)						\
+	}
+
+#define BSS(bss_align)							\
+	. = ALIGN(bss_align);						\
+	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {				\
+		VMLINUX_SYMBOL(__bss_start) = .;			\
+		*(.bss.page_aligned)					\
+		*(.dynbss)						\
+		*(.bss)							\
+		*(COMMON)						\
+		VMLINUX_SYMBOL(__bss_stop) = .;				\
+	}
+
+/*
+ * DWARF debug sections.
+ * Symbols in the DWARF debugging sections are relative to
+ * the beginning of the section so we begin them at 0.
+ */
 #define DWARF_DEBUG							\
 	/* DWARF 1 */							\
 	.debug 0 : { *(.debug) }					\
@@ -432,6 +576,12 @@
 		VMLINUX_SYMBOL(__stop_notes) = .;			\
 	}
 
+#define INIT_SETUP(initsetup_align)					\
+	. = ALIGN(initsetup_align);					\
+	VMLINUX_SYMBOL(__setup_start) = .;				\
+	*(.init.setup)							\
+	VMLINUX_SYMBOL(__setup_end) = .;
+
 #define INITCALLS							\
 	*(.initcallearly.init)						\
 	VMLINUX_SYMBOL(__early_initcall_end) = .;			\
@@ -453,6 +603,31 @@
 	*(.initcall7.init)						\
 	*(.initcall7s.init)
 
+#define INIT_CALLS							\
+		VMLINUX_SYMBOL(__initcall_start) = .;			\
+		INITCALLS						\
+		VMLINUX_SYMBOL(__initcall_end) = .;
+
+#define CON_INITCALL							\
+		VMLINUX_SYMBOL(__con_initcall_start) = .;		\
+		*(.con_initcall.init)					\
+		VMLINUX_SYMBOL(__con_initcall_end) = .;
+
+#define SECURITY_INITCALL						\
+		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
+		*(.security_initcall.init)				\
+		VMLINUX_SYMBOL(__security_initcall_end) = .;
+
+#ifdef CONFIG_BLK_DEV_INITRD
+#define INIT_RAM_FS							\
+	. = ALIGN(PAGE_SIZE);						\
+	VMLINUX_SYMBOL(__initramfs_start) = .;				\
+	*(.init.ramfs)							\
+	VMLINUX_SYMBOL(__initramfs_end) = .;
+#else
+#define INIT_RAM_FS
+#endif
+
 /**
  * PERCPU_VADDR - define output section for percpu area
  * @vaddr: explicit base address (optional)
@@ -509,3 +684,58 @@
 		*(.data.percpu.shared_aligned)				\
 		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
 	}
+
+
+/*
+ * Definition of the high level *_SECTION macros
+ * They will only fit a subset of the architectures
+ */
+
+
+/*
+ * Writeable data.
+ * All sections are combined in a single .data section.
+ * The sections following CONSTRUCTORS are arranged so their
+ * typical alignment matches.
+ * A cacheline is typically (if not always) smaller than a PAGE_SIZE, so
+ * the sections that have this restriction (or a similar one)
+ * are located before the ones requiring PAGE_SIZE alignment.
+ * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment which
+ * matches the requirement of PAGE_ALIGNED_DATA.
+ *
+ * Use 0 as page_align if page_aligned data is not used. */
+#define RW_DATA_SECTION(cacheline, nosave, pagealigned, inittask)	\
+	. = ALIGN(PAGE_SIZE);						\
+	.data : AT(ADDR(.data) - LOAD_OFFSET) {				\
+		INIT_TASK(inittask)					\
+		CACHELINE_ALIGNED_DATA(cacheline)			\
+		READ_MOSTLY_DATA(cacheline)				\
+		DATA_DATA						\
+		CONSTRUCTORS						\
+		NOSAVE_DATA(nosave)					\
+		PAGE_ALIGNED_DATA(pagealigned)				\
+	}
+
+#define INIT_TEXT_SECTION(inittext_align)				\
+	. = ALIGN(inittext_align);					\
+	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {		\
+		VMLINUX_SYMBOL(_sinittext) = .;				\
+		INIT_TEXT						\
+		VMLINUX_SYMBOL(_einittext) = .;				\
+	}
+
+#define INIT_DATA_SECTION(initsetup_align)				\
+	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {		\
+		INIT_DATA						\
+		INIT_SETUP(initsetup_align)				\
+		INIT_CALLS						\
+		CON_INITCALL						\
+		SECURITY_INITCALL					\
+		INIT_RAM_FS						\
+	}
+
+#define BSS_SECTION(sbss_align, bss_align)				\
+	SBSS								\
+	BSS(bss_align)							\
+	. = ALIGN(4);
+
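
For illustration, a rough sketch of how an architecture's vmlinux.lds.S could be written on top of the helpers added above. It simply makes the sample from the new header comment concrete; the architecture name "xxx", the START load address, the ".text" contents and the alignment values (PAGE_SIZE, L1_CACHE_BYTES, THREAD_SIZE) are placeholders and not taken from any real port, and a real script will usually need extra arch-specific output sections.

/* Hypothetical arch/xxx/kernel/vmlinux.lds.S -- sketch only */
OUTPUT_ARCH(xxx)			/* placeholder architecture */
ENTRY(_start)

SECTIONS
{
	. = START;			/* arch specific load address */

	__init_begin = .;
	HEAD_TEXT_SECTION
	INIT_TEXT_SECTION(PAGE_SIZE)
	INIT_DATA_SECTION(16)
	PERCPU(PAGE_SIZE)
	__init_end = .;

	_stext = .;
	.text : {			/* text section macros from vmlinux.lds.h */
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
	}
	_etext = .;

	_sdata = .;
	RO_DATA_SECTION(PAGE_SIZE)
	/* args: cacheline, nosave, page aligned, init_task alignment */
	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, PAGE_SIZE, THREAD_SIZE)
	_edata = .;

	EXCEPTION_TABLE(16)
	NOTES

	BSS_SECTION(0, PAGE_SIZE)	/* defines __bss_start/__bss_stop */
	_end = .;

	/DISCARD/ : {
		EXIT_TEXT
		EXIT_DATA
		EXIT_CALL
	}
	STABS_DEBUG
	DWARF_DEBUG
}

The DISCARD list and the debug macros mirror the sample in the header comment; whether exit text and data can really be discarded at link time depends on the architecture.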