aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorNicholas Piggin <npiggin@gmail.com>2018-05-09 08:59:58 -0400
committerMasahiro Yamada <yamada.masahiro@socionext.com>2018-05-17 09:45:00 -0400
commit266ff2a8f51f02b429a987d87634697eb0d01d6a (patch)
tree22f39b7763a55e89268cbfd52de94221dfae185a
parent8b1857436baa2b9b6d7330715180aa47a63b15ca (diff)
kbuild: Fix asm-generic/vmlinux.lds.h for LD_DEAD_CODE_DATA_ELIMINATION
KEEP more tables, and add the function/data section wildcard to more section selections. This is a little ad-hoc at the moment, but kernel code should be moved to consistently use .text..x (note: double dots) for explicit sections and all references to it in the linker script can be made with TEXT_MAIN, and similarly for other sections. For now, let's see if major architectures move to enabling this option then we can do some refactoring passes. Otherwise if it remains unused or superseded by LTO, this may not be required. Signed-off-by: Nicholas Piggin <npiggin@gmail.com> Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
-rw-r--r--  include/asm-generic/vmlinux.lds.h  49
1 file changed, 29 insertions, 20 deletions
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index f4980c72d389..e373e2e10f6a 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -64,15 +64,24 @@
64 * generates .data.identifier sections, which need to be pulled in with 64 * generates .data.identifier sections, which need to be pulled in with
65 * .data. We don't want to pull in .data..other sections, which Linux 65 * .data. We don't want to pull in .data..other sections, which Linux
66 * has defined. Same for text and bss. 66 * has defined. Same for text and bss.
67 *
68 * RODATA_MAIN is not used because existing code already defines .rodata.x
69 * sections to be brought in with rodata.
67 */ 70 */
68#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION 71#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
69#define TEXT_MAIN .text .text.[0-9a-zA-Z_]* 72#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
70#define DATA_MAIN .data .data.[0-9a-zA-Z_]* 73#define DATA_MAIN .data .data.[0-9a-zA-Z_]*
74#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
75#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]*
71#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* 76#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
77#define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]*
72#else 78#else
73#define TEXT_MAIN .text 79#define TEXT_MAIN .text
74#define DATA_MAIN .data 80#define DATA_MAIN .data
81#define SDATA_MAIN .sdata
82#define RODATA_MAIN .rodata
75#define BSS_MAIN .bss 83#define BSS_MAIN .bss
84#define SBSS_MAIN .sbss
76#endif 85#endif
77 86
78/* 87/*
@@ -105,7 +114,7 @@
105#ifdef CONFIG_FTRACE_MCOUNT_RECORD 114#ifdef CONFIG_FTRACE_MCOUNT_RECORD
106#define MCOUNT_REC() . = ALIGN(8); \ 115#define MCOUNT_REC() . = ALIGN(8); \
107 __start_mcount_loc = .; \ 116 __start_mcount_loc = .; \
108 *(__mcount_loc) \ 117 KEEP(*(__mcount_loc)) \
109 __stop_mcount_loc = .; 118 __stop_mcount_loc = .;
110#else 119#else
111#define MCOUNT_REC() 120#define MCOUNT_REC()
@@ -113,7 +122,7 @@
113 122
114#ifdef CONFIG_TRACE_BRANCH_PROFILING 123#ifdef CONFIG_TRACE_BRANCH_PROFILING
115#define LIKELY_PROFILE() __start_annotated_branch_profile = .; \ 124#define LIKELY_PROFILE() __start_annotated_branch_profile = .; \
116 *(_ftrace_annotated_branch) \ 125 KEEP(*(_ftrace_annotated_branch)) \
117 __stop_annotated_branch_profile = .; 126 __stop_annotated_branch_profile = .;
118#else 127#else
119#define LIKELY_PROFILE() 128#define LIKELY_PROFILE()
@@ -121,7 +130,7 @@
121 130
122#ifdef CONFIG_PROFILE_ALL_BRANCHES 131#ifdef CONFIG_PROFILE_ALL_BRANCHES
123#define BRANCH_PROFILE() __start_branch_profile = .; \ 132#define BRANCH_PROFILE() __start_branch_profile = .; \
124 *(_ftrace_branch) \ 133 KEEP(*(_ftrace_branch)) \
125 __stop_branch_profile = .; 134 __stop_branch_profile = .;
126#else 135#else
127#define BRANCH_PROFILE() 136#define BRANCH_PROFILE()
@@ -238,8 +247,8 @@
238 *(DATA_MAIN) \ 247 *(DATA_MAIN) \
239 *(.ref.data) \ 248 *(.ref.data) \
240 *(.data..shared_aligned) /* percpu related */ \ 249 *(.data..shared_aligned) /* percpu related */ \
241 MEM_KEEP(init.data) \ 250 MEM_KEEP(init.data*) \
242 MEM_KEEP(exit.data) \ 251 MEM_KEEP(exit.data*) \
243 *(.data.unlikely) \ 252 *(.data.unlikely) \
244 __start_once = .; \ 253 __start_once = .; \
245 *(.data.once) \ 254 *(.data.once) \
@@ -289,8 +298,8 @@
289 __start_init_task = .; \ 298 __start_init_task = .; \
290 init_thread_union = .; \ 299 init_thread_union = .; \
291 init_stack = .; \ 300 init_stack = .; \
292 *(.data..init_task) \ 301 KEEP(*(.data..init_task)) \
293 *(.data..init_thread_info) \ 302 KEEP(*(.data..init_thread_info)) \
294 . = __start_init_task + THREAD_SIZE; \ 303 . = __start_init_task + THREAD_SIZE; \
295 __end_init_task = .; 304 __end_init_task = .;
296 305
@@ -487,8 +496,8 @@
487 *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \ 496 *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \
488 *(.text..refcount) \ 497 *(.text..refcount) \
489 *(.ref.text) \ 498 *(.ref.text) \
490 MEM_KEEP(init.text) \ 499 MEM_KEEP(init.text*) \
491 MEM_KEEP(exit.text) \ 500 MEM_KEEP(exit.text*) \
492 501
493 502
494/* sched.text is aling to function alignment to secure we have same 503/* sched.text is aling to function alignment to secure we have same
@@ -538,7 +547,7 @@
538 __softirqentry_text_end = .; 547 __softirqentry_text_end = .;
539 548
540/* Section used for early init (in .S files) */ 549/* Section used for early init (in .S files) */
541#define HEAD_TEXT *(.head.text) 550#define HEAD_TEXT KEEP(*(.head.text))
542 551
543#define HEAD_TEXT_SECTION \ 552#define HEAD_TEXT_SECTION \
544 .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \ 553 .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \
@@ -579,11 +588,11 @@
579/* init and exit section handling */ 588/* init and exit section handling */
580#define INIT_DATA \ 589#define INIT_DATA \
581 KEEP(*(SORT(___kentry+*))) \ 590 KEEP(*(SORT(___kentry+*))) \
582 *(.init.data) \ 591 *(.init.data init.data.*) \
583 MEM_DISCARD(init.data) \ 592 MEM_DISCARD(init.data*) \
584 KERNEL_CTORS() \ 593 KERNEL_CTORS() \
585 MCOUNT_REC() \ 594 MCOUNT_REC() \
586 *(.init.rodata) \ 595 *(.init.rodata .init.rodata.*) \
587 FTRACE_EVENTS() \ 596 FTRACE_EVENTS() \
588 TRACE_SYSCALLS() \ 597 TRACE_SYSCALLS() \
589 KPROBE_BLACKLIST() \ 598 KPROBE_BLACKLIST() \
@@ -602,16 +611,16 @@
602 EARLYCON_TABLE() 611 EARLYCON_TABLE()
603 612
604#define INIT_TEXT \ 613#define INIT_TEXT \
605 *(.init.text) \ 614 *(.init.text .init.text.*) \
606 *(.text.startup) \ 615 *(.text.startup) \
607 MEM_DISCARD(init.text) 616 MEM_DISCARD(init.text*)
608 617
609#define EXIT_DATA \ 618#define EXIT_DATA \
610 *(.exit.data) \ 619 *(.exit.data .exit.data.*) \
611 *(.fini_array) \ 620 *(.fini_array) \
612 *(.dtors) \ 621 *(.dtors) \
613 MEM_DISCARD(exit.data) \ 622 MEM_DISCARD(exit.data*) \
614 MEM_DISCARD(exit.rodata) 623 MEM_DISCARD(exit.rodata*)
615 624
616#define EXIT_TEXT \ 625#define EXIT_TEXT \
617 *(.exit.text) \ 626 *(.exit.text) \
@@ -629,7 +638,7 @@
629 . = ALIGN(sbss_align); \ 638 . = ALIGN(sbss_align); \
630 .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \ 639 .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
631 *(.dynsbss) \ 640 *(.dynsbss) \
632 *(.sbss) \ 641 *(SBSS_MAIN) \
633 *(.scommon) \ 642 *(.scommon) \
634 } 643 }
635 644
@@ -754,7 +763,7 @@
754#define NOTES \ 763#define NOTES \
755 .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \ 764 .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
756 __start_notes = .; \ 765 __start_notes = .; \
757 *(.note.*) \ 766 KEEP(*(.note.*)) \
758 __stop_notes = .; \ 767 __stop_notes = .; \
759 } 768 }
760 769