author		Nicholas Piggin <npiggin@gmail.com>	2016-11-23 11:41:41 -0500
committer	Michal Marek <mmarek@suse.com>		2016-11-29 09:58:57 -0500
commit		4b89b7f7aad5742523db801b7107598e5b5a42c8 (patch)
tree		020266a59636b6c1f939ec1f85cd67ba64e60dc0
parent		9da2f584b54fa4bf0df4db220cd89b6c7e691c6e (diff)
kbuild: keep data tables through dead code elimination
When CONFIG_LD_DEAD_CODE_DATA_ELIMINATION is enabled, we must ensure
that we still keep various programmatically-accessed tables.

[npiggin: Fold Paul's patches into one, and add a few more tables.
Diffing the symbol tables of allyesconfig built with and without
-gc-sections shows up lost tables quite easily.]

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michal Marek <mmarek@suse.com>
 include/asm-generic/vmlinux.lds.h | 68 +++++++++++++++++++-----------------
 1 file changed, 34 insertions(+), 34 deletions(-)
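Background note (not part of the commit): the macros touched here all implement the same pattern, a table built from otherwise-unreferenced objects emitted into a named section and walked between two bound symbols that vmlinux.lds.h assigns around the input-section pattern. Because those bounds are defined in the linker script rather than by references to the individual entries, ld's --gc-sections sees the entry sections as unused and may discard them; wrapping the patterns in KEEP() pins them. A minimal user-space sketch of the pattern follows; the names (my_initcall_t, MY_INITCALL, __my_table) are made up for illustration and the demo relies on GNU ld's automatic __start_/__stop_ symbols instead of a custom linker script.

/* table_keep_demo.c - sketch of the section-as-table pattern that the
 * KEEP() annotations in this patch protect.  Build with:  gcc table_keep_demo.c -o demo
 */
#include <stdio.h>

typedef int (*my_initcall_t)(void);

/* Drop a function pointer into the "__my_table" input section.  The
 * "used" attribute keeps the compiler from discarding the otherwise
 * unreferenced static object.
 */
#define MY_INITCALL(fn) \
	static const my_initcall_t __my_initcall_##fn \
	__attribute__((used, section("__my_table"))) = fn

static int init_a(void) { puts("init_a"); return 0; }
static int init_b(void) { puts("init_b"); return 0; }

MY_INITCALL(init_a);
MY_INITCALL(init_b);

/* GNU ld provides __start_<sec>/__stop_<sec> for orphan sections whose
 * names are valid C identifiers.  The kernel instead defines equivalent
 * bounds explicitly in vmlinux.lds.h, which is why it also needs KEEP()
 * once --gc-sections is in use: nothing else references the entries.
 */
extern const my_initcall_t __start___my_table[], __stop___my_table[];

int main(void)
{
	for (const my_initcall_t *p = __start___my_table;
	     p < __stop___my_table; p++)
		(*p)();
	return 0;
}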
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 30747960bc54..5372775161d0 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -114,7 +114,7 @@
 #ifdef CONFIG_KPROBES
 #define KPROBE_BLACKLIST()	. = ALIGN(8);				      \
 				VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \
-				*(_kprobe_blacklist)			      \
+				KEEP(*(_kprobe_blacklist))		      \
 				VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .;
 #else
 #define KPROBE_BLACKLIST()
@@ -123,10 +123,10 @@
 #ifdef CONFIG_EVENT_TRACING
 #define FTRACE_EVENTS()	. = ALIGN(8);					\
 			VMLINUX_SYMBOL(__start_ftrace_events) = .;	\
-			*(_ftrace_events)				\
+			KEEP(*(_ftrace_events))				\
 			VMLINUX_SYMBOL(__stop_ftrace_events) = .;	\
 			VMLINUX_SYMBOL(__start_ftrace_enum_maps) = .;	\
-			*(_ftrace_enum_map)				\
+			KEEP(*(_ftrace_enum_map))			\
 			VMLINUX_SYMBOL(__stop_ftrace_enum_maps) = .;
 #else
 #define FTRACE_EVENTS()
@@ -134,10 +134,10 @@
 
 #ifdef CONFIG_TRACING
 #define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .;      \
-			 *(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \
+			 KEEP(*(__trace_printk_fmt)) /* Trace_printk fmt' pointer */ \
 			 VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
 #define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .;	\
-			 *(__tracepoint_str) /* Trace_printk fmt' pointer */	\
+			 KEEP(*(__tracepoint_str)) /* Trace_printk fmt' pointer */	\
 			 VMLINUX_SYMBOL(__stop___tracepoint_str) = .;
 #else
 #define TRACE_PRINTKS()
@@ -147,7 +147,7 @@
 #ifdef CONFIG_FTRACE_SYSCALLS
 #define TRACE_SYSCALLS() . = ALIGN(8);					\
 			 VMLINUX_SYMBOL(__start_syscalls_metadata) = .;	\
-			 *(__syscalls_metadata)				\
+			 KEEP(*(__syscalls_metadata))			\
 			 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
 #else
 #define TRACE_SYSCALLS()
@@ -156,7 +156,7 @@
 #ifdef CONFIG_SERIAL_EARLYCON
 #define EARLYCON_TABLE() STRUCT_ALIGN();			\
 			 VMLINUX_SYMBOL(__earlycon_table) = .;	\
-			 *(__earlycon_table)			\
+			 KEEP(*(__earlycon_table))		\
 			 VMLINUX_SYMBOL(__earlycon_table_end) = .;
 #else
 #define EARLYCON_TABLE()
@@ -169,8 +169,8 @@
 #define _OF_TABLE_1(name)						\
 	. = ALIGN(8);							\
 	VMLINUX_SYMBOL(__##name##_of_table) = .;			\
-	*(__##name##_of_table)						\
-	*(__##name##_of_table_end)
+	KEEP(*(__##name##_of_table))					\
+	KEEP(*(__##name##_of_table_end))
 
 #define CLKSRC_OF_TABLES()	OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
 #define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
@@ -184,7 +184,7 @@
 #define ACPI_PROBE_TABLE(name)						\
 	. = ALIGN(8);							\
 	VMLINUX_SYMBOL(__##name##_acpi_probe_table) = .;		\
-	*(__##name##_acpi_probe_table)					\
+	KEEP(*(__##name##_acpi_probe_table))				\
 	VMLINUX_SYMBOL(__##name##_acpi_probe_table_end) = .;
 #else
 #define ACPI_PROBE_TABLE(name)
@@ -193,7 +193,7 @@
 #define KERNEL_DTB()							\
 	STRUCT_ALIGN();							\
 	VMLINUX_SYMBOL(__dtb_start) = .;				\
-	*(.dtb.init.rodata)						\
+	KEEP(*(.dtb.init.rodata))					\
 	VMLINUX_SYMBOL(__dtb_end) = .;
 
 /*
@@ -214,11 +214,11 @@
 	/* implement dynamic printk debug */				\
 	. = ALIGN(8);							\
 	VMLINUX_SYMBOL(__start___jump_table) = .;			\
-	*(__jump_table)							\
+	KEEP(*(__jump_table))						\
 	VMLINUX_SYMBOL(__stop___jump_table) = .;			\
 	. = ALIGN(8);							\
 	VMLINUX_SYMBOL(__start___verbose) = .;				\
-	*(__verbose)							\
+	KEEP(*(__verbose))						\
 	VMLINUX_SYMBOL(__stop___verbose) = .;				\
 	LIKELY_PROFILE()						\
 	BRANCH_PROFILE()						\
@@ -271,10 +271,10 @@
 		VMLINUX_SYMBOL(__start_rodata) = .;			\
 		*(.rodata) *(.rodata.*)					\
 		RO_AFTER_INIT_DATA	/* Read only after init */	\
-		*(__vermagic)		/* Kernel version magic */	\
+		KEEP(*(__vermagic))	/* Kernel version magic */	\
 		. = ALIGN(8);						\
 		VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .;		\
-		*(__tracepoints_ptrs)	/* Tracepoints: pointer array */\
+		KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \
 		VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .;		\
 		*(__tracepoints_strings)/* Tracepoints: strings */	\
 	}								\
@@ -288,35 +288,35 @@
 	/* PCI quirks */						\
 	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;		\
-		*(.pci_fixup_early)					\
+		KEEP(*(.pci_fixup_early))				\
 		VMLINUX_SYMBOL(__end_pci_fixups_early) = .;		\
 		VMLINUX_SYMBOL(__start_pci_fixups_header) = .;		\
-		*(.pci_fixup_header)					\
+		KEEP(*(.pci_fixup_header))				\
 		VMLINUX_SYMBOL(__end_pci_fixups_header) = .;		\
 		VMLINUX_SYMBOL(__start_pci_fixups_final) = .;		\
-		*(.pci_fixup_final)					\
+		KEEP(*(.pci_fixup_final))				\
 		VMLINUX_SYMBOL(__end_pci_fixups_final) = .;		\
 		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;		\
-		*(.pci_fixup_enable)					\
+		KEEP(*(.pci_fixup_enable))				\
 		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;		\
 		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;		\
-		*(.pci_fixup_resume)					\
+		KEEP(*(.pci_fixup_resume))				\
 		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;		\
 		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .;	\
-		*(.pci_fixup_resume_early)				\
+		KEEP(*(.pci_fixup_resume_early))			\
 		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .;	\
 		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .;		\
-		*(.pci_fixup_suspend)					\
+		KEEP(*(.pci_fixup_suspend))				\
 		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .;		\
 		VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .;	\
-		*(.pci_fixup_suspend_late)				\
+		KEEP(*(.pci_fixup_suspend_late))			\
 		VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .;	\
 	}								\
 									\
 	/* Built-in firmware blobs */					\
 	.builtin_fw        : AT(ADDR(.builtin_fw) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__start_builtin_fw) = .;			\
-		*(.builtin_fw)						\
+		KEEP(*(.builtin_fw))					\
 		VMLINUX_SYMBOL(__end_builtin_fw) = .;			\
 	}								\
 									\
@@ -394,7 +394,7 @@
 									\
 	/* Kernel symbol table: strings */				\
         __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
-		KEEP(*(__ksymtab_strings))				\
+		*(__ksymtab_strings)					\
 	}								\
 									\
 	/* __*init sections */						\
@@ -407,14 +407,14 @@
 	/* Built-in module parameters. */				\
 	__param : AT(ADDR(__param) - LOAD_OFFSET) {			\
 		VMLINUX_SYMBOL(__start___param) = .;			\
-		*(__param)						\
+		KEEP(*(__param))					\
 		VMLINUX_SYMBOL(__stop___param) = .;			\
 	}								\
 									\
 	/* Built-in module versions. */					\
 	__modver : AT(ADDR(__modver) - LOAD_OFFSET) {			\
 		VMLINUX_SYMBOL(__start___modver) = .;			\
-		*(__modver)						\
+		KEEP(*(__modver))					\
 		VMLINUX_SYMBOL(__stop___modver) = .;			\
 		. = ALIGN((align));					\
 		VMLINUX_SYMBOL(__end_rodata) = .;			\
@@ -517,7 +517,7 @@
 	. = ALIGN(align);						\
 	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {		\
 		VMLINUX_SYMBOL(__start___ex_table) = .;			\
-		*(__ex_table)						\
+		KEEP(*(__ex_table))					\
 		VMLINUX_SYMBOL(__stop___ex_table) = .;			\
 	}
 
@@ -533,9 +533,9 @@
 #ifdef CONFIG_CONSTRUCTORS
 #define KERNEL_CTORS()	. = ALIGN(8);			   \
 			VMLINUX_SYMBOL(__ctors_start) = .; \
-			*(.ctors)			   \
-			*(SORT(.init_array.*))		   \
-			*(.init_array)			   \
+			KEEP(*(.ctors))			   \
+			KEEP(*(SORT(.init_array.*)))	   \
+			KEEP(*(.init_array))		   \
 			VMLINUX_SYMBOL(__ctors_end) = .;
 #else
 #define KERNEL_CTORS()
@@ -659,7 +659,7 @@
 	. = ALIGN(8);							\
 	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {		\
 		VMLINUX_SYMBOL(__start___bug_table) = .;		\
-		*(__bug_table)						\
+		KEEP(*(__bug_table))					\
 		VMLINUX_SYMBOL(__stop___bug_table) = .;			\
 	}
 #else
@@ -671,7 +671,7 @@
 	. = ALIGN(4);							\
 	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {		\
 		VMLINUX_SYMBOL(__tracedata_start) = .;			\
-		*(.tracedata)						\
+		KEEP(*(.tracedata))					\
 		VMLINUX_SYMBOL(__tracedata_end) = .;			\
 	}
 #else
@@ -688,7 +688,7 @@
 #define INIT_SETUP(initsetup_align)					\
 		. = ALIGN(initsetup_align);				\
 		VMLINUX_SYMBOL(__setup_start) = .;			\
-		*(.init.setup)						\
+		KEEP(*(.init.setup))					\
 		VMLINUX_SYMBOL(__setup_end) = .;
 
 #define INIT_CALLS_LEVEL(level)						\