 Makefile                          |  9
 arch/Kconfig                      | 13
 include/asm-generic/vmlinux.lds.h | 52
 include/linux/compiler.h          | 23
 include/linux/export.h            | 30
 include/linux/init.h              | 38
 init/Makefile                     |  2
 7 files changed, 104 insertions(+), 63 deletions(-)
diff --git a/Makefile b/Makefile
index 70de1448c571..6d40f3a3a8cc 100644
--- a/Makefile
+++ b/Makefile
@@ -622,6 +622,11 @@ include arch/$(SRCARCH)/Makefile
 KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
 KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
 
+ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
+KBUILD_CFLAGS += $(call cc-option,-ffunction-sections,)
+KBUILD_CFLAGS += $(call cc-option,-fdata-sections,)
+endif
+
 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
 KBUILD_CFLAGS += -Os
 else
@@ -809,6 +814,10 @@ LDFLAGS_BUILD_ID = $(patsubst -Wl$(comma)%,%,\
 KBUILD_LDFLAGS_MODULE += $(LDFLAGS_BUILD_ID)
 LDFLAGS_vmlinux += $(LDFLAGS_BUILD_ID)
 
+ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
+LDFLAGS_vmlinux += $(call ld-option, --gc-sections,)
+endif
+
 ifeq ($(CONFIG_STRIP_ASM_SYMS),y)
 LDFLAGS_vmlinux += $(call ld-option, -X,)
 endif
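
What the new compiler flags buy (a hedged, out-of-tree sketch, not part of the patch): with -ffunction-sections and -fdata-sections every function and data object lands in its own input section, which is what lets --gc-sections discard anything that ends up unreferenced across the whole link. The cc-option/ld-option wrappers keep the build working on toolchains that lack the flags.

    /* example.c, compiled with -ffunction-sections -fdata-sections */
    int used_datum = 1;     /* emitted into its own section, .data.used_datum */
    int dead_datum = 2;     /* .data.dead_datum: dropped if nothing in the
                             * final link references it */

    int used_func(void)     /* .text.used_func */
    {
            return used_datum;
    }

    int dead_func(void)     /* .text.dead_func: ld --gc-sections discards the
                             * whole section if no live code references it */
    {
            return dead_datum;
    }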
diff --git a/arch/Kconfig b/arch/Kconfig
index 6842154813e5..3f948c422d9d 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -467,6 +467,19 @@ config THIN_ARCHIVES
 	  Select this if the architecture wants to use thin archives
 	  instead of ld -r to create the built-in.o files.
 
+config LD_DEAD_CODE_DATA_ELIMINATION
+	bool
+	help
+	  Select this if the architecture wants to do dead code and
+	  data elimination with the linker by compiling with
+	  -ffunction-sections -fdata-sections and linking with
+	  --gc-sections.
+
+	  This requires that the arch annotates or otherwise protects
+	  its external entry points from being discarded. Linker scripts
+	  must also merge .text.*, .data.*, and .bss.* correctly into
+	  output sections.
+
 config HAVE_CONTEXT_TRACKING
 	bool
 	help
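
By way of illustration only (the arch and section names here are hypothetical, not part of this patch): an architecture opting in would select LD_DEAD_CODE_DATA_ELIMINATION from its Kconfig and then root the entry points that nothing references by symbol, e.g. in its vmlinux.lds.S:

    /* hypothetical arch/foo/kernel/vmlinux.lds.S fragment: the reset and
     * exception vectors are reached by hardware, not by a C caller, so
     * they look dead to --gc-sections unless explicitly kept */
    .head.text : {
            KEEP(*(.head.text))
            KEEP(*(.vectors))
    }

Per the help text, KEEP()ing whole entry sections like this is the alternative to annotating each symbol with KENTRY (added in compiler.h below).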
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 24563970ff7b..ad9d8f94dc7a 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -196,9 +196,14 @@
 	*(.dtb.init.rodata)					\
 	VMLINUX_SYMBOL(__dtb_end) = .;
 
-/* .data section */
+/*
+ * .data section
+ * -fdata-sections generates .data.identifier which needs to be pulled in
+ * with .data, but don't want to pull in .data..stuff which has its own
+ * requirements. Same for bss.
+ */
 #define DATA_DATA						\
-	*(.data)						\
+	*(.data .data.[0-9a-zA-Z_]*)				\
 	*(.ref.data)						\
 	*(.data..shared_aligned) /* percpu related */		\
 	MEM_KEEP(init.data)					\
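
The bracketed glob is doing real work here: the character class cannot match a dot, so compiler-generated per-object sections (one dot) fold into .data, while kernel sections named with a double dot keep their own placement rules. A sketch:

    int foo = 1;    /* with -fdata-sections: emitted as .data.foo, which
                     * the .data.[0-9a-zA-Z_]* glob matches */
    int bar __attribute__((section(".data..shared_aligned")));
                    /* double dot: not matched above; placed by its own rule */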
@@ -320,76 +325,76 @@
 	/* Kernel symbol table: Normal symbols */		\
 	__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) {		\
 		VMLINUX_SYMBOL(__start___ksymtab) = .;		\
-		*(SORT(___ksymtab+*))				\
+		KEEP(*(SORT(___ksymtab+*)))			\
 		VMLINUX_SYMBOL(__stop___ksymtab) = .;		\
 	}							\
 								\
 	/* Kernel symbol table: GPL-only symbols */		\
 	__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;	\
-		*(SORT(___ksymtab_gpl+*))			\
+		KEEP(*(SORT(___ksymtab_gpl+*)))			\
 		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;	\
 	}							\
 								\
 	/* Kernel symbol table: Normal unused symbols */	\
 	__ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
 		VMLINUX_SYMBOL(__start___ksymtab_unused) = .;	\
-		*(SORT(___ksymtab_unused+*))			\
+		KEEP(*(SORT(___ksymtab_unused+*)))		\
 		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;	\
 	}							\
 								\
 	/* Kernel symbol table: GPL-only unused symbols */	\
 	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
 		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
-		*(SORT(___ksymtab_unused_gpl+*))		\
+		KEEP(*(SORT(___ksymtab_unused_gpl+*)))		\
 		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
 	}							\
 								\
 	/* Kernel symbol table: GPL-future-only symbols */	\
 	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
 		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
-		*(SORT(___ksymtab_gpl_future+*))		\
+		KEEP(*(SORT(___ksymtab_gpl_future+*)))		\
 		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
 	}							\
 								\
 	/* Kernel symbol table: Normal symbols */		\
 	__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) {		\
 		VMLINUX_SYMBOL(__start___kcrctab) = .;		\
-		*(SORT(___kcrctab+*))				\
+		KEEP(*(SORT(___kcrctab+*)))			\
 		VMLINUX_SYMBOL(__stop___kcrctab) = .;		\
 	}							\
 								\
 	/* Kernel symbol table: GPL-only symbols */		\
 	__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;	\
-		*(SORT(___kcrctab_gpl+*))			\
+		KEEP(*(SORT(___kcrctab_gpl+*)))			\
 		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;	\
 	}							\
 								\
 	/* Kernel symbol table: Normal unused symbols */	\
 	__kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
 		VMLINUX_SYMBOL(__start___kcrctab_unused) = .;	\
-		*(SORT(___kcrctab_unused+*))			\
+		KEEP(*(SORT(___kcrctab_unused+*)))		\
 		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;	\
 	}							\
 								\
 	/* Kernel symbol table: GPL-only unused symbols */	\
 	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
 		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
-		*(SORT(___kcrctab_unused_gpl+*))		\
+		KEEP(*(SORT(___kcrctab_unused_gpl+*)))		\
 		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
 	}							\
 								\
 	/* Kernel symbol table: GPL-future-only symbols */	\
 	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
 		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
-		*(SORT(___kcrctab_gpl_future+*))		\
+		KEEP(*(SORT(___kcrctab_gpl_future+*)))		\
 		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
 	}							\
 								\
 	/* Kernel symbol table: strings */			\
 	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
-		*(__ksymtab_strings)				\
+		KEEP(*(__ksymtab_strings))			\
 	}							\
 								\
 	/* __*init sections */					\
@@ -424,7 +429,7 @@
 #define SECURITY_INIT						\
 	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
 		VMLINUX_SYMBOL(__security_initcall_start) = .;	\
-		*(.security_initcall.init)			\
+		KEEP(*(.security_initcall.init))		\
 		VMLINUX_SYMBOL(__security_initcall_end) = .;	\
 	}
 
@@ -432,7 +437,7 @@
  * during second ld run in second ld pass when generating System.map */
 #define TEXT_TEXT						\
 		ALIGN_FUNCTION();				\
-		*(.text.hot .text .text.fixup .text.unlikely)	\
+		*(.text.hot .text .text.fixup .text.unlikely .text.*) \
 		*(.ref.text)					\
 	MEM_KEEP(init.text)					\
 	MEM_KEEP(exit.text)					\
@@ -527,6 +532,7 @@
 
 /* init and exit section handling */
 #define INIT_DATA						\
+	KEEP(*(SORT(___kentry+*)))				\
 	*(.init.data)						\
 	MEM_DISCARD(init.data)					\
 	KERNEL_CTORS()						\
@@ -593,7 +599,7 @@
 	BSS_FIRST_SECTIONS					\
 	*(.bss..page_aligned)					\
 	*(.dynbss)						\
-	*(.bss)							\
+	*(.bss .bss.[0-9a-zA-Z_]*)				\
 	*(COMMON)						\
 }
 
@@ -676,12 +682,12 @@
 
 #define INIT_CALLS_LEVEL(level)					\
 		VMLINUX_SYMBOL(__initcall##level##_start) = .;	\
-		*(.initcall##level##.init)			\
-		*(.initcall##level##s.init)			\
+		KEEP(*(.initcall##level##.init))		\
+		KEEP(*(.initcall##level##s.init))		\
 
 #define INIT_CALLS						\
 		VMLINUX_SYMBOL(__initcall_start) = .;		\
-		*(.initcallearly.init)				\
+		KEEP(*(.initcallearly.init))			\
 		INIT_CALLS_LEVEL(0)				\
 		INIT_CALLS_LEVEL(1)				\
 		INIT_CALLS_LEVEL(2)				\
@@ -695,21 +701,21 @@
 
 #define CON_INITCALL						\
 		VMLINUX_SYMBOL(__con_initcall_start) = .;	\
-		*(.con_initcall.init)				\
+		KEEP(*(.con_initcall.init))			\
 		VMLINUX_SYMBOL(__con_initcall_end) = .;
 
 #define SECURITY_INITCALL					\
 		VMLINUX_SYMBOL(__security_initcall_start) = .;	\
-		*(.security_initcall.init)			\
+		KEEP(*(.security_initcall.init))		\
 		VMLINUX_SYMBOL(__security_initcall_end) = .;
 
 #ifdef CONFIG_BLK_DEV_INITRD
 #define INIT_RAM_FS						\
 	. = ALIGN(4);						\
 	VMLINUX_SYMBOL(__initramfs_start) = .;			\
-	*(.init.ramfs)						\
+	KEEP(*(.init.ramfs))					\
 	. = ALIGN(8);						\
-	*(.init.ramfs.info)
+	KEEP(*(.init.ramfs.info))
 #else
 #define INIT_RAM_FS
 #endif
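
All of these KEEP() additions share one rationale: the tables are never referenced by symbol, only walked at runtime between their __start/__stop markers, so without KEEP() the section garbage collector is entitled to discard them. Reduced to a standalone sketch (section and symbol names illustrative):

    SECTIONS
    {
            .initcall.init : {
                    __initcall_start = .;
                    KEEP(*(.initcall.init))   /* a GC root: retained even
                                               * with no incoming references */
                    __initcall_end = .;
            }
    }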
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 1bb954842725..86130cded110 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -182,6 +182,29 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
 # define unreachable() do { } while (1)
 #endif
 
+/*
+ * KENTRY - kernel entry point
+ * This can be used to annotate symbols (functions or data) that are used
+ * without their linker symbol being referenced explicitly. For example,
+ * interrupt vector handlers, or functions in the kernel image that are found
+ * programmatically.
+ *
+ * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
+ * are handled in their own way (with KEEP() in linker scripts).
+ *
+ * KENTRY can be avoided if the symbols in question are marked as KEEP() in
+ * the linker script. For example an architecture could KEEP() its entire
+ * boot/exception vector code rather than annotate each function and data.
+ */
+#ifndef KENTRY
+# define KENTRY(sym)						\
+	extern typeof(sym) sym;					\
+	static const unsigned long __kentry_##sym		\
+	__used							\
+	__attribute__((section("___kentry" "+" #sym ), used))	\
+	= (unsigned long)&sym;
+#endif
+
 #ifndef RELOC_HIDE
 # define RELOC_HIDE(ptr, off)					\
   ({ unsigned long __ptr;					\
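
A usage sketch for the new macro (the handler name is hypothetical): KENTRY plants a live pointer to the symbol in a ___kentry+* section, which INIT_DATA above now pulls in with KEEP(*(SORT(___kentry+*))), so the target survives --gc-sections even with no explicit caller:

    /* found and invoked programmatically (e.g. via hardware vectoring),
     * so no C code references it by name */
    void my_vector_handler(void)
    {
            /* handler body */
    }

    /* without this, .text.my_vector_handler would be garbage-collected */
    KENTRY(my_vector_handler);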
diff --git a/include/linux/export.h b/include/linux/export.h
index c565f87f005e..337cb90f3668 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -1,5 +1,6 @@
 #ifndef _LINUX_EXPORT_H
 #define _LINUX_EXPORT_H
+
 /*
  * Export symbols from the kernel to modules. Forked from module.h
  * to reduce the amount of pointless cruft we feed to gcc when only
@@ -42,27 +43,26 @@ extern struct module __this_module;
 #ifdef CONFIG_MODVERSIONS
 /* Mark the CRC weak since genksyms apparently decides not to
  * generate checksums for some symbols */
 #define __CRC_SYMBOL(sym, sec)					\
 	extern __visible void *__crc_##sym __attribute__((weak)); \
 	static const unsigned long __kcrctab_##sym		\
 	__used							\
-	__attribute__((section("___kcrctab" sec "+" #sym), unused)) \
+	__attribute__((section("___kcrctab" sec "+" #sym), used)) \
 	= (unsigned long) &__crc_##sym;
 #else
 #define __CRC_SYMBOL(sym, sec)
 #endif
 
 /* For every exported symbol, place a struct in the __ksymtab section */
 #define ___EXPORT_SYMBOL(sym, sec)				\
 	extern typeof(sym) sym;					\
 	__CRC_SYMBOL(sym, sec)					\
 	static const char __kstrtab_##sym[]			\
 	__attribute__((section("__ksymtab_strings"), aligned(1))) \
 	= VMLINUX_SYMBOL_STR(sym);				\
-	extern const struct kernel_symbol __ksymtab_##sym;	\
-	__visible const struct kernel_symbol __ksymtab_##sym	\
+	static const struct kernel_symbol __ksymtab_##sym	\
 	__used							\
-	__attribute__((section("___ksymtab" sec "+" #sym), unused)) \
+	__attribute__((section("___ksymtab" sec "+" #sym), used)) \
 	= { (unsigned long)&sym, __kstrtab_##sym }
 
 #if defined(__KSYM_DEPS__)
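
The unused -> used attribute flips are not cosmetic. __attribute__((unused)) merely silences warnings, and with -fdata-sections the compiler may drop a static object that nothing references at compile time; __attribute__((used)) forces it to be emitted, leaving retention to the KEEP() globs in the linker script. Roughly (a reduced sketch, not the literal macro expansion):

    extern int some_object;

    static const unsigned long entry        /* static: no linker symbol is
                                             * needed; gc handles liveness */
            __attribute__((section("___ksymtab" "+" "some_object"), used))
            = (unsigned long)&some_object;  /* "used": always emitted */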
diff --git a/include/linux/init.h b/include/linux/init.h
index 6935d02474aa..e571fec4bb28 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -150,24 +150,8 @@ extern bool initcall_debug;
 
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_LTO
-/* Work around a LTO gcc problem: when there is no reference to a variable
- * in a module it will be moved to the end of the program. This causes
- * reordering of initcalls which the kernel does not like.
- * Add a dummy reference function to avoid this. The function is
- * deleted by the linker.
- */
-#define LTO_REFERENCE_INITCALL(x)			\
-	; /* yes this is needed */			\
-	static __used __exit void *reference_##x(void)	\
-	{						\
-		return &x;				\
-	}
-#else
-#define LTO_REFERENCE_INITCALL(x)
-#endif
-
-/* initcalls are now grouped by functionality into separate
+/*
+ * initcalls are now grouped by functionality into separate
  * subsections. Ordering inside the subsections is determined
  * by link order.
  * For backwards compatibility, initcall() puts the call in
@@ -175,12 +159,16 @@ extern bool initcall_debug;
  *
  * The `id' arg to __define_initcall() is needed so that multiple initcalls
  * can point at the same handler without causing duplicate-symbol build errors.
+ *
+ * Initcalls are run by placing pointers in initcall sections that the
+ * kernel iterates at runtime. The linker can do dead code / data elimination
+ * and remove that completely, so the initcall sections have to be marked
+ * as KEEP() in the linker script.
  */
 
 #define __define_initcall(fn, id) \
 	static initcall_t __initcall_##fn##id __used \
-	__attribute__((__section__(".initcall" #id ".init"))) = fn; \
-	LTO_REFERENCE_INITCALL(__initcall_##fn##id)
+	__attribute__((__section__(".initcall" #id ".init"))) = fn;
 
 /*
  * Early initcalls run before initializing SMP.
@@ -216,15 +204,15 @@ extern bool initcall_debug;
 
 #define __initcall(fn) device_initcall(fn)
 
-#define __exitcall(fn) \
+#define __exitcall(fn)						\
 	static exitcall_t __exitcall_##fn __exit_call = fn
 
-#define console_initcall(fn) \
-	static initcall_t __initcall_##fn \
+#define console_initcall(fn)					\
+	static initcall_t __initcall_##fn			\
 	__used __section(.con_initcall.init) = fn
 
-#define security_initcall(fn) \
-	static initcall_t __initcall_##fn \
+#define security_initcall(fn)					\
+	static initcall_t __initcall_##fn			\
 	__used __section(.security_initcall.init) = fn
 
 struct obs_kernel_param {
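
For orientation, a typical initcall (driver name hypothetical) shows why the KEEP() note added above matters: the only consumer of the emitted pointer is the runtime loop in init/main.c that walks __initcall_start..__initcall_end, a dependency the linker cannot see:

    static int __init mydrv_init(void)
    {
            return 0;       /* real registration work would go here */
    }
    /* expands through __define_initcall(fn, 6): a function pointer placed
     * in .initcall6.init, retained by KEEP() in the linker script */
    device_initcall(mydrv_init);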
diff --git a/init/Makefile b/init/Makefile
index 7bc47ee31c36..c4fb45525d08 100644
--- a/init/Makefile
+++ b/init/Makefile
@@ -2,6 +2,8 @@
 # Makefile for the linux kernel.
 #
 
+ccflags-y := -fno-function-sections -fno-data-sections
+
 obj-y                          := main.o version.o mounts.o
 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
 obj-y                          += noinitramfs.o