author    Linus Torvalds <torvalds@linux-foundation.org>  2016-10-14 17:26:58 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-10-14 17:26:58 -0400
commit    84d69848c97faab0c25aa2667b273404d2e2a64a (patch)
tree      594f3fe1b271b5255a1c5281e36f8bf938acd1c0 /include
parent    d4d24d2d0a7ea3b62efd7336bfc2344e29b36bc5 (diff)
parent    590abbdd273304b55824bcb9ea91840ea375575d (diff)
Merge branch 'kbuild' of git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild
Pull kbuild updates from Michal Marek:

 - EXPORT_SYMBOL for asm source by Al Viro.

   This does bring a regression, because genksyms no longer generates
   checksums for these symbols (CONFIG_MODVERSIONS). Nick Piggin is
   working on a patch to fix this. Plus, we are talking about functions
   like strcpy(), which rarely change prototypes.

 - Fixes for PPC fallout of the above by Stephen Rothwell and Nick Piggin

 - fixdep speedup by Alexey Dobriyan

 - preparatory work by Nick Piggin to allow architectures to build with
   -ffunction-sections, -fdata-sections and --gc-sections

 - CONFIG_THIN_ARCHIVES support by Stephen Rothwell

 - fix for filenames with colons in the initramfs source by me

* 'kbuild' of git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild: (22 commits)
  initramfs: Escape colons in depfile
  ppc: there is no clear_pages to export
  powerpc/64: whitelist unresolved modversions CRCs
  kbuild: -ffunction-sections fix for archs with conflicting sections
  kbuild: add arch specific post-link Makefile
  kbuild: allow archs to select link dead code/data elimination
  kbuild: allow architectures to use thin archives instead of ld -r
  kbuild: Regenerate genksyms lexer
  kbuild: genksyms fix for typeof handling
  fixdep: faster CONFIG_ search
  ia64: move exports to definitions
  sparc32: debride memcpy.S a bit
  [sparc] unify 32bit and 64bit string.h
  sparc: move exports to definitions
  ppc: move exports to definitions
  arm: move exports to definitions
  s390: move exports to definitions
  m68k: move exports to definitions
  alpha: move exports to actual definitions
  x86: move exports to actual definitions
  ...
Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/export.h       94
-rw-r--r--  include/asm-generic/vmlinux.lds.h  57
-rw-r--r--  include/linux/compiler.h           23
-rw-r--r--  include/linux/export.h             30
-rw-r--r--  include/linux/init.h               38
5 files changed, 179 insertions(+), 63 deletions(-)
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
new file mode 100644
index 000000000000..43199a049da5
--- /dev/null
+++ b/include/asm-generic/export.h
@@ -0,0 +1,94 @@
+#ifndef __ASM_GENERIC_EXPORT_H
+#define __ASM_GENERIC_EXPORT_H
+
+#ifndef KSYM_FUNC
+#define KSYM_FUNC(x) x
+#endif
+#ifdef CONFIG_64BIT
+#define __put .quad
+#ifndef KSYM_ALIGN
+#define KSYM_ALIGN 8
+#endif
+#ifndef KCRC_ALIGN
+#define KCRC_ALIGN 8
+#endif
+#else
+#define __put .long
+#ifndef KSYM_ALIGN
+#define KSYM_ALIGN 4
+#endif
+#ifndef KCRC_ALIGN
+#define KCRC_ALIGN 4
+#endif
+#endif
+
+#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
+#define KSYM(name) _##name
+#else
+#define KSYM(name) name
+#endif
+
+/*
+ * note on .section use: @progbits vs %progbits nastiness doesn't matter,
+ * since we immediately emit into those sections anyway.
+ */
+.macro ___EXPORT_SYMBOL name,val,sec
+#ifdef CONFIG_MODULES
+	.globl KSYM(__ksymtab_\name)
+	.section ___ksymtab\sec+\name,"a"
+	.balign KSYM_ALIGN
+KSYM(__ksymtab_\name):
+	__put \val, KSYM(__kstrtab_\name)
+	.previous
+	.section __ksymtab_strings,"a"
+KSYM(__kstrtab_\name):
+#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
+	.asciz "_\name"
+#else
+	.asciz "\name"
+#endif
+	.previous
+#ifdef CONFIG_MODVERSIONS
+	.section ___kcrctab\sec+\name,"a"
+	.balign KCRC_ALIGN
+KSYM(__kcrctab_\name):
+	__put KSYM(__crc_\name)
+	.weak KSYM(__crc_\name)
+	.previous
+#endif
+#endif
+.endm
+#undef __put
+
+#if defined(__KSYM_DEPS__)
+
+#define __EXPORT_SYMBOL(sym, val, sec) === __KSYM_##sym ===
+
+#elif defined(CONFIG_TRIM_UNUSED_KSYMS)
+
+#include <linux/kconfig.h>
+#include <generated/autoksyms.h>
+
+#define __EXPORT_SYMBOL(sym, val, sec)				\
+	__cond_export_sym(sym, val, sec, config_enabled(__KSYM_##sym))
+#define __cond_export_sym(sym, val, sec, conf)			\
+	___cond_export_sym(sym, val, sec, conf)
+#define ___cond_export_sym(sym, val, sec, enabled)		\
+	__cond_export_sym_##enabled(sym, val, sec)
+#define __cond_export_sym_1(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
+#define __cond_export_sym_0(sym, val, sec) /* nothing */
+
+#else
+#define __EXPORT_SYMBOL(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
+#endif
+
+#define EXPORT_SYMBOL(name)					\
+	__EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)),)
+#define EXPORT_SYMBOL_GPL(name)					\
+	__EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)), _gpl)
+#define EXPORT_DATA_SYMBOL(name)				\
+	__EXPORT_SYMBOL(name, KSYM(name),)
+#define EXPORT_DATA_SYMBOL_GPL(name)				\
+	__EXPORT_SYMBOL(name, KSYM(name),_gpl)
+
+#endif
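For readers more at home on the C side: per exported symbol, the assembler macro above emits the same two objects that linux/export.h emits from C, a name string in __ksymtab_strings and a { value, name } record in a per-symbol ___ksymtab+<sym> section. A minimal userspace sketch of that layout (GCC/ELF assumed; the symbol name and the simplified struct are illustrative, not kernel API):

/* demo.c — gcc demo.c && ./a.out */
#include <stdio.h>

struct kernel_symbol {          /* mirrors the kernel's two-field record */
	unsigned long value;    /* address of the exported symbol */
	const char *name;       /* points into __ksymtab_strings */
};

int my_driver_symbol;           /* invented stand-in for an exported symbol */

static const char my_strtab[]
	__attribute__((section("__ksymtab_strings"), aligned(1), used))
	= "my_driver_symbol";

static const struct kernel_symbol my_ksymtab_entry
	__attribute__((section("___ksymtab+my_driver_symbol"), used))
	= { (unsigned long)&my_driver_symbol, my_strtab };

int main(void)
{
	printf("%s @ %#lx\n", my_ksymtab_entry.name, my_ksymtab_entry.value);
	return 0;
}

The one-section-per-symbol naming is what lets the linker SORT() the entries and, with CONFIG_TRIM_UNUSED_KSYMS, drop individual ones.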
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 3e42bcdd014b..30747960bc54 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -196,9 +196,14 @@
 		*(.dtb.init.rodata)					\
 		VMLINUX_SYMBOL(__dtb_end) = .;
 
-/* .data section */
+/*
+ * .data section
+ * With LD_DEAD_CODE_DATA_ELIMINATION, -fdata-sections generates
+ * .data.identifier sections, which need to be pulled in with .data; we
+ * don't want .data..stuff, which has its own requirements. Same for bss.
+ */
 #define DATA_DATA							\
-	*(.data)							\
+	*(.data .data.[0-9a-zA-Z_]*)					\
 	*(.ref.data)							\
 	*(.data..shared_aligned) /* percpu related */			\
 	MEM_KEEP(init.data)						\
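The bracket pattern follows from how -fdata-sections names things; a short userspace illustration (file and variable names invented):

/* demo.c — gcc -c -fdata-sections demo.c && objdump -h demo.o
 * Each variable lands in its own section (.data.counter, .data.table),
 * all matched by .data.[0-9a-zA-Z_]* above, while kernel-special sections
 * such as .data..percpu (note the double dot) deliberately are not.
 */
int counter = 42;              /* placed in .data.counter */
int table[4] = { 1, 2, 3, 4 }; /* placed in .data.table   */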
@@ -320,76 +325,76 @@
 	/* Kernel symbol table: Normal symbols */			\
 	__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) {			\
 		VMLINUX_SYMBOL(__start___ksymtab) = .;			\
-		*(SORT(___ksymtab+*))					\
+		KEEP(*(SORT(___ksymtab+*)))				\
 		VMLINUX_SYMBOL(__stop___ksymtab) = .;			\
 	}								\
 									\
 	/* Kernel symbol table: GPL-only symbols */			\
 	__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {		\
 		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;		\
-		*(SORT(___ksymtab_gpl+*))				\
+		KEEP(*(SORT(___ksymtab_gpl+*)))				\
 		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;		\
 	}								\
 									\
 	/* Kernel symbol table: Normal unused symbols */		\
 	__ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__start___ksymtab_unused) = .;		\
-		*(SORT(___ksymtab_unused+*))				\
+		KEEP(*(SORT(___ksymtab_unused+*)))			\
 		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;		\
 	}								\
 									\
 	/* Kernel symbol table: GPL-only unused symbols */		\
 	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
 		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .;	\
-		*(SORT(___ksymtab_unused_gpl+*))			\
+		KEEP(*(SORT(___ksymtab_unused_gpl+*)))			\
 		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .;	\
 	}								\
 									\
 	/* Kernel symbol table: GPL-future-only symbols */		\
 	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
 		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .;	\
-		*(SORT(___ksymtab_gpl_future+*))			\
+		KEEP(*(SORT(___ksymtab_gpl_future+*)))			\
 		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .;	\
 	}								\
 									\
 	/* Kernel symbol table: Normal symbols */			\
 	__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) {			\
 		VMLINUX_SYMBOL(__start___kcrctab) = .;			\
-		*(SORT(___kcrctab+*))					\
+		KEEP(*(SORT(___kcrctab+*)))				\
 		VMLINUX_SYMBOL(__stop___kcrctab) = .;			\
 	}								\
 									\
 	/* Kernel symbol table: GPL-only symbols */			\
 	__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {		\
 		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;		\
-		*(SORT(___kcrctab_gpl+*))				\
+		KEEP(*(SORT(___kcrctab_gpl+*)))				\
 		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;		\
 	}								\
 									\
 	/* Kernel symbol table: Normal unused symbols */		\
 	__kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__start___kcrctab_unused) = .;		\
-		*(SORT(___kcrctab_unused+*))				\
+		KEEP(*(SORT(___kcrctab_unused+*)))			\
 		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;		\
 	}								\
 									\
 	/* Kernel symbol table: GPL-only unused symbols */		\
 	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
 		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .;	\
-		*(SORT(___kcrctab_unused_gpl+*))			\
+		KEEP(*(SORT(___kcrctab_unused_gpl+*)))			\
 		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .;	\
 	}								\
 									\
 	/* Kernel symbol table: GPL-future-only symbols */		\
 	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
 		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .;	\
-		*(SORT(___kcrctab_gpl_future+*))			\
+		KEEP(*(SORT(___kcrctab_gpl_future+*)))			\
 		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .;	\
 	}								\
 									\
 	/* Kernel symbol table: strings */				\
 	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
-		*(__ksymtab_strings)					\
+		KEEP(*(__ksymtab_strings))				\
 	}								\
 									\
 	/* __*init sections */						\
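Why every one of these entries suddenly needs KEEP(): the tables are only ever walked through the __start/__stop bounds, so no individual input section is referenced by name, and a --gc-sections link would discard all of them. A small userspace sketch of the failure mode (section and file names invented):

/* demo.c — gcc -ffunction-sections -fdata-sections -Wl,--gc-sections \
 *              -Wl,--print-gc-sections demo.c
 * The linker reports the unreferenced "mytab" section as removed; a linker
 * script would need KEEP(*(mytab)) to retain it, which is exactly what the
 * ksymtab/kcrctab output sections above gained.
 */
#include <stdio.h>

static const unsigned long mytab_entry
	__attribute__((used, section("mytab"))) = 0xdeadbeef;

int main(void)
{
	puts("nothing references mytab_entry, so --gc-sections drops it");
	return 0;
}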
@@ -424,12 +429,17 @@
 #define SECURITY_INIT							\
 	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
 		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
-		*(.security_initcall.init)				\
+		KEEP(*(.security_initcall.init))			\
 		VMLINUX_SYMBOL(__security_initcall_end) = .;		\
 	}
 
 /* .text section. Map to function alignment to avoid address changes
- * during second ld run in second ld pass when generating System.map */
+ * during second ld run in second ld pass when generating System.map.
+ * With LD_DEAD_CODE_DATA_ELIMINATION, -ffunction-sections generates
+ * .text.identifier sections which need to be pulled in with .text, but some
+ * architectures define .text.foo which is not intended to be pulled in here.
+ * Those enabling LD_DEAD_CODE_DATA_ELIMINATION must ensure they don't have
+ * conflicting section names, and must pull in .text.[0-9a-zA-Z_]* */
 #define TEXT_TEXT							\
 	ALIGN_FUNCTION();						\
 	*(.text.hot .text .text.fixup .text.unlikely)			\
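The same naming logic applies to code; a short illustration of what -ffunction-sections produces (function names invented):

/* demo.c — gcc -c -ffunction-sections demo.c && objdump -h demo.o
 * Each function lands in its own .text.<name> section, matched by the
 * .text.[0-9a-zA-Z_]* pattern the comment above says enabling architectures
 * must pull in — and the reason a hand-written ".text.foo" would collide.
 */
int live_fn(void)  { return 1; }  /* placed in .text.live_fn  */
int stale_fn(void) { return 2; }  /* placed in .text.stale_fn */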
@@ -533,6 +543,7 @@
 
 /* init and exit section handling */
 #define INIT_DATA							\
+	KEEP(*(SORT(___kentry+*)))					\
 	*(.init.data)							\
 	MEM_DISCARD(init.data)						\
 	KERNEL_CTORS()							\
@@ -599,7 +610,7 @@
 	BSS_FIRST_SECTIONS						\
 	*(.bss..page_aligned)						\
 	*(.dynbss)							\
-	*(.bss)								\
+	*(.bss .bss.[0-9a-zA-Z_]*)					\
 	*(COMMON)							\
 	}
 
@@ -682,12 +693,12 @@
 
 #define INIT_CALLS_LEVEL(level)						\
 		VMLINUX_SYMBOL(__initcall##level##_start) = .;		\
-		*(.initcall##level##.init)				\
-		*(.initcall##level##s.init)				\
+		KEEP(*(.initcall##level##.init))			\
+		KEEP(*(.initcall##level##s.init))			\
 
 #define INIT_CALLS							\
 		VMLINUX_SYMBOL(__initcall_start) = .;			\
-		*(.initcallearly.init)					\
+		KEEP(*(.initcallearly.init))				\
 		INIT_CALLS_LEVEL(0)					\
 		INIT_CALLS_LEVEL(1)					\
 		INIT_CALLS_LEVEL(2)					\
@@ -701,21 +712,21 @@
 
 #define CON_INITCALL							\
 		VMLINUX_SYMBOL(__con_initcall_start) = .;		\
-		*(.con_initcall.init)					\
+		KEEP(*(.con_initcall.init))				\
 		VMLINUX_SYMBOL(__con_initcall_end) = .;
 
 #define SECURITY_INITCALL						\
 		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
-		*(.security_initcall.init)				\
+		KEEP(*(.security_initcall.init))			\
 		VMLINUX_SYMBOL(__security_initcall_end) = .;
 
 #ifdef CONFIG_BLK_DEV_INITRD
 #define INIT_RAM_FS							\
 	. = ALIGN(4);							\
 	VMLINUX_SYMBOL(__initramfs_start) = .;				\
-	*(.init.ramfs)							\
+	KEEP(*(.init.ramfs))						\
 	. = ALIGN(8);							\
-	*(.init.ramfs.info)
+	KEEP(*(.init.ramfs.info))
 #else
 #define INIT_RAM_FS
 #endif
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 668569844d37..f1bfa15b6f9b 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -182,6 +182,29 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
 # define unreachable() do { } while (1)
 #endif
 
+/*
+ * KENTRY - kernel entry point
+ * This can be used to annotate symbols (functions or data) that are used
+ * without their linker symbol being referenced explicitly. For example,
+ * interrupt vector handlers, or functions in the kernel image that are found
+ * programmatically.
+ *
+ * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
+ * are handled in their own way (with KEEP() in linker scripts).
+ *
+ * KENTRY can be avoided if the symbols in question are marked as KEEP() in
+ * the linker script. For example an architecture could KEEP() its entire
+ * boot/exception vector code rather than annotate each function and data.
+ */
+#ifndef KENTRY
+# define KENTRY(sym)						\
+	extern typeof(sym) sym;					\
+	static const unsigned long __kentry_##sym		\
+	__used							\
+	__attribute__((section("___kentry" "+" #sym), used))	\
+	= (unsigned long)&sym;
+#endif
+
 #ifndef RELOC_HIDE
 # define RELOC_HIDE(ptr, off)					\
   ({ unsigned long __ptr;					\
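A hedged usage sketch for the new KENTRY() (userspace-compilable under GCC/ELF; the handler name is invented and the macro body is copied from the hunk above, with typeof spelled __typeof__): a function found only by table scan has no C reference, so --gc-sections would drop it; KENTRY() plants a live pointer to it in a ___kentry+<sym> section, which INIT_DATA in vmlinux.lds.h now KEEP()s.

#include <stdio.h>

/* KENTRY as added above */
#define KENTRY(sym)						\
	extern __typeof__(sym) sym;				\
	static const unsigned long __kentry_##sym		\
	__attribute__((used, section("___kentry" "+" #sym)))	\
	= (unsigned long)&sym;

void my_vector_stub(void)	/* invented: located programmatically, not by name */
{
}

KENTRY(my_vector_stub)		/* emits __kentry_my_vector_stub, keeping it live */

int main(void)
{
	printf("kentry record holds %#lx\n", __kentry_my_vector_stub);
	return 0;
}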
diff --git a/include/linux/export.h b/include/linux/export.h
index d7df4922da1d..2a0f61fbc731 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -1,5 +1,6 @@
 #ifndef _LINUX_EXPORT_H
 #define _LINUX_EXPORT_H
+
 /*
  * Export symbols from the kernel to modules. Forked from module.h
  * to reduce the amount of pointless cruft we feed to gcc when only
@@ -42,27 +43,26 @@ extern struct module __this_module;
 #ifdef CONFIG_MODVERSIONS
 /* Mark the CRC weak since genksyms apparently decides not to
  * generate checksums for some symbols */
 #define __CRC_SYMBOL(sym, sec)					\
 	extern __visible void *__crc_##sym __attribute__((weak)); \
 	static const unsigned long __kcrctab_##sym		\
 	__used							\
-	__attribute__((section("___kcrctab" sec "+" #sym), unused))	\
+	__attribute__((section("___kcrctab" sec "+" #sym), used))	\
 	= (unsigned long) &__crc_##sym;
 #else
 #define __CRC_SYMBOL(sym, sec)
 #endif
 
 /* For every exported symbol, place a struct in the __ksymtab section */
 #define ___EXPORT_SYMBOL(sym, sec)				\
 	extern typeof(sym) sym;					\
 	__CRC_SYMBOL(sym, sec)					\
 	static const char __kstrtab_##sym[]			\
 	__attribute__((section("__ksymtab_strings"), aligned(1))) \
 	= VMLINUX_SYMBOL_STR(sym);				\
-	extern const struct kernel_symbol __ksymtab_##sym;	\
-	__visible const struct kernel_symbol __ksymtab_##sym	\
+	static const struct kernel_symbol __ksymtab_##sym	\
 	__used							\
-	__attribute__((section("___ksymtab" sec "+" #sym), unused))	\
+	__attribute__((section("___ksymtab" sec "+" #sym), used))	\
 	= { (unsigned long)&sym, __kstrtab_##sym }
 
 #if defined(__KSYM_DEPS__)
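The quiet but important change in both attribute lists above is "unused" becoming "used". A hedged demo of the difference (GCC assumed; names invented): "unused" merely silences -Wunused warnings and still lets the optimizer discard an unreferenced static, while "used" forces the definition to be emitted, so the ksymtab/kcrctab entries cannot vanish before the linker ever sees them.

/* demo.c — gcc -c -O2 demo.c && nm demo.o
 * kept_entry shows up in the object file; dropped_entry typically does not.
 */
static const unsigned long dropped_entry __attribute__((unused)) = 1;
static const unsigned long kept_entry   __attribute__((used))   = 2;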
diff --git a/include/linux/init.h b/include/linux/init.h
index 5a3321a7909b..024a0b5b3ed0 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -139,24 +139,8 @@ extern bool initcall_debug;
 
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_LTO
-/* Work around a LTO gcc problem: when there is no reference to a variable
- * in a module it will be moved to the end of the program. This causes
- * reordering of initcalls which the kernel does not like.
- * Add a dummy reference function to avoid this. The function is
- * deleted by the linker.
- */
-#define LTO_REFERENCE_INITCALL(x) \
-	; /* yes this is needed */			\
-	static __used __exit void *reference_##x(void)	\
-	{						\
-		return &x;				\
-	}
-#else
-#define LTO_REFERENCE_INITCALL(x)
-#endif
-
-/* initcalls are now grouped by functionality into separate
+/*
+ * initcalls are now grouped by functionality into separate
  * subsections. Ordering inside the subsections is determined
  * by link order.
  * For backwards compatibility, initcall() puts the call in
@@ -164,12 +148,16 @@ extern bool initcall_debug;
  *
  * The `id' arg to __define_initcall() is needed so that multiple initcalls
  * can point at the same handler without causing duplicate-symbol build errors.
+ *
+ * Initcalls are run by placing pointers in initcall sections that the
+ * kernel iterates at runtime. The linker can do dead code / data elimination
+ * and remove that completely, so the initcall sections have to be marked
+ * as KEEP() in the linker script.
  */
 
 #define __define_initcall(fn, id) \
 	static initcall_t __initcall_##fn##id __used \
-	__attribute__((__section__(".initcall" #id ".init"))) = fn; \
-	LTO_REFERENCE_INITCALL(__initcall_##fn##id)
+	__attribute__((__section__(".initcall" #id ".init"))) = fn;
 
 /*
  * Early initcalls run before initializing SMP.
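The comment added above is the crux of the series: initcall pointers are reached only by iterating a section, never by symbol name. A userspace analogue (GCC/ELF assumed; section, macro, and function names invented) that mirrors how the kernel walks __initcall_start..__initcall_end, and which would break under -Wl,--gc-sections unless a linker script KEEP()s the section:

/* demo.c — gcc demo.c && ./a.out */
#include <stdio.h>

typedef int (*initcall_t)(void);

#define my_initcall(fn)						\
	static initcall_t __initcall_##fn			\
	__attribute__((used, section("my_initcalls"))) = fn

static int hello_init(void) { puts("hello_init"); return 0; }
my_initcall(hello_init);

/* generated automatically by GNU ld for the "my_initcalls" section */
extern initcall_t __start_my_initcalls[], __stop_my_initcalls[];

int main(void)
{
	for (initcall_t *fn = __start_my_initcalls; fn < __stop_my_initcalls; fn++)
		(*fn)();	/* nothing else references __initcall_hello_init */
	return 0;
}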
@@ -205,15 +193,15 @@ extern bool initcall_debug;
 
 #define __initcall(fn) device_initcall(fn)
 
-#define __exitcall(fn) \
+#define __exitcall(fn)						\
 	static exitcall_t __exitcall_##fn __exit_call = fn
 
-#define console_initcall(fn) \
-	static initcall_t __initcall_##fn \
+#define console_initcall(fn)					\
+	static initcall_t __initcall_##fn			\
 	__used __section(.con_initcall.init) = fn
 
-#define security_initcall(fn) \
-	static initcall_t __initcall_##fn \
+#define security_initcall(fn)					\
+	static initcall_t __initcall_##fn			\
 	__used __section(.security_initcall.init) = fn
 
 struct obs_kernel_param {