Diffstat (limited to 'include/asm-generic/vmlinux.lds.h')
 include/asm-generic/vmlinux.lds.h | 47 ++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 42 insertions(+), 5 deletions(-)
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 69e5c1182fde..7440a0dceddb 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -67,6 +67,8 @@
 	*(.rodata1)							\
 	}								\
 									\
+	BUG_TABLE							\
+									\
 	/* PCI quirks */						\
 	.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {		\
 		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;		\
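
This hunk places BUG_TABLE inside the read-only data definition; the macro itself becomes conditional on CONFIG_GENERIC_BUG later in this patch. Every marker symbol in this file is wrapped in VMLINUX_SYMBOL() so that architectures whose toolchains prefix C identifiers with an underscore can still reference the markers from C code. The header's own fallback really is the identity macro; the prefixed variant in the comment is only illustrative of what such an architecture would supply:

/* Fallback defined near the top of include/asm-generic/vmlinux.lds.h. */
#ifndef VMLINUX_SYMBOL
#define VMLINUX_SYMBOL(_sym_) _sym_
#endif

/*
 * An architecture with underscore-prefixed symbols would instead provide
 * something along the lines of (illustrative, not from this patch):
 *	#define VMLINUX_SYMBOL(_sym_) _##_sym_
 * so that 'extern char __start_pci_fixups_early[];' in C still resolves
 * to the symbol the linker script defines here.
 */
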
@@ -84,6 +86,19 @@
 		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;		\
 		*(.pci_fixup_resume)					\
 		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;		\
+		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .;	\
+		*(.pci_fixup_resume_early)				\
+		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .;	\
+		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .;		\
+		*(.pci_fixup_suspend)					\
+		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .;		\
+	}								\
+									\
+	/* Built-in firmware blobs */					\
+	.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) {		\
+		VMLINUX_SYMBOL(__start_builtin_fw) = .;			\
+		*(.builtin_fw)						\
+		VMLINUX_SYMBOL(__end_builtin_fw) = .;			\
 	}								\
 									\
 	/* RapidIO route ops */						\
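
The new start/end symbol pairs bracket arrays that C code walks like ordinary arrays. As a sketch of the consumer side of the firmware table (modeled on what drivers/base/firmware_class.c does with these symbols in this era; treat the details as illustrative):

#include <linux/kernel.h>
#include <linux/string.h>

/* Layout of one .builtin_fw entry (illustrative; matches the struct
 * builtin_fw used by the in-kernel firmware loader of this era). */
struct builtin_fw {
	char *name;
	void *data;
	unsigned long size;
};

/* Defined by the linker script above to bracket the section. */
extern struct builtin_fw __start_builtin_fw[];
extern struct builtin_fw __end_builtin_fw[];

static struct builtin_fw *find_builtin_fw(const char *name)
{
	struct builtin_fw *fw;

	for (fw = __start_builtin_fw; fw != __end_builtin_fw; fw++)
		if (strcmp(name, fw->name) == 0)
			return fw;
	return NULL;
}

The new pci_fixup_resume_early and pci_fixup_suspend ranges are consumed the same way by pci_fixup_device(), with entries registered through the DECLARE_PCI_FIXUP_RESUME_EARLY()/DECLARE_PCI_FIXUP_SUSPEND() macros in include/linux/pci.h.
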
@@ -93,6 +108,8 @@
 		VMLINUX_SYMBOL(__end_rio_route_ops) = .;		\
 	}								\
 									\
+	TRACEDATA							\
+									\
 	/* Kernel symbol table: Normal symbols */			\
 	__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) {			\
 		VMLINUX_SYMBOL(__start___ksymtab) = .;			\
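
TRACEDATA is dropped into the read-only data here; it expands to a real output section only under CONFIG_PM_TRACE (see its definition later in this patch). Each record pairs a source line with a file-name pointer so the PM core can report the last device touched before a failed resume. A hedged sketch of reading one record, modeled on drivers/base/power/trace.c (the packed 16-bit-line-plus-pointer layout comes from the x86 TRACE_RESUME() asm of this era and is illustrative):

#include <linux/kernel.h>

/* Bracketing symbols defined by the TRACEDATA macro below. */
extern char __tracedata_start[], __tracedata_end[];

static void report_tracedata(void *tracedata)
{
	unsigned short lineno = *(unsigned short *)tracedata;
	const char *file = *(const char **)((char *)tracedata + 2);

	printk(KERN_DEBUG "PM trace hit at %s:%u\n", file, lineno);
}
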
@@ -204,6 +221,7 @@
  * during second ld run in second ld pass when generating System.map */
 #define TEXT_TEXT							\
 		ALIGN_FUNCTION();					\
+		*(.text.hot)						\
 		*(.text)						\
 		*(.ref.text)						\
 		*(.text.init.refok)					\
@@ -213,7 +231,8 @@
 	CPU_KEEP(init.text)						\
 	CPU_KEEP(exit.text)						\
 	MEM_KEEP(init.text)						\
-	MEM_KEEP(exit.text)
+	MEM_KEEP(exit.text)						\
+	*(.text.unlikely)


 /* sched.text is aling to function alignment to secure we have same
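
These two hunks account for the sections gcc (4.3 and later) creates when it judges a function hot or cold, whether from profile feedback or from explicit attributes; collecting .text.hot and .text.unlikely here keeps that code inside the kernel's text instead of leaving it to the linker's orphan-section placement. A minimal illustration of how such sections arise (illustrative function names):

#include <linux/kernel.h>

__attribute__((hot)) int fast_path(int x)
{
	return x + 1;				/* expected on every call; gcc
						 * emits this into .text.hot */
}

__attribute__((cold)) void slow_error_path(const char *why)
{
	printk(KERN_ERR "failure: %s\n", why);	/* rarely executed; gcc emits
						 * this into .text.unlikely */
}
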
@@ -310,13 +329,29 @@
 	.stab.indexstr 0 : { *(.stab.indexstr) }			\
 	.comment 0 : { *(.comment) }
 
+#ifdef CONFIG_GENERIC_BUG
 #define BUG_TABLE							\
 	. = ALIGN(8);							\
 	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {		\
-		__start___bug_table = .;				\
+		VMLINUX_SYMBOL(__start___bug_table) = .;		\
 		*(__bug_table)						\
-		__stop___bug_table = .;					\
+		VMLINUX_SYMBOL(__stop___bug_table) = .;			\
+	}
+#else
+#define BUG_TABLE
+#endif
+
+#ifdef CONFIG_PM_TRACE
+#define TRACEDATA							\
+	. = ALIGN(4);							\
+	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {		\
+		VMLINUX_SYMBOL(__tracedata_start) = .;			\
+		*(.tracedata)						\
+		VMLINUX_SYMBOL(__tracedata_end) = .;			\
 	}
+#else
+#define TRACEDATA
+#endif
 
 #define NOTES								\
 	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {			\
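
The empty #else branches matter: BUG_TABLE and TRACEDATA are referenced unconditionally from RODATA above, so they must still expand to nothing when CONFIG_GENERIC_BUG or CONFIG_PM_TRACE is off. With CONFIG_GENERIC_BUG, each BUG() site emits a struct bug_entry into __bug_table, and the trap handler looks the faulting address up between the two bracketing symbols. A sketch of the consumer, modeled on find_bug() in lib/bug.c (the CONFIG_DEBUG_BUGVERBOSE file/line fields and the module-table fallback are elided; treat details as illustrative):

#include <linux/kernel.h>

/* One __bug_table record of this era, verbose fields omitted. */
struct bug_entry {
	unsigned long	bug_addr;	/* address of the trapping insn */
	unsigned short	flags;		/* e.g. warning vs. hard BUG */
};

extern const struct bug_entry __start___bug_table[], __stop___bug_table[];

static const struct bug_entry *find_bug(unsigned long bugaddr)
{
	const struct bug_entry *bug;

	for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
		if (bugaddr == bug->bug_addr)
			return bug;
	return NULL;	/* the real code falls back to module bug tables */
}
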
@@ -326,6 +361,8 @@
 	}
 
 #define INITCALLS							\
+	*(.initcallearly.init)						\
+	VMLINUX_SYMBOL(__early_initcall_end) = .;			\
 	*(.initcall0.init)						\
 	*(.initcall0s.init)						\
 	*(.initcall1.init)						\
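
The new __early_initcall_end marker splits the initcall stream: everything ahead of it (.initcallearly.init) runs before SMP bring-up, the numbered levels after it during normal boot. Registration and the pre-SMP walk look roughly like this (a sketch patterned on include/linux/init.h and init/main.c of this era, flattened for brevity):

/* As in include/linux/init.h: an initcall is a plain function pointer. */
typedef int (*initcall_t)(void);

/* Registration sketch: early_initcall() places the pointer in
 * .initcallearly.init, ahead of the __early_initcall_end marker. */
#define early_initcall(fn)						\
	static initcall_t __initcall_##fn				\
	__attribute__((__section__(".initcallearly.init"), used)) = fn

/* Consumption sketch: init/main.c runs everything up to the marker
 * before the secondary CPUs are started (the real loop goes through
 * do_one_initcall() for error reporting). */
extern initcall_t __initcall_start[], __early_initcall_end[];

static void do_pre_smp_initcalls(void)
{
	initcall_t *call;

	for (call = __initcall_start; call < __early_initcall_end; call++)
		(*call)();
}
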
@@ -346,10 +383,10 @@
 
 #define PERCPU(align)							\
 	. = ALIGN(align);						\
-	__per_cpu_start = .;						\
+	VMLINUX_SYMBOL(__per_cpu_start) = .;				\
 	.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {		\
 		*(.data.percpu.page_aligned)				\
 		*(.data.percpu)						\
 		*(.data.percpu.shared_aligned)				\
 	}								\
-	__per_cpu_end = .;
+	VMLINUX_SYMBOL(__per_cpu_end) = .;
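
Here __per_cpu_start and __per_cpu_end now also pass through VMLINUX_SYMBOL(), matching every other marker in this file. The pair delimits the master copy of per-CPU data; DEFINE_PER_CPU() variables land inside it, and generic setup code sizes and replicates that image once per CPU. A hedged sketch of both sides (patterned on include/linux/percpu.h and setup_per_cpu_areas() in init/main.c of this era; the helper below is illustrative and skips bootmem allocation and offset bookkeeping):

#include <linux/string.h>

/* Placement sketch: DEFINE_PER_CPU() of this era puts the variable in
 * .data.percpu, which the PERCPU() macro above collects. */
#define DEFINE_PER_CPU(type, name)					\
	__attribute__((__section__(".data.percpu")))			\
	__typeof__(type) per_cpu__##name

/* Consumption sketch: the master image is measured with the two linker
 * symbols and copied once per CPU. */
extern char __per_cpu_start[], __per_cpu_end[];

static void setup_per_cpu_copies(char *area, int nr_cpus)
{
	unsigned long size = __per_cpu_end - __per_cpu_start;
	int cpu;

	for (cpu = 0; cpu < nr_cpus; cpu++)
		memcpy(area + cpu * size, __per_cpu_start, size);
}
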