Diffstat (limited to 'include/asm-generic/vmlinux.lds.h')
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 159
 1 file changed, 102 insertions(+), 57 deletions(-)
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 8a92a170fb7d..db22d136ad08 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -15,7 +15,7 @@
  * HEAD_TEXT_SECTION
  * INIT_TEXT_SECTION(PAGE_SIZE)
  * INIT_DATA_SECTION(...)
- * PERCPU(PAGE_SIZE)
+ * PERCPU_SECTION(CACHELINE_SIZE)
  * __init_end = .;
  *
  * _stext = .;
@@ -67,7 +67,8 @@
  * Align to a 32 byte boundary equal to the
  * alignment gcc 4.5 uses for a struct
  */
-#define STRUCT_ALIGN() . = ALIGN(32)
+#define STRUCT_ALIGNMENT 32
+#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)
 
 /* The actual configuration determine if the init/exit sections
  * are handled as text/data or they can be discarded (which
@@ -123,7 +124,8 @@
 #endif
 
 #ifdef CONFIG_EVENT_TRACING
-#define FTRACE_EVENTS() VMLINUX_SYMBOL(__start_ftrace_events) = .; \
+#define FTRACE_EVENTS() . = ALIGN(8); \
+VMLINUX_SYMBOL(__start_ftrace_events) = .; \
 *(_ftrace_events) \
 VMLINUX_SYMBOL(__stop_ftrace_events) = .;
 #else
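Note on the hunk above: sections such as _ftrace_events are built up as arrays of fixed-size records bracketed by linker-defined __start_*/__stop_* symbols, and the new ". = ALIGN(8);" keeps the start symbol on the same boundary the linker gives the first record, so the array does not appear to begin with padding. A minimal sketch of that pattern in C, using illustrative names (demo_event, _demo_events) rather than the kernel's real ftrace types:

struct demo_event {
	const char *name;
	int enabled;
};

/* Each user drops one record into the dedicated input section. */
#define DEMO_EVENT(ev) \
	static const struct demo_event __demo_##ev \
	__attribute__((section("_demo_events"), used, aligned(8))) = { #ev, 0 }

/*
 * The linker script side supplies:
 *   . = ALIGN(8); __start_demo_events = .; *(_demo_events) __stop_demo_events = .;
 */
extern const struct demo_event __start_demo_events[];
extern const struct demo_event __stop_demo_events[];

static void demo_walk_events(void)
{
	const struct demo_event *ev;

	for (ev = __start_demo_events; ev < __stop_demo_events; ev++)
		; /* look at ev->name, ev->enabled, ... */
}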
@@ -139,41 +141,46 @@
 #endif
 
 #ifdef CONFIG_FTRACE_SYSCALLS
-#define TRACE_SYSCALLS() VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
+#define TRACE_SYSCALLS() . = ALIGN(8); \
+VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
 *(__syscalls_metadata) \
 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
 #else
 #define TRACE_SYSCALLS()
 #endif
 
+
+#define KERNEL_DTB() \
+STRUCT_ALIGN(); \
+VMLINUX_SYMBOL(__dtb_start) = .; \
+*(.dtb.init.rodata) \
+VMLINUX_SYMBOL(__dtb_end) = .;
+
 /* .data section */
 #define DATA_DATA \
 *(.data) \
 *(.ref.data) \
+*(.data..shared_aligned) /* percpu related */ \
 DEV_KEEP(init.data) \
 DEV_KEEP(exit.data) \
 CPU_KEEP(init.data) \
 CPU_KEEP(exit.data) \
 MEM_KEEP(init.data) \
 MEM_KEEP(exit.data) \
-. = ALIGN(32); \
-VMLINUX_SYMBOL(__start___tracepoints) = .; \
+STRUCT_ALIGN(); \
 *(__tracepoints) \
-VMLINUX_SYMBOL(__stop___tracepoints) = .; \
 /* implement dynamic printk debug */ \
+. = ALIGN(8); \
+VMLINUX_SYMBOL(__start___jump_table) = .; \
+*(__jump_table) \
+VMLINUX_SYMBOL(__stop___jump_table) = .; \
 . = ALIGN(8); \
 VMLINUX_SYMBOL(__start___verbose) = .; \
 *(__verbose) \
 VMLINUX_SYMBOL(__stop___verbose) = .; \
 LIKELY_PROFILE() \
 BRANCH_PROFILE() \
-TRACE_PRINTKS() \
-\
-STRUCT_ALIGN(); \
-FTRACE_EVENTS() \
-\
-STRUCT_ALIGN(); \
-TRACE_SYSCALLS()
+TRACE_PRINTKS()
 
 /*
  * Data section helpers
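Note on the DATA_DATA changes above: the tracepoint structures move out of here (their pointer array now lives in rodata, see the RO_DATA hunk further down), and a new __jump_table region is added for the jump-label machinery, again bracketed by __start___jump_table/__stop___jump_table and aligned to 8 bytes so the entries stay naturally aligned. A rough sketch of how boot code consumes such a table; the entry layout below is simplified and is not the kernel's exact struct jump_entry:

struct demo_jump_entry {
	unsigned long code;	/* address of the patchable instruction */
	unsigned long target;	/* destination when the branch is enabled */
	unsigned long key;	/* identifies the controlling key */
};

extern struct demo_jump_entry __start___jump_table[];
extern struct demo_jump_entry __stop___jump_table[];

/* Built-in device tree blobs collected by the new KERNEL_DTB() macro. */
extern char __dtb_start[], __dtb_end[];

static void demo_patch_jump_labels(void)
{
	struct demo_jump_entry *e;

	for (e = __start___jump_table; e < __stop___jump_table; e++)
		; /* sort the entries, then patch the site at e->code */
}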
@@ -191,7 +198,8 @@
 
 #define READ_MOSTLY_DATA(align) \
 . = ALIGN(align); \
-*(.data..read_mostly)
+*(.data..read_mostly) \
+. = ALIGN(align);
 
 #define CACHELINE_ALIGNED_DATA(align) \
 . = ALIGN(align); \
@@ -210,6 +218,10 @@
 VMLINUX_SYMBOL(__start_rodata) = .; \
 *(.rodata) *(.rodata.*) \
 *(__vermagic) /* Kernel version magic */ \
+. = ALIGN(8); \
+VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
+*(__tracepoints_ptrs) /* Tracepoints: pointer array */\
+VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .; \
 *(__markers_strings) /* Markers: strings */ \
 *(__tracepoints_strings)/* Tracepoints: strings */ \
 } \
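Note on the RO_DATA hunk above: rather than keeping the writable struct tracepoint objects in .data, the kernel now keeps a read-only array of pointers (__tracepoints_ptrs) in rodata and iterates that. A sketch of the consumer side, assuming only that the boundary symbols delimit an array of pointers as set up above:

struct tracepoint;	/* opaque here */

extern struct tracepoint * const __start___tracepoints_ptrs[];
extern struct tracepoint * const __stop___tracepoints_ptrs[];

static void demo_for_each_tracepoint(void (*fn)(struct tracepoint *tp))
{
	struct tracepoint * const *iter;

	for (iter = __start___tracepoints_ptrs;
	     iter < __stop___tracepoints_ptrs; iter++)
		fn(*iter);	/* pointers are naturally aligned, hence ALIGN(8) */
}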
@@ -264,70 +276,70 @@
 /* Kernel symbol table: Normal symbols */ \
 __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___ksymtab) = .; \
-*(__ksymtab) \
+*(SORT(___ksymtab+*)) \
 VMLINUX_SYMBOL(__stop___ksymtab) = .; \
 } \
 \
 /* Kernel symbol table: GPL-only symbols */ \
 __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
-*(__ksymtab_gpl) \
+*(SORT(___ksymtab_gpl+*)) \
 VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
 } \
 \
 /* Kernel symbol table: Normal unused symbols */ \
 __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
-*(__ksymtab_unused) \
+*(SORT(___ksymtab_unused+*)) \
 VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
 } \
 \
 /* Kernel symbol table: GPL-only unused symbols */ \
 __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
-*(__ksymtab_unused_gpl) \
+*(SORT(___ksymtab_unused_gpl+*)) \
 VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
 } \
 \
 /* Kernel symbol table: GPL-future-only symbols */ \
 __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
-*(__ksymtab_gpl_future) \
+*(SORT(___ksymtab_gpl_future+*)) \
 VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
 } \
 \
 /* Kernel symbol table: Normal symbols */ \
 __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___kcrctab) = .; \
-*(__kcrctab) \
+*(SORT(___kcrctab+*)) \
 VMLINUX_SYMBOL(__stop___kcrctab) = .; \
 } \
 \
 /* Kernel symbol table: GPL-only symbols */ \
 __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
-*(__kcrctab_gpl) \
+*(SORT(___kcrctab_gpl+*)) \
 VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
 } \
 \
 /* Kernel symbol table: Normal unused symbols */ \
 __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
-*(__kcrctab_unused) \
+*(SORT(___kcrctab_unused+*)) \
 VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
 } \
 \
 /* Kernel symbol table: GPL-only unused symbols */ \
 __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
-*(__kcrctab_unused_gpl) \
+*(SORT(___kcrctab_unused_gpl+*)) \
 VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
 } \
 \
 /* Kernel symbol table: GPL-future-only symbols */ \
 __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
-*(__kcrctab_gpl_future) \
+*(SORT(___kcrctab_gpl_future+*)) \
 VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
 } \
 \
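Note on the __ksymtab/__kcrctab hunks above: *(SORT(___ksymtab+*)) only works if every exported symbol lands in its own input section whose name ends with the symbol name; the linker then emits the export table sorted by name, which lets symbol lookup binary-search it. A simplified stand-in for the EXPORT_SYMBOL side (the real macros live in the kernel's export/module headers and carry more bookkeeping):

struct demo_kernel_symbol {
	unsigned long value;
	const char *name;
};

#define DEMO_EXPORT_SYMBOL(sym)						\
	static const char __demo_strtab_##sym[]				\
	__attribute__((section("__ksymtab_strings"))) = #sym;		\
	static const struct demo_kernel_symbol __demo_ksymtab_##sym	\
	__attribute__((section("___ksymtab+" #sym), used)) =		\
		{ (unsigned long)&sym, __demo_strtab_##sym }

/* Example: each export gets its own section, e.g. "___ksymtab+demo_func". */
int demo_func(void) { return 0; }
DEMO_EXPORT_SYMBOL(demo_func);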
@@ -352,6 +364,13 @@
 VMLINUX_SYMBOL(__start___param) = .; \
 *(__param) \
 VMLINUX_SYMBOL(__stop___param) = .; \
+} \
+\
+/* Built-in module versions. */ \
+__modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
+VMLINUX_SYMBOL(__start___modver) = .; \
+*(__modver) \
+VMLINUX_SYMBOL(__stop___modver) = .; \
 . = ALIGN((align)); \
 VMLINUX_SYMBOL(__end_rodata) = .; \
 } \
@@ -407,6 +426,12 @@
 *(.kprobes.text) \
 VMLINUX_SYMBOL(__kprobes_text_end) = .;
 
+#define ENTRY_TEXT \
+ALIGN_FUNCTION(); \
+VMLINUX_SYMBOL(__entry_text_start) = .; \
+*(.entry.text) \
+VMLINUX_SYMBOL(__entry_text_end) = .;
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 #define IRQENTRY_TEXT \
 ALIGN_FUNCTION(); \
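Note on ENTRY_TEXT above: it gathers the architecture's low-level entry code (.entry.text) between two markers so profiling and tracing code can ask whether an instruction address belongs to entry code. A typical consumer, sketched:

extern char __entry_text_start[], __entry_text_end[];

static int demo_in_entry_text(unsigned long addr)
{
	return addr >= (unsigned long)__entry_text_start &&
	       addr <  (unsigned long)__entry_text_end;
}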
@@ -463,9 +488,12 @@
 KERNEL_CTORS() \
 *(.init.rodata) \
 MCOUNT_REC() \
+FTRACE_EVENTS() \
+TRACE_SYSCALLS() \
 DEV_DISCARD(init.rodata) \
 CPU_DISCARD(init.rodata) \
-MEM_DISCARD(init.rodata)
+MEM_DISCARD(init.rodata) \
+KERNEL_DTB()
 
 #define INIT_TEXT \
 *(.init.text) \
@@ -626,10 +654,11 @@
 
 #ifdef CONFIG_BLK_DEV_INITRD
 #define INIT_RAM_FS \
-. = ALIGN(PAGE_SIZE); \
+. = ALIGN(4); \
 VMLINUX_SYMBOL(__initramfs_start) = .; \
 *(.init.ramfs) \
-VMLINUX_SYMBOL(__initramfs_end) = .;
+. = ALIGN(8); \
+*(.init.ramfs.info)
 #else
 #define INIT_RAM_FS
 #endif
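Note on the INIT_RAM_FS hunk above: __initramfs_end disappears; the size of the built-in cpio image is instead recorded in the separate .init.ramfs.info record emitted by the companion usr/initramfs_data.S change, so the archive itself only needs 4-byte alignment to match cpio padding while the size word gets ALIGN(8). A sketched consumer, assuming the size symbol is named __initramfs_size as in that companion change:

extern char __initramfs_start[];
extern unsigned long __initramfs_size;	/* lives in .init.ramfs.info */

static void demo_populate_rootfs(void)
{
	/* unpack the cpio archive at __initramfs_start, __initramfs_size bytes long */
}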
@@ -653,14 +682,41 @@
 }
 
 /**
+ * PERCPU_INPUT - the percpu input sections
+ * @cacheline: cacheline size
+ *
+ * The core percpu section names and core symbols which do not rely
+ * directly upon load addresses.
+ *
+ * @cacheline is used to align subsections to avoid false cacheline
+ * sharing between subsections for different purposes.
+ */
+#define PERCPU_INPUT(cacheline) \
+VMLINUX_SYMBOL(__per_cpu_start) = .; \
+*(.data..percpu..first) \
+. = ALIGN(PAGE_SIZE); \
+*(.data..percpu..page_aligned) \
+. = ALIGN(cacheline); \
+*(.data..percpu..readmostly) \
+. = ALIGN(cacheline); \
+*(.data..percpu) \
+*(.data..percpu..shared_aligned) \
+VMLINUX_SYMBOL(__per_cpu_end) = .;
+
+/**
  * PERCPU_VADDR - define output section for percpu area
+ * @cacheline: cacheline size
  * @vaddr: explicit base address (optional)
  * @phdr: destination PHDR (optional)
  *
- * Macro which expands to output section for percpu area. If @vaddr
- * is not blank, it specifies explicit base address and all percpu
- * symbols will be offset from the given address. If blank, @vaddr
- * always equals @laddr + LOAD_OFFSET.
+ * Macro which expands to output section for percpu area.
+ *
+ * @cacheline is used to align subsections to avoid false cacheline
+ * sharing between subsections for different purposes.
+ *
+ * If @vaddr is not blank, it specifies explicit base address and all
+ * percpu symbols will be offset from the given address. If blank,
+ * @vaddr always equals @laddr + LOAD_OFFSET.
  *
  * @phdr defines the output PHDR to use if not blank. Be warned that
  * output PHDR is sticky. If @phdr is specified, the next output
@@ -669,44 +725,33 @@
  *
  * Note that this macros defines __per_cpu_load as an absolute symbol.
  * If there is no need to put the percpu section at a predetermined
- * address, use PERCPU().
+ * address, use PERCPU_SECTION.
  */
-#define PERCPU_VADDR(vaddr, phdr) \
+#define PERCPU_VADDR(cacheline, vaddr, phdr) \
 VMLINUX_SYMBOL(__per_cpu_load) = .; \
 .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
 - LOAD_OFFSET) { \
-VMLINUX_SYMBOL(__per_cpu_start) = .; \
-*(.data..percpu..first) \
-*(.data..percpu..page_aligned) \
-*(.data..percpu) \
-*(.data..percpu..shared_aligned) \
-VMLINUX_SYMBOL(__per_cpu_end) = .; \
+PERCPU_INPUT(cacheline) \
 } phdr \
 . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
 
 /**
- * PERCPU - define output section for percpu area, simple version
- * @align: required alignment
+ * PERCPU_SECTION - define output section for percpu area, simple version
+ * @cacheline: cacheline size
  *
- * Align to @align and outputs output section for percpu area. This
- * macro doesn't maniuplate @vaddr or @phdr and __per_cpu_load and
+ * Align to PAGE_SIZE and outputs output section for percpu area. This
+ * macro doesn't manipulate @vaddr or @phdr and __per_cpu_load and
  * __per_cpu_start will be identical.
  *
- * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
- * that __per_cpu_load is defined as a relative symbol against
- * .data..percpu which is required for relocatable x86_32
- * configuration.
+ * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
+ * except that __per_cpu_load is defined as a relative symbol against
+ * .data..percpu which is required for relocatable x86_32 configuration.
  */
-#define PERCPU(align) \
-. = ALIGN(align); \
+#define PERCPU_SECTION(cacheline) \
+. = ALIGN(PAGE_SIZE); \
 .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__per_cpu_load) = .; \
-VMLINUX_SYMBOL(__per_cpu_start) = .; \
-*(.data..percpu..first) \
-*(.data..percpu..page_aligned) \
-*(.data..percpu) \
-*(.data..percpu..shared_aligned) \
-VMLINUX_SYMBOL(__per_cpu_end) = .; \
+PERCPU_INPUT(cacheline) \
 }
 
 
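Note on the percpu hunks above: PERCPU_INPUT() now lists the per-CPU input subsections once, and both PERCPU_VADDR() and the new PERCPU_SECTION() (which replaces PERCPU(PAGE_SIZE) in the arch linker scripts, as the header comment in the first hunk says) reuse it; @cacheline keeps read-mostly and frequently-written per-CPU data on separate cache lines. On the C side, the DEFINE_PER_CPU variants decide which of those input sections a variable lands in. A simplified sketch of that mapping only, not the kernel's real wrappers (which add alignment and type-checking glue):

#define DEMO_PER_CPU(type, name, suffix) \
	__attribute__((section(".data..percpu" suffix))) type name

static DEMO_PER_CPU(int, demo_counter, "");			/* -> .data..percpu */
static DEMO_PER_CPU(int, demo_hot, "..shared_aligned");	/* -> .data..percpu..shared_aligned */
static DEMO_PER_CPU(int, demo_ro, "..readmostly");		/* -> .data..percpu..readmostly */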
@@ -725,7 +770,7 @@
  * the sections that has this restriction (or similar)
  * is located before the ones requiring PAGE_SIZE alignment.
  * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment which
- * matches the requirment of PAGE_ALIGNED_DATA.
+ * matches the requirement of PAGE_ALIGNED_DATA.
  *
  * use 0 as page_align if page_aligned data is not used */
 #define RW_DATA_SECTION(cacheline, pagealigned, inittask) \