path: root/include/asm-generic/vmlinux.lds.h
author     Sam Ravnborg <sam@ravnborg.org>    2008-01-20 14:07:28 -0500
committer  Sam Ravnborg <sam@ravnborg.org>    2008-01-28 17:21:17 -0500
commit     eb8f689046b857874e964463619f09df06d59fad (patch)
tree       ec726cd06764746a07689ede3b782c36a24d3e55 /include/asm-generic/vmlinux.lds.h
parent     f3fe866d59d707c7a2bba0b23add078e19edb3dc (diff)
Use separate sections for __dev/__cpu/__mem code/data
Introducing separate sections for __dev* (HOTPLUG), __cpu* (HOTPLUG_CPU) and
__mem* (MEMORY_HOTPLUG) code/data allows modpost to do a much more reliable
Section mismatch check. The check no longer depends on the actual
configuration of, for example, HOTPLUG.

The effect is that all users will see many more Section mismatch warnings
than before, because almost all of them were hidden when HOTPLUG was
enabled. The advantage is that the mismatches are far more likely to be
spotted when a piece of code is built, and the warnings will feel less
random in nature.

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Cc: Greg KH <greg@kroah.com>
Cc: Randy Dunlap <randy.dunlap@oracle.com>
Cc: Adrian Bunk <bunk@kernel.org>
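The mechanism behind the check is that annotated code always ends up in its
own input section, so a reference from ordinary .text into, say, .devinit.text
is visible to modpost regardless of the HOTPLUG configuration. A minimal
sketch of that idea (not the kernel's actual init.h definitions; __my_devinit,
example_probe and example_open are made-up names):

/* Sketch only: a section attribute, in the spirit of the kernel's __dev*
 * annotations, forces the function into a dedicated input section. */
#define __my_devinit __attribute__((__section__(".devinit.text")))

static int __my_devinit example_probe(void)   /* emitted into .devinit.text */
{
        return 0;
}

int example_open(void)                        /* emitted into .text */
{
        /* A call from .text into .devinit.text is exactly the kind of
         * cross-section reference modpost can now flag in every
         * configuration, not only when CONFIG_HOTPLUG is disabled. */
        return example_probe();
}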
Diffstat (limited to 'include/asm-generic/vmlinux.lds.h')
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 88
1 file changed, 82 insertions(+), 6 deletions(-)
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index ae0166e83490..e0a56fb8f813 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -9,10 +9,46 @@
 /* Align . to a 8 byte boundary equals to maximum function alignment. */
 #define ALIGN_FUNCTION() . = ALIGN(8)
 
+/* The actual configuration determine if the init/exit sections
+ * are handled as text/data or they can be discarded (which
+ * often happens at runtime)
+ */
+#ifdef CONFIG_HOTPLUG
+#define DEV_KEEP(sec) *(.dev##sec)
+#define DEV_DISCARD(sec)
+#else
+#define DEV_KEEP(sec)
+#define DEV_DISCARD(sec) *(.dev##sec)
+#endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+#define CPU_KEEP(sec) *(.cpu##sec)
+#define CPU_DISCARD(sec)
+#else
+#define CPU_KEEP(sec)
+#define CPU_DISCARD(sec) *(.cpu##sec)
+#endif
+
+#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_ACPI_HOTPLUG_MEMORY) \
+        || defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE)
+#define MEM_KEEP(sec) *(.mem##sec)
+#define MEM_DISCARD(sec)
+#else
+#define MEM_KEEP(sec)
+#define MEM_DISCARD(sec) *(.mem##sec)
+#endif
+
+
 /* .data section */
 #define DATA_DATA \
         *(.data) \
         *(.data.init.refok) \
+        DEV_KEEP(init.data) \
+        DEV_KEEP(exit.data) \
+        CPU_KEEP(init.data) \
+        CPU_KEEP(exit.data) \
+        MEM_KEEP(init.data) \
+        MEM_KEEP(exit.data) \
         . = ALIGN(8); \
         VMLINUX_SYMBOL(__start___markers) = .; \
         *(__markers) \
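For illustration, the ## in the macros above pastes the .dev/.cpu/.mem prefix
onto the section name that is passed in, so the same input sections are either
kept where they are or routed to the discardable init/exit output sections. A
standalone sketch of the DEV_* pair (demo.h is a hypothetical file name, and
CONFIG_HOTPLUG here is an ordinary -D define rather than a Kconfig-generated
one):

/* demo.h -- standalone sketch of the KEEP/DISCARD pattern used above. */
#ifdef CONFIG_HOTPLUG
#define DEV_KEEP(sec) *(.dev##sec)
#define DEV_DISCARD(sec)
#else
#define DEV_KEEP(sec)
#define DEV_DISCARD(sec) *(.dev##sec)
#endif

/* "cpp -P -DCONFIG_HOTPLUG demo.h" expands the two lines below to
 *     *(.devinit.data)
 *     (nothing)
 * while plain "cpp -P demo.h" swaps the expansions, so the .devinit.data
 * input sections are collected by the discardable init output section
 * instead of staying in .data. */
DEV_KEEP(init.data)
DEV_DISCARD(init.data)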
@@ -132,6 +168,16 @@
                 *(__ksymtab_strings) \
         } \
         \
+        /* __*init sections */ \
+        __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
+                DEV_KEEP(init.rodata) \
+                DEV_KEEP(exit.rodata) \
+                CPU_KEEP(init.rodata) \
+                CPU_KEEP(exit.rodata) \
+                MEM_KEEP(init.rodata) \
+                MEM_KEEP(exit.rodata) \
+        } \
+        \
         /* Built-in module parameters. */ \
         __param : AT(ADDR(__param) - LOAD_OFFSET) { \
                 VMLINUX_SYMBOL(__start___param) = .; \
@@ -139,7 +185,6 @@
                 VMLINUX_SYMBOL(__stop___param) = .; \
                 VMLINUX_SYMBOL(__end_rodata) = .; \
         } \
-        \
         . = ALIGN((align));
 
 /* RODATA provided for backward compatibility.
@@ -159,7 +204,14 @@
                 ALIGN_FUNCTION(); \
                 *(.text) \
                 *(.text.init.refok) \
-                *(.exit.text.refok)
+                *(.exit.text.refok) \
+                DEV_KEEP(init.text) \
+                DEV_KEEP(exit.text) \
+                CPU_KEEP(init.text) \
+                CPU_KEEP(exit.text) \
+                MEM_KEEP(init.text) \
+                MEM_KEEP(exit.text)
+
 
 /* sched.text is aling to function alignment to secure we have same
  * address even at second ld pass when generating System.map */
@@ -184,11 +236,35 @@
         VMLINUX_SYMBOL(__kprobes_text_end) = .;
 
 /* init and exit section handling */
-#define INIT_TEXT *(.init.text)
-#define INIT_DATA *(.init.data)
-#define EXIT_TEXT *(.exit.text)
-#define EXIT_DATA *(.exit.data)
+#define INIT_DATA \
+        *(.init.data) \
+        DEV_DISCARD(init.data) \
+        DEV_DISCARD(init.rodata) \
+        CPU_DISCARD(init.data) \
+        CPU_DISCARD(init.rodata) \
+        MEM_DISCARD(init.data) \
+        MEM_DISCARD(init.rodata)
+
+#define INIT_TEXT \
+        *(.init.text) \
+        DEV_DISCARD(init.text) \
+        CPU_DISCARD(init.text) \
+        MEM_DISCARD(init.text)
+
+#define EXIT_DATA \
+        *(.exit.data) \
+        DEV_DISCARD(exit.data) \
+        DEV_DISCARD(exit.rodata) \
+        CPU_DISCARD(exit.data) \
+        CPU_DISCARD(exit.rodata) \
+        MEM_DISCARD(exit.data) \
+        MEM_DISCARD(exit.rodata)
 
+#define EXIT_TEXT \
+        *(.exit.text) \
+        DEV_DISCARD(exit.text) \
+        CPU_DISCARD(exit.text) \
+        MEM_DISCARD(exit.text)
 
 /* DWARF debug sections.
    Symbols in the DWARF debugging sections are relative to