author    | Sam Ravnborg <sam@ravnborg.org>        | 2007-10-18 03:04:25 -0400
committer | Kyle McMartin <kyle@shortfin.cabal.ca> | 2007-10-18 03:59:12 -0400
commit    | be1b3d8cb141c0705d61af2e2372d72ff16c7d04 (patch)
tree      | 37ba423c1929ebd9c102390aac9c9398dde8fb45 /arch/parisc/kernel/vmlinux.lds.S
parent    | e9a03990d90ac5006f37f3ff7a6b87966d208697 (diff)
[PARISC] Beautify parisc vmlinux.lds.S
Introduce a consistent layout of vmlinux. The same layout
has been introduced for most architectures.
At the same time, move a few label definitions inside the
curly brackets so they are assigned the correct starting
address. Before, alignment inserted by ld could have caused
such a label to point before the actual start of the section,
as sketched below.
Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: Kyle McMartin <kyle@mcmartin.ca>
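
To illustrate the label issue described above, here is a minimal
linker-script sketch (hypothetical section and symbol names, not taken
from the patch). A label assigned outside an output section keeps the
pre-alignment value of the location counter; if ld aligns the section
when placing it, the label ends up pointing below the section's real
start. Assigning the label as the first statement inside the braces
ties it to the aligned start address instead.

    /* Hypothetical example; .example and the __start/__stop
     * symbols are made up for illustration.
     */

    /* Before: __start_example is assigned outside the braces.
     * If the input sections make ld align .example up (e.g. to
     * 8 bytes), the section begins at the aligned address while
     * __start_example still holds the old value of '.', i.e. it
     * points before the section actually starts.
     */
    __start_example = .;
    .example : { *(.example) }
    __stop_example = .;

    /* After: the assignments sit inside the braces, so they see
     * the location counter after any alignment ld has applied and
     * therefore match the section's real boundaries.
     */
    .example : {
        __start_example = .;
        *(.example)
        __stop_example = .;
    }
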
Diffstat (limited to 'arch/parisc/kernel/vmlinux.lds.S')
-rw-r--r-- | arch/parisc/kernel/vmlinux.lds.S | 319
1 files changed, 185 insertions, 134 deletions
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index ee7a16eb6fdd..d6951bb5a9c9 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -46,168 +46,219 @@ jiffies = jiffies_64;
46 | #endif | 46 | #endif |
47 | SECTIONS | 47 | SECTIONS |
48 | { | 48 | { |
49 | . = KERNEL_BINARY_TEXT_START; | ||
49 | 50 | ||
50 | . = KERNEL_BINARY_TEXT_START; | 51 | _text = .; /* Text and read-only data */ |
51 | 52 | .text ALIGN(16) : { | |
52 | _text = .; /* Text and read-only data */ | 53 | TEXT_TEXT |
53 | .text ALIGN(16) : { | 54 | SCHED_TEXT |
54 | TEXT_TEXT | 55 | LOCK_TEXT |
55 | SCHED_TEXT | 56 | *(.text.do_softirq) |
56 | LOCK_TEXT | 57 | *(.text.sys_exit) |
57 | *(.text.do_softirq) | 58 | *(.text.do_sigaltstack) |
58 | *(.text.sys_exit) | 59 | *(.text.do_fork) |
59 | *(.text.do_sigaltstack) | 60 | *(.text.*) |
60 | *(.text.do_fork) | 61 | *(.fixup) |
61 | *(.text.*) | 62 | *(.lock.text) /* out-of-line lock text */ |
62 | *(.fixup) | 63 | *(.gnu.warning) |
63 | *(.lock.text) /* out-of-line lock text */ | ||
64 | *(.gnu.warning) | ||
65 | } = 0 | 64 | } = 0 |
65 | /* End of text section */ | ||
66 | _etext = .; | ||
66 | 67 | ||
67 | _etext = .; /* End of text section */ | 68 | RODATA |
69 | BUG_TABLE | ||
68 | 70 | ||
69 | RODATA | 71 | /* writeable */ |
70 | 72 | /* Make sure this is page aligned so | |
71 | BUG_TABLE | 73 | * that we can properly leave these |
72 | 74 | * as writable | |
73 | /* writeable */ | 75 | */ |
74 | . = ALIGN(ASM_PAGE_SIZE); /* Make sure this is page aligned so | 76 | . = ALIGN(ASM_PAGE_SIZE); |
75 | that we can properly leave these | 77 | data_start = .; |
76 | as writable */ | 78 | . = ALIGN(16); |
77 | data_start = .; | 79 | /* Exception table */ |
78 | 80 | __ex_table : { | |
79 | . = ALIGN(16); /* Exception table */ | 81 | __start___ex_table = .; |
80 | __start___ex_table = .; | 82 | *(__ex_table) |
81 | __ex_table : { *(__ex_table) } | 83 | __stop___ex_table = .; |
82 | __stop___ex_table = .; | 84 | } |
83 | 85 | ||
84 | NOTES | 86 | NOTES |
85 | 87 | ||
86 | __start___unwind = .; /* unwind info */ | 88 | /* unwind info */ |
87 | .PARISC.unwind : { *(.PARISC.unwind) } | 89 | .PARISC.unwind : { |
88 | __stop___unwind = .; | 90 | __start___unwind = .; |
91 | *(.PARISC.unwind) | ||
92 | __stop___unwind = .; | ||
93 | } | ||
89 | 94 | ||
90 | /* rarely changed data like cpu maps */ | 95 | /* rarely changed data like cpu maps */ |
91 | . = ALIGN(16); | 96 | . = ALIGN(16); |
92 | .data.read_mostly : { *(.data.read_mostly) } | 97 | .data.read_mostly : { |
98 | *(.data.read_mostly) | ||
99 | } | ||
93 | 100 | ||
94 | . = ALIGN(L1_CACHE_BYTES); | 101 | . = ALIGN(L1_CACHE_BYTES); |
95 | .data : { /* Data */ | 102 | /* Data */ |
96 | DATA_DATA | 103 | .data : { |
97 | CONSTRUCTORS | 104 | DATA_DATA |
105 | CONSTRUCTORS | ||
98 | } | 106 | } |
99 | 107 | ||
100 | . = ALIGN(L1_CACHE_BYTES); | 108 | . = ALIGN(L1_CACHE_BYTES); |
101 | .data.cacheline_aligned : { *(.data.cacheline_aligned) } | 109 | .data.cacheline_aligned : { |
110 | *(.data.cacheline_aligned) | ||
111 | } | ||
102 | 112 | ||
103 | /* PA-RISC locks requires 16-byte alignment */ | 113 | /* PA-RISC locks requires 16-byte alignment */ |
104 | . = ALIGN(16); | 114 | . = ALIGN(16); |
105 | .data.lock_aligned : { *(.data.lock_aligned) } | 115 | .data.lock_aligned : { |
116 | *(.data.lock_aligned) | ||
117 | } | ||
106 | 118 | ||
107 | . = ALIGN(ASM_PAGE_SIZE); | 119 | /* nosave data is really only used for software suspend...it's here |
108 | /* nosave data is really only used for software suspend...it's here | 120 | * just in case we ever implement it |
109 | * just in case we ever implement it */ | 121 | */ |
110 | __nosave_begin = .; | 122 | . = ALIGN(ASM_PAGE_SIZE); |
111 | .data_nosave : { *(.data.nosave) } | 123 | __nosave_begin = .; |
112 | . = ALIGN(ASM_PAGE_SIZE); | 124 | .data_nosave : { |
113 | __nosave_end = .; | 125 | *(.data.nosave) |
126 | } | ||
127 | . = ALIGN(ASM_PAGE_SIZE); | ||
128 | __nosave_end = .; | ||
114 | 129 | ||
115 | _edata = .; /* End of data section */ | 130 | /* End of data section */ |
131 | _edata = .; | ||
116 | 132 | ||
117 | __bss_start = .; /* BSS */ | 133 | /* BSS */ |
118 | /* page table entries need to be PAGE_SIZE aligned */ | 134 | __bss_start = .; |
119 | . = ALIGN(ASM_PAGE_SIZE); | 135 | /* page table entries need to be PAGE_SIZE aligned */ |
120 | .data.vmpages : { | 136 | . = ALIGN(ASM_PAGE_SIZE); |
121 | *(.data.vm0.pmd) | 137 | .data.vmpages : { |
122 | *(.data.vm0.pgd) | 138 | *(.data.vm0.pmd) |
123 | *(.data.vm0.pte) | 139 | *(.data.vm0.pgd) |
140 | *(.data.vm0.pte) | ||
141 | } | ||
142 | .bss : { | ||
143 | *(.bss) | ||
144 | *(COMMON) | ||
124 | } | 145 | } |
125 | .bss : { *(.bss) *(COMMON) } | 146 | __bss_stop = .; |
126 | __bss_stop = .; | ||
127 | 147 | ||
128 | 148 | ||
129 | /* assembler code expects init_task to be 16k aligned */ | 149 | /* assembler code expects init_task to be 16k aligned */ |
130 | . = ALIGN(16384); /* init_task */ | 150 | . = ALIGN(16384); |
131 | .data.init_task : { *(.data.init_task) } | 151 | /* init_task */ |
152 | .data.init_task : { | ||
153 | *(.data.init_task) | ||
154 | } | ||
132 | 155 | ||
133 | /* The interrupt stack is currently partially coded, but not yet | 156 | /* The interrupt stack is currently partially coded, but not yet |
134 | * implemented */ | 157 | * implemented |
135 | . = ALIGN(16384); | 158 | */ |
136 | init_istack : { *(init_istack) } | 159 | . = ALIGN(16384); |
160 | init_istack : { | ||
161 | *(init_istack) | ||
162 | } | ||
137 | 163 | ||
138 | #ifdef CONFIG_64BIT | 164 | #ifdef CONFIG_64BIT |
139 | . = ALIGN(16); /* Linkage tables */ | 165 | . = ALIGN(16); |
140 | .opd : { *(.opd) } PROVIDE (__gp = .); | 166 | /* Linkage tables */ |
141 | .plt : { *(.plt) } | 167 | .opd : { |
142 | .dlt : { *(.dlt) } | 168 | *(.opd) |
169 | } PROVIDE (__gp = .); | ||
170 | .plt : { | ||
171 | *(.plt) | ||
172 | } | ||
173 | .dlt : { | ||
174 | *(.dlt) | ||
175 | } | ||
143 | #endif | 176 | #endif |
144 | 177 | ||
145 | /* reserve space for interrupt stack by aligning __init* to 16k */ | 178 | /* reserve space for interrupt stack by aligning __init* to 16k */ |
146 | . = ALIGN(16384); | 179 | . = ALIGN(16384); |
147 | __init_begin = .; | 180 | __init_begin = .; |
148 | .init.text : { | 181 | .init.text : { |
149 | _sinittext = .; | 182 | _sinittext = .; |
150 | *(.init.text) | 183 | *(.init.text) |
151 | _einittext = .; | 184 | _einittext = .; |
152 | } | 185 | } |
153 | .init.data : { *(.init.data) } | 186 | .init.data : { |
154 | . = ALIGN(16); | 187 | *(.init.data) |
155 | __setup_start = .; | 188 | } |
156 | .init.setup : { *(.init.setup) } | 189 | . = ALIGN(16); |
157 | __setup_end = .; | 190 | .init.setup : { |
158 | __initcall_start = .; | 191 | __setup_start = .; |
159 | .initcall.init : { | 192 | *(.init.setup) |
160 | INITCALLS | 193 | __setup_end = .; |
161 | } | 194 | } |
162 | __initcall_end = .; | 195 | .initcall.init : { |
163 | __con_initcall_start = .; | 196 | __initcall_start = .; |
164 | .con_initcall.init : { *(.con_initcall.init) } | 197 | INITCALLS |
165 | __con_initcall_end = .; | 198 | __initcall_end = .; |
166 | SECURITY_INIT | 199 | } |
167 | /* alternate instruction replacement. This is a mechanism x86 uses | 200 | .con_initcall.init : { |
168 | * to detect the CPU type and replace generic instruction sequences | 201 | __con_initcall_start = .; |
169 | * with CPU specific ones. We don't currently do this in PA, but | 202 | *(.con_initcall.init) |
170 | * it seems like a good idea... */ | 203 | __con_initcall_end = .; |
171 | . = ALIGN(4); | 204 | } |
172 | __alt_instructions = .; | 205 | SECURITY_INIT |
173 | .altinstructions : { *(.altinstructions) } | 206 | |
174 | __alt_instructions_end = .; | 207 | /* alternate instruction replacement. This is a mechanism x86 uses |
175 | .altinstr_replacement : { *(.altinstr_replacement) } | 208 | * to detect the CPU type and replace generic instruction sequences |
176 | /* .exit.text is discard at runtime, not link time, to deal with references | 209 | * with CPU specific ones. We don't currently do this in PA, but |
177 | from .altinstructions and .eh_frame */ | 210 | * it seems like a good idea... |
178 | .exit.text : { *(.exit.text) } | 211 | */ |
179 | .exit.data : { *(.exit.data) } | 212 | . = ALIGN(4); |
213 | .altinstructions : { | ||
214 | __alt_instructions = .; | ||
215 | *(.altinstructions) | ||
216 | __alt_instructions_end = .; | ||
217 | } | ||
218 | .altinstr_replacement : { | ||
219 | *(.altinstr_replacement) | ||
220 | } | ||
221 | |||
222 | /* .exit.text is discard at runtime, not link time, to deal with references | ||
223 | * from .altinstructions and .eh_frame | ||
224 | */ | ||
225 | .exit.text : { | ||
226 | *(.exit.text) | ||
227 | } | ||
228 | .exit.data : { | ||
229 | *(.exit.data) | ||
230 | } | ||
180 | #ifdef CONFIG_BLK_DEV_INITRD | 231 | #ifdef CONFIG_BLK_DEV_INITRD |
181 | . = ALIGN(ASM_PAGE_SIZE); | 232 | . = ALIGN(ASM_PAGE_SIZE); |
182 | __initramfs_start = .; | 233 | .init.ramfs : { |
183 | .init.ramfs : { *(.init.ramfs) } | 234 | __initramfs_start = .; |
184 | __initramfs_end = .; | 235 | *(.init.ramfs) |
236 | __initramfs_end = .; | ||
237 | } | ||
185 | #endif | 238 | #endif |
186 | 239 | ||
187 | PERCPU(ASM_PAGE_SIZE) | 240 | PERCPU(ASM_PAGE_SIZE) |
241 | . = ALIGN(ASM_PAGE_SIZE); | ||
242 | __init_end = .; | ||
243 | /* freed after init ends here */ | ||
244 | _end = . ; | ||
188 | 245 | ||
189 | . = ALIGN(ASM_PAGE_SIZE); | 246 | /* Sections to be discarded */ |
190 | __init_end = .; | 247 | /DISCARD/ : { |
191 | /* freed after init ends here */ | 248 | *(.exitcall.exit) |
192 | |||
193 | _end = . ; | ||
194 | |||
195 | /* Sections to be discarded */ | ||
196 | /DISCARD/ : { | ||
197 | *(.exitcall.exit) | ||
198 | #ifdef CONFIG_64BIT | 249 | #ifdef CONFIG_64BIT |
199 | /* temporary hack until binutils is fixed to not emit these | 250 | /* temporary hack until binutils is fixed to not emit these |
200 | for static binaries */ | 251 | * for static binaries |
201 | *(.interp) | 252 | */ |
202 | *(.dynsym) | 253 | *(.interp) |
203 | *(.dynstr) | 254 | *(.dynsym) |
204 | *(.dynamic) | 255 | *(.dynstr) |
205 | *(.hash) | 256 | *(.dynamic) |
206 | *(.gnu.hash) | 257 | *(.hash) |
258 | *(.gnu.hash) | ||
207 | #endif | 259 | #endif |
208 | } | 260 | } |
209 | 261 | ||
210 | STABS_DEBUG | 262 | STABS_DEBUG |
211 | .note 0 : { *(.note) } | 263 | .note 0 : { *(.note) } |
212 | |||
213 | } | 264 | } |