Diffstat (limited to 'arch/x86/kernel/vmlinux.lds.S')

 arch/x86/kernel/vmlinux.lds.S | 95
 1 file changed, 68 insertions(+), 27 deletions(-)
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index d0bb52296fa3..89aed99aafce 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -69,7 +69,7 @@ jiffies_64 = jiffies;
 
 PHDRS {
 	text PT_LOAD FLAGS(5);          /* R_E */
-	data PT_LOAD FLAGS(7);          /* RWE */
+	data PT_LOAD FLAGS(6);          /* RW_ */
 #ifdef CONFIG_X86_64
 	user PT_LOAD FLAGS(5);          /* R_E */
 #ifdef CONFIG_SMP
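A note on the FLAGS() values in this hunk: they are the ELF program-header permission bits, PF_X = 1, PF_W = 2, PF_R = 4. So FLAGS(5) = PF_R | PF_X (read + execute), the old FLAGS(7) = PF_R | PF_W | PF_X (read + write + execute), and the new FLAGS(6) = PF_R | PF_W drops the execute bit from the data segment, exactly as the R_E / RWE / RW_ comments indicate.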
@@ -105,6 +105,7 @@ SECTIONS
 		SCHED_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
+		ENTRY_TEXT
 		IRQENTRY_TEXT
 		*(.fixup)
 		*(.gnu.warning)
@@ -116,6 +117,10 @@ SECTIONS
 
 	EXCEPTION_TABLE(16) :text = 0x9090
 
+#if defined(CONFIG_DEBUG_RODATA)
+	/* .text should occupy whole number of pages */
+	. = ALIGN(PAGE_SIZE);
+#endif
 	X64_ALIGN_DEBUG_RODATA_BEGIN
 	RO_DATA(PAGE_SIZE)
 	X64_ALIGN_DEBUG_RODATA_END
@@ -156,6 +161,12 @@ SECTIONS
 
 #define VVIRT_OFFSET (VSYSCALL_ADDR - __vsyscall_0)
 #define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
+#define EMIT_VVAR(x, offset) .vsyscall_var_ ## x	\
+	ADDR(.vsyscall_0) + offset			\
+	: AT(VLOAD(.vsyscall_var_ ## x)) {		\
+		*(.vsyscall_var_ ## x)			\
+	}						\
+	x = VVIRT(.vsyscall_var_ ## x);
 
 	. = ALIGN(4096);
 	__vsyscall_0 = .;
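EMIT_VVAR() above is the linker-script half of the new vvar mechanism; the header half lives in asm/vvar.h, which a later hunk of this diff includes under __VVAR_KERNEL_LDS. The sketch below is illustrative only, not copied from the real header: the DECLARE_VVAR name, the offset value, and the exact conditional are assumptions, shown to make clear how one macro can both emit a per-variable output section in the linker script and declare an ordinary extern for C code.

/* Illustrative sketch only -- the real asm/vvar.h may differ. */
#if defined(__VVAR_KERNEL_LDS)
/* Seen from the linker script: emit one output section per variable. */
#define DECLARE_VVAR(offset, type, name)	EMIT_VVAR(name, offset)
#else
/* Seen from C code: the variable is just an ordinary extern. */
#define DECLARE_VVAR(offset, type, name)	extern type name;
#endif

/* Hypothetical example: place 'jiffies' at a fixed offset inside the
 * vsyscall page, so kernel and user-mapped vsyscall code agree on its
 * address without per-variable boilerplate in the linker script. */
DECLARE_VVAR(0, volatile unsigned long, jiffies)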
@@ -170,18 +181,6 @@ SECTIONS
 		*(.vsyscall_fn)
 	}
 
-	. = ALIGN(L1_CACHE_BYTES);
-	.vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data)) {
-		*(.vsyscall_gtod_data)
-	}
-
-	vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data);
-	.vsyscall_clock : AT(VLOAD(.vsyscall_clock)) {
-		*(.vsyscall_clock)
-	}
-	vsyscall_clock = VVIRT(.vsyscall_clock);
-
-
 	.vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) {
 		*(.vsyscall_1)
 	}
@@ -189,21 +188,14 @@ SECTIONS
 		*(.vsyscall_2)
 	}
 
-	.vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) {
-		*(.vgetcpu_mode)
-	}
-	vgetcpu_mode = VVIRT(.vgetcpu_mode);
-
-	. = ALIGN(L1_CACHE_BYTES);
-	.jiffies : AT(VLOAD(.jiffies)) {
-		*(.jiffies)
-	}
-	jiffies = VVIRT(.jiffies);
-
 	.vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
 		*(.vsyscall_3)
 	}
 
+#define __VVAR_KERNEL_LDS
+#include <asm/vvar.h>
+#undef __VVAR_KERNEL_LDS
+
 	. = __vsyscall_0 + PAGE_SIZE;
 
 #undef VSYSCALL_ADDR
@@ -211,6 +203,7 @@ SECTIONS
 #undef VLOAD
 #undef VVIRT_OFFSET
 #undef VVIRT
+#undef EMIT_VVAR
 
 #endif /* CONFIG_X86_64 */
 
@@ -226,7 +219,7 @@ SECTIONS
 	 * output PHDR, so the next output section - .init.text - should
 	 * start another segment - init.
 	 */
-	PERCPU_VADDR(0, :percpu)
+	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
 #endif
 
 	INIT_TEXT_SECTION(PAGE_SIZE)
@@ -236,12 +229,30 @@ SECTIONS
 
 	INIT_DATA_SECTION(16)
 
+	/*
+	 * Code and data for a variety of lowlevel trampolines, to be
+	 * copied into base memory (< 1 MiB) during initialization.
+	 * Since it is copied early, the main copy can be discarded
+	 * afterwards.
+	 */
+	.x86_trampoline : AT(ADDR(.x86_trampoline) - LOAD_OFFSET) {
+		x86_trampoline_start = .;
+		*(.x86_trampoline)
+		x86_trampoline_end = .;
+	}
+
 	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
 		__x86_cpu_dev_start = .;
 		*(.x86_cpu_dev.init)
 		__x86_cpu_dev_end = .;
 	}
 
+	/*
+	 * start address and size of operations which during runtime
+	 * can be patched with virtualization friendly instructions or
+	 * baremetal native ones. Think page table operations.
+	 * Details in paravirt_types.h
+	 */
 	. = ALIGN(8);
 	.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
 		__parainstructions = .;
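The x86_trampoline_start/x86_trampoline_end symbols defined by the new .x86_trampoline section are consumed very early in boot: the blob has to end up below 1 MiB so that application processors, which start in real mode, can execute it, and the copy linked into the kernel image can then be thrown away with the rest of the init data. A minimal sketch of that copy is below; copy_trampoline_sketch() and its caller-supplied destination are invented for illustration, while the real kernel first reserves an aligned low-memory region through its boot allocator.

#include <linux/init.h>		/* __init */
#include <linux/string.h>	/* memcpy() */
#include <linux/types.h>	/* size_t */

/* Start/end of the blob collected by the .x86_trampoline section above. */
extern const char x86_trampoline_start[], x86_trampoline_end[];

/* Hypothetical helper: copy the trampoline to a buffer below 1 MiB. */
static void __init copy_trampoline_sketch(void *lowmem_dst)
{
	size_t size = x86_trampoline_end - x86_trampoline_start;

	memcpy(lowmem_dst, x86_trampoline_start, size);
}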
@@ -249,6 +260,11 @@ SECTIONS
 		__parainstructions_end = .;
 	}
 
+	/*
+	 * struct alt_instr entries. From the header (alternative.h):
+	 * "Alternative instructions for different CPU types or capabilities"
+	 * Think locking instructions on spinlocks.
+	 */
 	. = ALIGN(8);
 	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
 		__alt_instructions = .;
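The .altinstructions section collects struct alt_instr records, and .altinstr_replacement (next hunk) holds the substitute instruction bytes they point at. The sketch below shows, in heavily simplified form, how such a table is walked at boot; the struct layout, field names, and the cpu_has callback are stand-ins invented for the sketch, not the kernel's real definitions, and the real code also handles padding, cross-CPU synchronization, and more.

#include <linux/string.h>	/* memcpy(), memset() */

/* Simplified stand-in for struct alt_instr; the real layout differs. */
struct alt_instr_sketch {
	unsigned char	*instr;		/* original instruction in .text */
	unsigned char	*replacement;	/* bytes in .altinstr_replacement */
	unsigned short	cpuid;		/* CPU feature bit gating the swap */
	unsigned char	instrlen;	/* length of the original */
	unsigned char	replacementlen;	/* length of the replacement, <= instrlen */
};

/* Array bounds provided by the .altinstructions section above. */
extern struct alt_instr_sketch __alt_instructions[], __alt_instructions_end[];

static void apply_alternatives_sketch(int (*cpu_has)(unsigned short feature))
{
	struct alt_instr_sketch *a;

	for (a = __alt_instructions; a < __alt_instructions_end; a++) {
		if (!cpu_has(a->cpuid))
			continue;		/* keep the original */

		memcpy(a->instr, a->replacement, a->replacementlen);
		/* pad what is left of the original with 1-byte NOPs */
		memset(a->instr + a->replacementlen, 0x90,
		       a->instrlen - a->replacementlen);
	}
}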
@@ -256,11 +272,36 @@ SECTIONS
 		__alt_instructions_end = .;
 	}
 
+	/*
+	 * And here are the replacement instructions. The linker sticks
+	 * them as binary blobs. The .altinstructions has enough data to
+	 * get the address and the length of them to patch the kernel safely.
+	 */
 	.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
 		*(.altinstr_replacement)
 	}
 
 	/*
+	 * struct iommu_table_entry entries are injected in this section.
+	 * It is an array of IOMMUs which during run time gets sorted depending
+	 * on its dependency order. After rootfs_initcall is complete
+	 * this section can be safely removed.
+	 */
+	.iommu_table : AT(ADDR(.iommu_table) - LOAD_OFFSET) {
+		__iommu_table = .;
+		*(.iommu_table)
+		__iommu_table_end = .;
+	}
+
+	. = ALIGN(8);
+	.apicdrivers : AT(ADDR(.apicdrivers) - LOAD_OFFSET) {
+		__apicdrivers = .;
+		*(.apicdrivers);
+		__apicdrivers_end = .;
+	}
+
+	. = ALIGN(8);
+	/*
 	 * .exit.text is discarded at runtime, not link time, to deal with
 	 * references from .altinstructions and .eh_frame
 	 */
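Both new sections above, .iommu_table and .apicdrivers, rely on the same "array built by the linker" pattern: each translation unit drops an object into a named input section, and the __start/__end style symbols defined here let boot code walk the result as a plain array. A small self-contained sketch of that pattern follows; the apic_sketch type, apic_driver_sketch() macro, and pick_apic_sketch() function are invented names standing in for the kernel's more involved apic_driver()/IOMMU_INIT machinery, while __apicdrivers and __apicdrivers_end are the symbols really defined by the script above.

#include <stddef.h>	/* NULL, for this standalone sketch */

struct apic_sketch {
	const char *name;
	int (*probe)(void);	/* non-zero if this APIC driver fits the CPU */
};

/* Bounds of the array assembled by the .apicdrivers section above. */
extern struct apic_sketch *__apicdrivers[], *__apicdrivers_end[];

/* Registration side: each driver drops one pointer into .apicdrivers. */
#define apic_driver_sketch(drv)						\
	static struct apic_sketch *__apicdriver_##drv			\
	__attribute__((used, section(".apicdrivers"))) = &(drv)

/* Consumer side: walk the linker-built array, first matching probe wins. */
static struct apic_sketch *pick_apic_sketch(void)
{
	struct apic_sketch **p;

	for (p = __apicdrivers; p < __apicdrivers_end; p++)
		if ((*p)->probe())
			return *p;
	return NULL;
}

.iommu_table works on the same principle, except that its entries carry dependency information and are sorted at runtime before being walked, as the comment in the hunk notes.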
@@ -273,7 +314,7 @@ SECTIONS
 	}
 
 #if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
-	PERCPU(PAGE_SIZE)
+	PERCPU_SECTION(INTERNODE_CACHE_BYTES)
 #endif
 
 	. = ALIGN(PAGE_SIZE);
@@ -307,7 +348,7 @@ SECTIONS
 		__bss_start = .;
 		*(.bss..page_aligned)
 		*(.bss)
-		. = ALIGN(4);
+		. = ALIGN(PAGE_SIZE);
 		__bss_stop = .;
 	}
 