Diffstat (limited to 'arch/x86/kernel/vmlinux.lds.S')
-rw-r--r--	arch/x86/kernel/vmlinux.lds.S	142
1 file changed, 55 insertions(+), 87 deletions(-)
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 59f31d2dd435..9fc178255c04 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -46,11 +46,10 @@ PHDRS {
 	data PT_LOAD FLAGS(7);          /* RWE */
 #ifdef CONFIG_X86_64
 	user PT_LOAD FLAGS(7);          /* RWE */
-	data.init PT_LOAD FLAGS(7);     /* RWE */
 #ifdef CONFIG_SMP
 	percpu PT_LOAD FLAGS(7);        /* RWE */
 #endif
-	data.init2 PT_LOAD FLAGS(7);    /* RWE */
+	init PT_LOAD FLAGS(7);          /* RWE */
 #endif
 	note PT_NOTE FLAGS(0);          /* ___ */
 }
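The PHDRS hunk is the heart of the patch: the two ad-hoc data.init and data.init2 PT_LOAD segments are folded into a single init segment. For readers less familiar with linker-script program headers, here is a minimal, hypothetical sketch (not taken from this file) of the mechanism: an output section is steered into a segment with a :phdr suffix, and subsequent sections stay in that segment until another :phdr appears.

        PHDRS {
                text PT_LOAD FLAGS(5);          /* R_E */
                init PT_LOAD FLAGS(7);          /* RWE */
        }
        SECTIONS {
                .text : { *(.text) } :text     /* placed in segment "text" */
                .init.text : { *(.init.text) } :init   /* switches to "init";
                                                   later sections follow until
                                                   the next :phdr annotation */
        }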
@@ -103,65 +102,43 @@ SECTIONS
 		__stop___ex_table = .;
 	} :text = 0x9090
 
-	RODATA
+	RO_DATA(PAGE_SIZE)
 
 	/* Data */
-	. = ALIGN(PAGE_SIZE);
 	.data : AT(ADDR(.data) - LOAD_OFFSET) {
 		/* Start of data section */
 		_sdata = .;
-		DATA_DATA
-		CONSTRUCTORS
-	} :data
+
+		/* init_task */
+		INIT_TASK_DATA(THREAD_SIZE)
 
 #ifdef CONFIG_X86_32
-	/* 32 bit has nosave before _edata */
-	. = ALIGN(PAGE_SIZE);
-	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
-		__nosave_begin = .;
-		*(.data.nosave)
-		. = ALIGN(PAGE_SIZE);
-		__nosave_end = .;
-	}
+		/* 32 bit has nosave before _edata */
+		NOSAVE_DATA
 #endif
 
-	. = ALIGN(PAGE_SIZE);
-	.data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
-		*(.data.page_aligned)
+		PAGE_ALIGNED_DATA(PAGE_SIZE)
 		*(.data.idt)
-	}
 
-#ifdef CONFIG_X86_32
-	. = ALIGN(32);
-#else
-	. = ALIGN(PAGE_SIZE);
-	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
-#endif
-	.data.cacheline_aligned :
-		AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
-		*(.data.cacheline_aligned)
-	}
+		CACHELINE_ALIGNED_DATA(CONFIG_X86_L1_CACHE_BYTES)
 
-	/* rarely changed data like cpu maps */
-#ifdef CONFIG_X86_32
-	. = ALIGN(32);
-#else
-	. = ALIGN(CONFIG_X86_INTERNODE_CACHE_BYTES);
-#endif
-	.data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
-		*(.data.read_mostly)
+		DATA_DATA
+		CONSTRUCTORS
+
+		/* rarely changed data like cpu maps */
+		READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
 
 		/* End of data section */
 		_edata = .;
-	}
+	} :data
 
 #ifdef CONFIG_X86_64
 
 #define VSYSCALL_ADDR (-10*1024*1024)
-#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data.read_mostly) + \
-			SIZEOF(.data.read_mostly) + 4095) & ~(4095))
-#define VSYSCALL_VIRT_ADDR ((ADDR(.data.read_mostly) + \
-			SIZEOF(.data.read_mostly) + 4095) & ~(4095))
+#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data) + SIZEOF(.data) + \
+			PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
+#define VSYSCALL_VIRT_ADDR ((ADDR(.data) + SIZEOF(.data) + \
+			PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
 
 #define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR)
 #define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)
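The VSYSCALL address macros previously rounded past the end of .data.read_mostly with a hard-coded 4095; now that all the data pieces live in one .data output section, they round past .data using PAGE_SIZE. The bit expression is the standard power-of-two align-up. A hypothetical stand-alone illustration (the macro name is mine, not the patch's):

        /* Round x up to the next multiple of a power-of-two alignment a.
         * E.g. with a = PAGE_SIZE = 4096:
         *   ALIGN_UP(0x1001, 4096) == 0x2000
         *   ALIGN_UP(0x2000, 4096) == 0x2000  (already aligned: unchanged)
         */
        #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))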
@@ -227,35 +204,29 @@ SECTIONS
 
 #endif /* CONFIG_X86_64 */
 
-	/* init_task */
-	. = ALIGN(THREAD_SIZE);
-	.data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
-		*(.data.init_task)
+	/* Init code and data - will be freed after init */
+	. = ALIGN(PAGE_SIZE);
+	.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
+		__init_begin = .; /* paired with __init_end */
 	}
-#ifdef CONFIG_X86_64
-	:data.init
-#endif
 
+#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
 	/*
-	 * smp_locks might be freed after init
-	 * start/end must be page aligned
+	 * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
+	 * output PHDR, so the next output section - .init.text - should
+	 * start another segment - init.
 	 */
-	. = ALIGN(PAGE_SIZE);
-	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
-		__smp_locks = .;
-		*(.smp_locks)
-		__smp_locks_end = .;
-		. = ALIGN(PAGE_SIZE);
-	}
+	PERCPU_VADDR(0, :percpu)
+#endif
 
-	/* Init code and data - will be freed after init */
-	. = ALIGN(PAGE_SIZE);
 	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
-		__init_begin = .; /* paired with __init_end */
 		_sinittext = .;
 		INIT_TEXT
 		_einittext = .;
 	}
+#ifdef CONFIG_X86_64
+	:init
+#endif
 
 	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
 		INIT_DATA
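Two idioms are at work in this hunk. First, .init.begin contains nothing but a symbol assignment; an output section like this simply pins __init_begin at the current page-aligned dot. Second, PERCPU_VADDR(0, :percpu) links the percpu data at a zero-based virtual address while loading it at its normal physical position, and in doing so switches the active PHDR, which is why .init.text must explicitly switch to :init afterwards. A simplified sketch of the zero-based percpu placement follows; this is my own approximation, the real macro lives in include/asm-generic/vmlinux.lds.h:

        /* Simplified sketch: link percpu data at virtual address 0 so
         * per-CPU offsets are zero-based, while AT() keeps the load address
         * inside the normal image.  Each CPU then reaches its own copy as
         * base + zero-based offset. */
        __per_cpu_load = .;
        .data.percpu 0 : AT(__per_cpu_load - LOAD_OFFSET) {
                __per_cpu_start = .;
                *(.data.percpu)
                __per_cpu_end = .;
        } :percpu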
@@ -326,17 +297,7 @@ SECTIONS
 	}
 #endif
 
-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
-	/*
-	 * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
-	 * output PHDR, so the next output section - __data_nosave - should
-	 * start another section data.init2.  Also, pda should be at the head of
-	 * percpu area.  Preallocate it and define the percpu offset symbol
-	 * so that it can be accessed as a percpu variable.
-	 */
-	. = ALIGN(PAGE_SIZE);
-	PERCPU_VADDR(0, :percpu)
-#else
+#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
 	PERCPU(PAGE_SIZE)
 #endif
 
@@ -347,15 +308,22 @@ SECTIONS
 		__init_end = .;
 	}
 
+	/*
+	 * smp_locks might be freed after init
+	 * start/end must be page aligned
+	 */
+	. = ALIGN(PAGE_SIZE);
+	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
+		__smp_locks = .;
+		*(.smp_locks)
+		__smp_locks_end = .;
+		. = ALIGN(PAGE_SIZE);
+	}
+
 #ifdef CONFIG_X86_64
 	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
-		. = ALIGN(PAGE_SIZE);
-		__nosave_begin = .;
-		*(.data.nosave)
-		. = ALIGN(PAGE_SIZE);
-		__nosave_end = .;
-	} :data.init2
-	/* use another section data.init2, see PERCPU_VADDR() above */
+		NOSAVE_DATA
+	}
 #endif
 
 	/* BSS */
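On the 64-bit side the open-coded nosave block is replaced by the generic NOSAVE_DATA macro, just as on 32-bit earlier in the patch (and the .smp_locks section moves here, after __init_end, since it may be freed after init). Judging from the open-coded lines being removed, the macro, defined in include/asm-generic/vmlinux.lds.h, expands to essentially this:

        /* Page-aligned region whose contents are not saved across suspend;
         * __nosave_begin/__nosave_end bound it for the suspend/resume code. */
        #define NOSAVE_DATA                     \
                . = ALIGN(PAGE_SIZE);           \
                __nosave_begin = .;             \
                *(.data.nosave)                 \
                . = ALIGN(PAGE_SIZE);           \
                __nosave_end = .;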
@@ -393,8 +361,8 @@ SECTIONS
 
 
 #ifdef CONFIG_X86_32
-ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
-       "kernel image bigger than KERNEL_IMAGE_SIZE")
+. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
+	   "kernel image bigger than KERNEL_IMAGE_SIZE");
 #else
 /*
  * Per-cpu symbols which need to be offset from __per_cpu_load
@@ -407,12 +375,12 @@ INIT_PER_CPU(irq_stack_union);
 /*
  * Build-time check on the image size:
  */
-ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
-       "kernel image bigger than KERNEL_IMAGE_SIZE")
+. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
+	   "kernel image bigger than KERNEL_IMAGE_SIZE");
 
 #ifdef CONFIG_SMP
-ASSERT((per_cpu__irq_stack_union == 0),
-       "irq_stack_union is not at start of per-cpu area");
+. = ASSERT((per_cpu__irq_stack_union == 0),
+	   "irq_stack_union is not at start of per-cpu area");
 #endif
 
 #endif /* CONFIG_X86_32 */
@@ -420,7 +388,7 @@ ASSERT((per_cpu__irq_stack_union == 0),
 #ifdef CONFIG_KEXEC
 #include <asm/kexec.h>
 
-ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
-       "kexec control code size is too big")
+. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
+	   "kexec control code size is too big");
 #endif
 
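The final hunks rewrite each bare ASSERT(...) statement as an assignment, ". = ASSERT(...);". In ld, ASSERT(exp, message) aborts the link with message when exp is zero and otherwise evaluates to exp, so it is legal anywhere an expression is; the assignment form is presumably used here for compatibility with linker versions that are stricter about free-standing ASSERT statements. A hypothetical minimal example of the expression form (the symbol name is mine, not from this file):

        /* Fail the link, rather than the boot, if the image outgrows its
         * budget.  ASSERT() yields the value of its expression, so it may
         * appear on the right-hand side of an assignment. */
        _image_size_ok = ASSERT(_end - _text <= KERNEL_IMAGE_SIZE,
                                "kernel image bigger than KERNEL_IMAGE_SIZE");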