path: root/arch/ia64
author		Sam Ravnborg <sam@ravnborg.org>	2010-06-19 11:19:25 -0400
committer	Tony Luck <tony.luck@intel.com>	2010-06-21 18:08:44 -0400
commit		7b313fdf2303ddfaea6ae1aad09fb3ccdb884bcc (patch)
tree		8e7c02228c74d76d7fab33f1c60b4de46e6144af /arch/ia64
parent		7e27d6e778cd87b6f2415515d7127eba53fe5d02 (diff)
[IA64] beautify vmlinux.lds.h
Use the same style as used for C code in vmlinux.lds.h. This is the same
format as has been gradually introduced for other architectures in the
kernel. This patch does not introduce any functional changes.

Note: Use "git diff -w" to suppress whitespace noise.

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch/ia64')
-rw-r--r--	arch/ia64/kernel/vmlinux.lds.S	362
1 file changed, 189 insertions(+), 173 deletions(-)
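The change is purely stylistic: opening braces move onto the output-section line and each input section gets its own line, matching the layout used in asm-generic/vmlinux.lds.h. A minimal before/after sketch, taken from the .opd output section in this patch (the tab indentation shown here is approximate, since the rendered page drops the original whitespace):

	/* old style: brace on its own line, body packed onto one line */
	.opd : AT(ADDR(.opd) - LOAD_OFFSET)
		{ *(.opd) }

	/* new style: brace on the section line, one input section per line */
	.opd : AT(ADDR(.opd) - LOAD_OFFSET) {
		*(.opd)
	}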
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index e07218a2577f..5a4d044dcb1c 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -6,204 +6,209 @@
 
 #include <asm-generic/vmlinux.lds.h>
 
-#define IVT_TEXT					\
-	VMLINUX_SYMBOL(__start_ivt_text) = .;		\
-	*(.text..ivt)					\
-	VMLINUX_SYMBOL(__end_ivt_text) = .;
-
 OUTPUT_FORMAT("elf64-ia64-little")
 OUTPUT_ARCH(ia64)
 ENTRY(phys_start)
 jiffies = jiffies_64;
+
 PHDRS {
   code   PT_LOAD;
   percpu PT_LOAD;
   data   PT_LOAD;
   note   PT_NOTE;
   unwind 0x70000001;	/* PT_IA_64_UNWIND, but ld doesn't match the name */
 }
-SECTIONS
-{
-  /* unwind exit sections must be discarded before the rest of the
-     sections get included. */
-  /DISCARD/ : {
-	*(.IA_64.unwind.exit.text)
-	*(.IA_64.unwind_info.exit.text)
-	*(.comment)
-	*(.note)
-  }
-
-  v = PAGE_OFFSET;	/* this symbol is here to make debugging easier... */
-  phys_start = _start - LOAD_OFFSET;
-
-  code : { } :code
-  . = KERNEL_START;
-
-  _text = .;
-  _stext = .;
-
-  .text : AT(ADDR(.text) - LOAD_OFFSET)
-    {
-	IVT_TEXT
-	TEXT_TEXT
-	SCHED_TEXT
-	LOCK_TEXT
-	KPROBES_TEXT
-	*(.gnu.linkonce.t*)
-    }
-  .text2 : AT(ADDR(.text2) - LOAD_OFFSET)
-	{ *(.text2) }
-#ifdef CONFIG_SMP
-  .text..lock : AT(ADDR(.text..lock) - LOAD_OFFSET)
-	{ *(.text..lock) }
-#endif
-  _etext = .;
 
-  /* Read-only data */
+SECTIONS {
+	/*
+	 * unwind exit sections must be discarded before
+	 * the rest of the sections get included.
+	 */
+	/DISCARD/ : {
+		*(.IA_64.unwind.exit.text)
+		*(.IA_64.unwind_info.exit.text)
+		*(.comment)
+		*(.note)
+	}
 
-  NOTES :code :note		/* put .notes in text and mark in PT_NOTE */
-  code_continues : {} :code   /* switch back to regular program... */
+	v = PAGE_OFFSET;	/* this symbol is here to make debugging easier... */
+	phys_start = _start - LOAD_OFFSET;
+
+	code : {
+	} :code
+	. = KERNEL_START;
+
+	_text = .;
+	_stext = .;
+
+	.text : AT(ADDR(.text) - LOAD_OFFSET) {
+		__start_ivt_text = .;
+		*(.text..ivt)
+		__end_ivt_text = .;
+		TEXT_TEXT
+		SCHED_TEXT
+		LOCK_TEXT
+		KPROBES_TEXT
+		*(.gnu.linkonce.t*)
+	}
 
-  EXCEPTION_TABLE(16)
+	.text2 : AT(ADDR(.text2) - LOAD_OFFSET) {
+		*(.text2)
+	}
 
-  /* MCA table */
-  . = ALIGN(16);
-  __mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET)
-	{
-	__start___mca_table = .;
-	*(__mca_table)
-	__stop___mca_table = .;
+#ifdef CONFIG_SMP
+	.text..lock : AT(ADDR(.text..lock) - LOAD_OFFSET) {
+		*(.text..lock)
+	}
+#endif
+	_etext = .;
+
+	/*
+	 * Read-only data
+	 */
+	NOTES :code :note	/* put .notes in text and mark in PT_NOTE */
+	code_continues : {
+	} : code	/* switch back to regular program... */
+
+	EXCEPTION_TABLE(16)
+
+	/* MCA table */
+	. = ALIGN(16);
+	__mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET) {
+		__start___mca_table = .;
+		*(__mca_table)
+		__stop___mca_table = .;
 	}
 
-  .data..patch.phys_stack_reg : AT(ADDR(.data..patch.phys_stack_reg) - LOAD_OFFSET)
-	{
-	__start___phys_stack_reg_patchlist = .;
-	*(.data..patch.phys_stack_reg)
-	__end___phys_stack_reg_patchlist = .;
+	.data..patch.phys_stack_reg : AT(ADDR(.data..patch.phys_stack_reg) - LOAD_OFFSET) {
+		__start___phys_stack_reg_patchlist = .;
+		*(.data..patch.phys_stack_reg)
+		__end___phys_stack_reg_patchlist = .;
 	}
 
-  /* Global data */
-  _data = .;
+	/*
+	 * Global data
+	 */
+	_data = .;
 
-  /* Unwind info & table: */
-  . = ALIGN(8);
-  .IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET)
-	{ *(.IA_64.unwind_info*) }
-  .IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET)
-	{
-	__start_unwind = .;
-	*(.IA_64.unwind*)
-	__end_unwind = .;
+	/* Unwind info & table: */
+	. = ALIGN(8);
+	.IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET) {
+		*(.IA_64.unwind_info*)
+	}
+	.IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET) {
+		__start_unwind = .;
+		*(.IA_64.unwind*)
+		__end_unwind = .;
 	} :code :unwind
-  code_continues2 : {} : code
+	code_continues2 : {
+	} : code
 
-  RODATA
+	RODATA
 
-  .opd : AT(ADDR(.opd) - LOAD_OFFSET)
-	{ *(.opd) }
-
-  /* Initialization code and data: */
+	.opd : AT(ADDR(.opd) - LOAD_OFFSET) {
+		*(.opd)
+	}
 
-  . = ALIGN(PAGE_SIZE);
-  __init_begin = .;
+	/*
+	 * Initialization code and data:
+	 */
+	. = ALIGN(PAGE_SIZE);
+	__init_begin = .;
 
-  INIT_TEXT_SECTION(PAGE_SIZE)
-  INIT_DATA_SECTION(16)
+	INIT_TEXT_SECTION(PAGE_SIZE)
+	INIT_DATA_SECTION(16)
 
-  .data..patch.vtop : AT(ADDR(.data..patch.vtop) - LOAD_OFFSET)
-	{
-	__start___vtop_patchlist = .;
-	*(.data..patch.vtop)
-	__end___vtop_patchlist = .;
+	.data..patch.vtop : AT(ADDR(.data..patch.vtop) - LOAD_OFFSET) {
+		__start___vtop_patchlist = .;
+		*(.data..patch.vtop)
+		__end___vtop_patchlist = .;
 	}
 
-  .data..patch.rse : AT(ADDR(.data..patch.rse) - LOAD_OFFSET)
-	{
-	__start___rse_patchlist = .;
-	*(.data..patch.rse)
-	__end___rse_patchlist = .;
+	.data..patch.rse : AT(ADDR(.data..patch.rse) - LOAD_OFFSET) {
+		__start___rse_patchlist = .;
+		*(.data..patch.rse)
+		__end___rse_patchlist = .;
 	}
 
-  .data..patch.mckinley_e9 : AT(ADDR(.data..patch.mckinley_e9) - LOAD_OFFSET)
-	{
-	__start___mckinley_e9_bundles = .;
-	*(.data..patch.mckinley_e9)
-	__end___mckinley_e9_bundles = .;
+	.data..patch.mckinley_e9 : AT(ADDR(.data..patch.mckinley_e9) - LOAD_OFFSET) {
+		__start___mckinley_e9_bundles = .;
+		*(.data..patch.mckinley_e9)
+		__end___mckinley_e9_bundles = .;
 	}
 
 #if defined(CONFIG_PARAVIRT)
 	. = ALIGN(16);
-	.paravirt_bundles : AT(ADDR(.paravirt_bundles) - LOAD_OFFSET)
-	{
-	__start_paravirt_bundles = .;
-	*(.paravirt_bundles)
-	__stop_paravirt_bundles = .;
-	}
-	. = ALIGN(16);
-	.paravirt_insts : AT(ADDR(.paravirt_insts) - LOAD_OFFSET)
-	{
-	__start_paravirt_insts = .;
-	*(.paravirt_insts)
-	__stop_paravirt_insts = .;
-	}
-	. = ALIGN(16);
-	.paravirt_branches : AT(ADDR(.paravirt_branches) - LOAD_OFFSET)
-	{
-	__start_paravirt_branches = .;
-	*(.paravirt_branches)
-	__stop_paravirt_branches = .;
+	.paravirt_bundles : AT(ADDR(.paravirt_bundles) - LOAD_OFFSET) {
+		__start_paravirt_bundles = .;
+		*(.paravirt_bundles)
+		__stop_paravirt_bundles = .;
+	}
+	. = ALIGN(16);
+	.paravirt_insts : AT(ADDR(.paravirt_insts) - LOAD_OFFSET) {
+		__start_paravirt_insts = .;
+		*(.paravirt_insts)
+		__stop_paravirt_insts = .;
+	}
+	. = ALIGN(16);
+	.paravirt_branches : AT(ADDR(.paravirt_branches) - LOAD_OFFSET) {
+		__start_paravirt_branches = .;
+		*(.paravirt_branches)
+		__stop_paravirt_branches = .;
 	}
 #endif
 
 #if defined(CONFIG_IA64_GENERIC)
 	/* Machine Vector */
 	. = ALIGN(16);
-	.machvec : AT(ADDR(.machvec) - LOAD_OFFSET)
-	{
-	machvec_start = .;
-	*(.machvec)
-	machvec_end = .;
+	.machvec : AT(ADDR(.machvec) - LOAD_OFFSET) {
+		machvec_start = .;
+		*(.machvec)
+		machvec_end = .;
 	}
 #endif
 
 #ifdef CONFIG_SMP
 	. = ALIGN(PERCPU_PAGE_SIZE);
 	__cpu0_per_cpu = .;
 	. = . + PERCPU_PAGE_SIZE;	/* cpu0 per-cpu space */
 #endif
 
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 
-	.data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET)
-	{
-	PAGE_ALIGNED_DATA(PAGE_SIZE)
-	. = ALIGN(PAGE_SIZE);
-	__start_gate_section = .;
-	*(.data..gate)
-	__stop_gate_section = .;
+	.data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
+		PAGE_ALIGNED_DATA(PAGE_SIZE)
+		. = ALIGN(PAGE_SIZE);
+		__start_gate_section = .;
+		*(.data..gate)
+		__stop_gate_section = .;
 #ifdef CONFIG_XEN
 	. = ALIGN(PAGE_SIZE);
 	__xen_start_gate_section = .;
 	*(.data..gate.xen)
 	__xen_stop_gate_section = .;
 #endif
 	}
-	. = ALIGN(PAGE_SIZE);	/* make sure the gate page doesn't expose
-				 * kernel data
-				 */
-
-	/* Per-cpu data: */
-	. = ALIGN(PERCPU_PAGE_SIZE);
-	PERCPU_VADDR(PERCPU_ADDR, :percpu)
-	__phys_per_cpu_start = __per_cpu_load;
-	. = __phys_per_cpu_start + PERCPU_PAGE_SIZE;	/* ensure percpu data fits
-							 * into percpu page size
-							 */
-
-	data : { } :data
-	.data : AT(ADDR(.data) - LOAD_OFFSET)
-	{
+	/*
+	 * make sure the gate page doesn't expose
+	 * kernel data
+	 */
+	. = ALIGN(PAGE_SIZE);
+
+	/* Per-cpu data: */
+	. = ALIGN(PERCPU_PAGE_SIZE);
+	PERCPU_VADDR(PERCPU_ADDR, :percpu)
+	__phys_per_cpu_start = __per_cpu_load;
+	/*
+	 * ensure percpu data fits
+	 * into percpu page size
+	 */
+	. = __phys_per_cpu_start + PERCPU_PAGE_SIZE;
+
+	data : {
+	} :data
+	.data : AT(ADDR(.data) - LOAD_OFFSET) {
 	INIT_TASK_DATA(PAGE_SIZE)
 	CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
 	READ_MOSTLY_DATA(SMP_CACHE_BYTES)
@@ -213,26 +218,37 @@ SECTIONS
 	CONSTRUCTORS
 	}
 
 	. = ALIGN(16);	/* gp must be 16-byte aligned for exc. table */
-	.got : AT(ADDR(.got) - LOAD_OFFSET)
-	{ *(.got.plt) *(.got) }
-	__gp = ADDR(.got) + 0x200000;
-	/* We want the small data sections together, so single-instruction offsets
-	   can access them all, and initialized data all before uninitialized, so
-	   we can shorten the on-disk segment size. */
-	.sdata : AT(ADDR(.sdata) - LOAD_OFFSET)
-	{ *(.sdata) *(.sdata1) *(.srdata) }
-	_edata = .;
+	.got : AT(ADDR(.got) - LOAD_OFFSET) {
+		*(.got.plt)
+		*(.got)
+	}
+	__gp = ADDR(.got) + 0x200000;
+
+	/*
+	 * We want the small data sections together,
+	 * so single-instruction offsets can access
+	 * them all, and initialized data all before
+	 * uninitialized, so we can shorten the
+	 * on-disk segment size.
+	 */
+	.sdata : AT(ADDR(.sdata) - LOAD_OFFSET) {
+		*(.sdata)
+		*(.sdata1)
+		*(.srdata)
+	}
+	_edata = .;
 
 	BSS_SECTION(0, 0, 0)
 
 	_end = .;
 
-	code : { } :code
+	code : {
+	} :code
 
 	STABS_DEBUG
 	DWARF_DEBUG
 
 	/* Default discards */
 	DISCARDS
 }