author     Barry Song <barry.song@analog.com>       2010-01-06 23:11:17 -0500
committer  Mike Frysinger <vapier@gentoo.org>       2010-03-09 00:30:48 -0500
commit     d86bfb1600db38e8387beee0aaab4263cfd728a2
tree       95e6f3d77d6dede480d74a7a4b87f2794a5b31fe /arch/blackfin/kernel/vmlinux.lds.S
parent     aad16f32284030907b4f105e92e5fb534fd272bc
Blackfin: initial XIP support
Signed-off-by: Barry Song <barry.song@analog.com>
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
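
For context (not part of the patch): with CONFIG_ROMKERNEL the kernel executes in
place from flash, so the script starts the image at CONFIG_ROM_BASE and leaves the
text and read-only data there, while .bss and .data are linked at RAM addresses
starting at CONFIG_BOOT_LOAD. The writable sections are stored in flash at the load
addresses the script exports (__data_lma/__data_len and, for the ROM kernel's
.init.data, __init_data_lma/__init_data_len), so early startup code has to copy
them into RAM before anything touches writable data. A minimal C sketch of that
copy, assuming the usual Blackfin underscore prefix on C symbol names and a
hypothetical helper name (the real port would do this much earlier, in assembly):

	#include <linux/init.h>
	#include <linux/string.h>

	/*
	 * C-visible names drop one leading underscore relative to the linker
	 * script, since the toolchain prefixes C identifiers with '_'
	 * (compare _jiffies = _jiffies_64; above).
	 */
	extern char _sdata[];                            /* RAM address of .data */
	extern char _data_lma[], _data_len[];            /* flash address / size */
	extern char _sinitdata[];                        /* RAM address of .init.data */
	extern char _init_data_lma[], _init_data_len[];  /* flash address / size */
	extern char __bss_start[], __bss_stop[];

	/* Hypothetical helper: copy the writable sections out of flash. */
	static void __init xip_relocate_writable_sections(void)
	{
		/* .data: flash load address -> RAM link address */
		memcpy(_sdata, _data_lma, (unsigned long)_data_len);

		/* .init.data: copied the same way, freed again after boot */
		memcpy(_sinitdata, _init_data_lma, (unsigned long)_init_data_len);

		/* .bss occupies no flash space; it only needs zeroing in RAM */
		memset(__bss_start, 0, __bss_stop - __bss_start);
	}

With CONFIG_RAMKERNEL none of this is needed: the boot loader has already placed
the whole image at CONFIG_BOOT_LOAD, so the sections load where they run.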
Diffstat (limited to 'arch/blackfin/kernel/vmlinux.lds.S')
 arch/blackfin/kernel/vmlinux.lds.S | 57 ++++++++++++++++++++++++++++++++++++
 1 file changed, 57 insertions(+), 0 deletions(-)
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index be4b1bbb3918..984c78172397 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -15,7 +15,12 @@ _jiffies = _jiffies_64;
 
 SECTIONS
 {
+#ifdef CONFIG_RAMKERNEL
 	. = CONFIG_BOOT_LOAD;
+#else
+	. = CONFIG_ROM_BASE;
+#endif
+
 	/* Neither the text, ro_data or bss section need to be aligned
 	 * So pack them back to back
 	 */
@@ -31,6 +36,12 @@ SECTIONS
 		LOCK_TEXT
 		IRQENTRY_TEXT
 		KPROBES_TEXT
+#ifdef CONFIG_ROMKERNEL
+		__sinittext = .;
+		INIT_TEXT
+		__einittext = .;
+		EXIT_TEXT
+#endif
 		*(.text.*)
 		*(.fixup)
 
@@ -50,8 +61,14 @@ SECTIONS
 
 	/* Just in case the first read only is a 32-bit access */
 	RO_DATA(4)
+	__rodata_end = .;
 
+#ifdef CONFIG_ROMKERNEL
+	. = CONFIG_BOOT_LOAD;
+	.bss : AT(__rodata_end)
+#else
 	.bss :
+#endif
 	{
 		. = ALIGN(4);
 		___bss_start = .;
@@ -67,7 +84,11 @@ SECTIONS
 		___bss_stop = .;
 	}
 
+#if defined(CONFIG_ROMKERNEL)
+	.data : AT(LOADADDR(.bss) + SIZEOF(.bss))
+#else
 	.data :
+#endif
 	{
 		__sdata = .;
 		/* This gets done first, so the glob doesn't suck it in */
@@ -94,6 +115,8 @@ SECTIONS
 
 		__edata = .;
 	}
+	__data_lma = LOADADDR(.data);
+	__data_len = SIZEOF(.data);
 
 	/* The init section should be last, so when we free it, it goes into
 	 * the general memory pool, and (hopefully) will decrease fragmentation
@@ -103,6 +126,7 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	___init_begin = .;
 
+#ifdef CONFIG_RAMKERNEL
 	INIT_TEXT_SECTION(PAGE_SIZE)
 
 	/* We have to discard exit text and such at runtime, not link time, to
@@ -125,6 +149,35 @@ SECTIONS
 	}
 
 	.text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
+#else
+	.init.data : AT(__data_lma + __data_len)
+	{
+		__sinitdata = .;
+		INIT_DATA
+		INIT_SETUP(16)
+		INIT_CALLS
+		CON_INITCALL
+		SECURITY_INITCALL
+		INIT_RAM_FS
+
+		. = ALIGN(4);
+		___per_cpu_load = .;
+		___per_cpu_start = .;
+		*(.data.percpu.first)
+		*(.data.percpu.page_aligned)
+		*(.data.percpu)
+		*(.data.percpu.shared_aligned)
+		___per_cpu_end = .;
+
+		EXIT_DATA
+		__einitdata = .;
+	}
+	__init_data_lma = LOADADDR(.init.data);
+	__init_data_len = SIZEOF(.init.data);
+	__init_data_end = .;
+
+	.text_l1 L1_CODE_START : AT(__init_data_lma + __init_data_len)
+#endif
 	{
 		. = ALIGN(4);
 		__stext_l1 = .;
@@ -205,7 +258,11 @@ SECTIONS
 	/* Force trailing alignment of our init section so that when we
 	 * free our init memory, we don't leave behind a partial page.
 	 */
+#ifdef CONFIG_RAMKERNEL
 	. = __l2_lma + __l2_len;
+#else
+	. = __init_data_end;
+#endif
 	. = ALIGN(PAGE_SIZE);
 	___init_end = .;
 
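
The "Force trailing alignment" comment in the last hunk exists because everything
between ___init_begin and ___init_end is handed back to the page allocator once
boot finishes, and that can only be done in whole pages. A rough sketch of that
free-init-memory pass as it looked in kernels of this era, with a hypothetical
function name (the actual Blackfin mm code may differ in detail):

	#include <linux/init.h>
	#include <linux/mm.h>

	/* C-visible spellings of the ___init_begin/___init_end markers above. */
	extern char __init_begin[], __init_end[];

	/* Illustrative sketch of returning the init region to the allocator. */
	static void free_init_pages_sketch(void)
	{
		unsigned long addr = (unsigned long)__init_begin;

		for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
			struct page *page = virt_to_page((void *)addr);

			ClearPageReserved(page);
			init_page_count(page);
			free_page(addr);
			totalram_pages++;
		}
	}

For the ROM kernel the location counter is reset to __init_data_end before the
final ALIGN(PAGE_SIZE), so the freed range ends just past the RAM copy of
.init.data; the init text itself stays in flash and is not freed.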