 arch/blackfin/include/asm/sections.h | 16
 arch/blackfin/kernel/setup.c         | 39
 arch/blackfin/kernel/vmlinux.lds.S   | 28
 3 files changed, 44 insertions(+), 39 deletions(-)
diff --git a/arch/blackfin/include/asm/sections.h b/arch/blackfin/include/asm/sections.h
index 1f5381fbb4a7..42f6c53c59c6 100644
--- a/arch/blackfin/include/asm/sections.h
+++ b/arch/blackfin/include/asm/sections.h
@@ -13,10 +13,18 @@ extern unsigned long memory_mtd_start, memory_mtd_end, mtd_size;
 extern unsigned long _ramstart, _ramend, _rambase;
 extern unsigned long memory_start, memory_end, physical_mem_end;
 
-extern char _stext_l1[], _etext_l1[], _sdata_l1[], _edata_l1[], _sbss_l1[],
-	_ebss_l1[], _l1_lma_start[], _sdata_b_l1[], _sbss_b_l1[], _ebss_b_l1[],
-	_stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[], _sbss_l2[],
-	_ebss_l2[], _l2_lma_start[];
+/*
+ * The weak markings on the lengths might seem weird, but this is required
+ * in order to make gcc accept the fact that these may actually have a value
+ * of 0 (since they aren't actually addresses, but sizes of sections).
+ */
+extern char _stext_l1[], _etext_l1[], _text_l1_lma[], __weak _text_l1_len[];
+extern char _sdata_l1[], _edata_l1[], _sbss_l1[], _ebss_l1[],
+	_data_l1_lma[], __weak _data_l1_len[];
+extern char _sdata_b_l1[], _edata_b_l1[], _sbss_b_l1[], _ebss_b_l1[],
+	_data_b_l1_lma[], __weak _data_b_l1_len[];
+extern char _stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[],
+	_sbss_l2[], _ebss_l2[], _l2_lma[], __weak _l2_len[];
 
 #include <asm/mem_map.h>
 
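A note on the __weak markings above: these symbols carry section sizes rather than addresses, so an empty section legitimately resolves them to 0. The following stand-alone sketch (hypothetical symbol name; the kernel spells the attribute __weak) shows the gcc assumption being worked around:

	#include <stdio.h>

	/* Hypothetical length symbol, as a linker script might define with
	 * __example_len = SIZEOF(.example); -- left undefined here so the
	 * weak reference resolves to 0, just like an empty section would. */
	extern char __attribute__((weak)) _example_len[];

	int main(void)
	{
		/* Without the weak attribute, gcc may assume the address of
		 * an extern symbol is never 0 and fold this test to "true"
		 * even when the section is empty. */
		if ((unsigned long)_example_len)
			printf("section is %lu bytes\n", (unsigned long)_example_len);
		else
			printf("section is empty\n");
		return 0;
	}

Linked with GNU ld, the undefined weak symbol resolves to 0 and this prints "section is empty"; without the weak marking the compiler is entitled to drop the zero check entirely.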
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index c202a44d1416..5fda77488319 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -178,10 +178,10 @@ void __init bfin_cache_init(void)
 
 void __init bfin_relocate_l1_mem(void)
 {
-	unsigned long l1_code_length;
-	unsigned long l1_data_a_length;
-	unsigned long l1_data_b_length;
-	unsigned long l2_length;
+	unsigned long text_l1_len = (unsigned long)_text_l1_len;
+	unsigned long data_l1_len = (unsigned long)_data_l1_len;
+	unsigned long data_b_l1_len = (unsigned long)_data_b_l1_len;
+	unsigned long l2_len = (unsigned long)_l2_len;
 
 	early_shadow_stamp();
 
@@ -201,30 +201,23 @@ void __init bfin_relocate_l1_mem(void)
 
 	blackfin_dma_early_init();
 
-	/* if necessary, copy _stext_l1 to _etext_l1 to L1 instruction SRAM */
-	l1_code_length = _etext_l1 - _stext_l1;
-	if (l1_code_length)
-		early_dma_memcpy(_stext_l1, _l1_lma_start, l1_code_length);
+	/* if necessary, copy L1 text to L1 instruction SRAM */
+	if (L1_CODE_LENGTH && text_l1_len)
+		early_dma_memcpy(_stext_l1, _text_l1_lma, text_l1_len);
 
-	/* if necessary, copy _sdata_l1 to _sbss_l1 to L1 data bank A SRAM */
-	l1_data_a_length = _sbss_l1 - _sdata_l1;
-	if (l1_data_a_length)
-		early_dma_memcpy(_sdata_l1, _l1_lma_start + l1_code_length, l1_data_a_length);
+	/* if necessary, copy L1 data to L1 data bank A SRAM */
+	if (L1_DATA_A_LENGTH && data_l1_len)
+		early_dma_memcpy(_sdata_l1, _data_l1_lma, data_l1_len);
 
-	/* if necessary, copy _sdata_b_l1 to _sbss_b_l1 to L1 data bank B SRAM */
-	l1_data_b_length = _sbss_b_l1 - _sdata_b_l1;
-	if (l1_data_b_length)
-		early_dma_memcpy(_sdata_b_l1, _l1_lma_start + l1_code_length +
-			l1_data_a_length, l1_data_b_length);
+	/* if necessary, copy L1 data B to L1 data bank B SRAM */
+	if (L1_DATA_B_LENGTH && data_b_l1_len)
+		early_dma_memcpy(_sdata_b_l1, _data_b_l1_lma, data_b_l1_len);
 
 	early_dma_memcpy_done();
 
-	/* if necessary, copy _stext_l2 to _edata_l2 to L2 SRAM */
-	if (L2_LENGTH != 0) {
-		l2_length = _sbss_l2 - _stext_l2;
-		if (l2_length)
-			memcpy(_stext_l2, _l2_lma_start, l2_length);
-	}
+	/* if necessary, copy L2 text/data to L2 SRAM */
+	if (L2_LENGTH && l2_len)
+		memcpy(_stext_l2, _l2_lma, l2_len);
 }
 
 /* add_memory_region to memmap */
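Each branch above now follows the same pattern: the linker script exports a run address (VMA), a load address (LMA) and a length for every on-chip section, and the boot code performs one bounded copy per section instead of accumulating offsets from a shared _l1_lma_start. A minimal sketch of that pattern, using hypothetical symbol names and plain memcpy standing in for early_dma_memcpy:

	#include <string.h>

	/* Hypothetical symbols a linker script would export for one section:
	 *   _example_run - VMA, where the section executes from (e.g. L1 SRAM)
	 *   _example_lma - LMA, where it is stored in the kernel image
	 *   _example_len - SIZEOF() of the section, hence the weak marking
	 */
	extern char _example_run[], _example_lma[];
	extern char __attribute__((weak)) _example_len[];

	static void relocate_example(void)
	{
		unsigned long len = (unsigned long)_example_len;

		/* Nothing to copy if the section is empty (len resolves to 0). */
		if (len)
			memcpy(_example_run, _example_lma, len);
	}

The extra L1_CODE_LENGTH/L1_DATA_*_LENGTH/L2_LENGTH checks in the patch additionally skip sections that the particular Blackfin part has no SRAM for.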
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 10e12539000e..01682eed771e 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -123,8 +123,6 @@ SECTIONS
 		EXIT_DATA
 	}
 
-	__l1_lma_start = .;
-
 	.text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
 	{
 		. = ALIGN(4);
@@ -136,9 +134,11 @@ SECTIONS
 		. = ALIGN(4);
 		__etext_l1 = .;
 	}
-	ASSERT (SIZEOF(.text_l1) <= L1_CODE_LENGTH, "L1 text overflow!")
+	__text_l1_lma = LOADADDR(.text_l1);
+	__text_l1_len = SIZEOF(.text_l1);
+	ASSERT (__text_l1_len <= L1_CODE_LENGTH, "L1 text overflow!")
 
-	.data_l1 L1_DATA_A_START : AT(LOADADDR(.text_l1) + SIZEOF(.text_l1))
+	.data_l1 L1_DATA_A_START : AT(__text_l1_lma + __text_l1_len)
 	{
 		. = ALIGN(4);
 		__sdata_l1 = .;
@@ -154,9 +154,11 @@ SECTIONS
 		. = ALIGN(4);
 		__ebss_l1 = .;
 	}
-	ASSERT (SIZEOF(.data_l1) <= L1_DATA_A_LENGTH, "L1 data A overflow!")
+	__data_l1_lma = LOADADDR(.data_l1);
+	__data_l1_len = SIZEOF(.data_l1);
+	ASSERT (__data_l1_len <= L1_DATA_A_LENGTH, "L1 data A overflow!")
 
-	.data_b_l1 L1_DATA_B_START : AT(LOADADDR(.data_l1) + SIZEOF(.data_l1))
+	.data_b_l1 L1_DATA_B_START : AT(__data_l1_lma + __data_l1_len)
 	{
 		. = ALIGN(4);
 		__sdata_b_l1 = .;
@@ -169,11 +171,11 @@ SECTIONS
 		. = ALIGN(4);
 		__ebss_b_l1 = .;
 	}
-	ASSERT (SIZEOF(.data_b_l1) <= L1_DATA_B_LENGTH, "L1 data B overflow!")
-
-	__l2_lma_start = LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1);
+	__data_b_l1_lma = LOADADDR(.data_b_l1);
+	__data_b_l1_len = SIZEOF(.data_b_l1);
+	ASSERT (__data_b_l1_len <= L1_DATA_B_LENGTH, "L1 data B overflow!")
 
-	.text_data_l2 L2_START : AT(LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1))
+	.text_data_l2 L2_START : AT(__data_b_l1_lma + __data_b_l1_len)
 	{
 		. = ALIGN(4);
 		__stext_l2 = .;
@@ -195,12 +197,14 @@ SECTIONS
 		. = ALIGN(4);
 		__ebss_l2 = .;
 	}
-	ASSERT (SIZEOF(.text_data_l2) <= L2_LENGTH, "L2 overflow!")
+	__l2_lma = LOADADDR(.text_data_l2);
+	__l2_len = SIZEOF(.text_data_l2);
+	ASSERT (__l2_len <= L2_LENGTH, "L2 overflow!")
 
 	/* Force trailing alignment of our init section so that when we
 	 * free our init memory, we don't leave behind a partial page.
 	 */
-	. = LOADADDR(.text_data_l2) + SIZEOF(.text_data_l2);
+	. = __l2_lma + __l2_len;
 	. = ALIGN(PAGE_SIZE);
 	___init_end = .;
 
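Since every load address and length is now a named symbol, C code can take the SRAM layout straight from the linker script. As an illustration only (not part of this patch), the symbols declared in asm/sections.h could be dumped at boot like so:

	#include <linux/kernel.h>
	#include <linux/init.h>
	#include <asm/sections.h>

	/* Illustrative only: print the per-section load addresses and
	 * lengths exported by vmlinux.lds.S above. */
	static void __init dump_sram_layout(void)
	{
		pr_info("L1 text  : lma %p len %lu\n",
			_text_l1_lma, (unsigned long)_text_l1_len);
		pr_info("L1 data A: lma %p len %lu\n",
			_data_l1_lma, (unsigned long)_data_l1_len);
		pr_info("L1 data B: lma %p len %lu\n",
			_data_b_l1_lma, (unsigned long)_data_b_l1_len);
		pr_info("L2       : lma %p len %lu\n",
			_l2_lma, (unsigned long)_l2_len);
	}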