about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorNicolas Pitre <nicolas.pitre@linaro.org>2011-04-27 16:15:11 -0400
committerNicolas Pitre <nico@fluxnic.net>2011-05-07 00:07:53 -0400
commitadcc25915b98e5752d51d66774ec4a61e50af3c5 (patch)
tree9953511f394eabaab7331e895f707fd1de1a6271 /arch
parent7c2527f0c4bf6bd096f58296597e1373387d69fd (diff)
ARM: zImage: make sure not to relocate on top of the relocation code
If the zImage load address is slightly below the relocation address, there is a risk for the copied data to overwrite the copy loop or cache flush code that the relocation process requires. Always bump the relocation address by the size of that code to avoid this issue. Noticed by Tony Lindgren <tony@atomide.com>. While at it, let's start the copy from the restart symbol which makes the above code size computation possible by the assembler directly (same sections), given that we don't need to preserve the code before that point anyway. And therefore we don't need to carry the _start pointer in r5 anymore. Signed-off-by: Nicolas Pitre <nicolas.pitre@linaro.org> Tested-by: Tony Lindgren <tony@atomide.com>
Diffstat (limited to 'arch')
-rw-r--r--arch/arm/boot/compressed/head.S27
1 file changed, 17 insertions(+), 10 deletions(-)
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 55a5bcb82ba0..53dd5da84f8a 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -187,15 +187,14 @@ not_angel:
187 bl cache_on 187 bl cache_on
188 188
189restart: adr r0, LC0 189restart: adr r0, LC0
190 ldmia r0, {r1, r2, r3, r5, r6, r9, r11, r12} 190 ldmia r0, {r1, r2, r3, r6, r9, r11, r12}
191 ldr sp, [r0, #32] 191 ldr sp, [r0, #28]
192 192
193 /* 193 /*
194 * We might be running at a different address. We need 194 * We might be running at a different address. We need
195 * to fix up various pointers. 195 * to fix up various pointers.
196 */ 196 */
197 sub r0, r0, r1 @ calculate the delta offset 197 sub r0, r0, r1 @ calculate the delta offset
198 add r5, r5, r0 @ _start
199 add r6, r6, r0 @ _edata 198 add r6, r6, r0 @ _edata
200 199
201#ifndef CONFIG_ZBOOT_ROM 200#ifndef CONFIG_ZBOOT_ROM
@@ -214,31 +213,39 @@ restart: adr r0, LC0
214/* 213/*
215 * Check to see if we will overwrite ourselves. 214 * Check to see if we will overwrite ourselves.
216 * r4 = final kernel address 215 * r4 = final kernel address
217 * r5 = start of this image
218 * r9 = size of decompressed image 216 * r9 = size of decompressed image
219 * r10 = end of this image, including bss/stack/malloc space if non XIP 217 * r10 = end of this image, including bss/stack/malloc space if non XIP
220 * We basically want: 218 * We basically want:
221 * r4 >= r10 -> OK 219 * r4 >= r10 -> OK
222 * r4 + image length <= r5 -> OK 220 * r4 + image length <= current position (pc) -> OK
223 */ 221 */
224 cmp r4, r10 222 cmp r4, r10
225 bhs wont_overwrite 223 bhs wont_overwrite
226 add r10, r4, r9 224 add r10, r4, r9
227 cmp r10, r5 225 ARM( cmp r10, pc )
226 THUMB( mov lr, pc )
227 THUMB( cmp r10, lr )
228 bls wont_overwrite 228 bls wont_overwrite
229 229
230/* 230/*
231 * Relocate ourselves past the end of the decompressed kernel. 231 * Relocate ourselves past the end of the decompressed kernel.
232 * r5 = start of this image
233 * r6 = _edata 232 * r6 = _edata
234 * r10 = end of the decompressed kernel 233 * r10 = end of the decompressed kernel
235 * Because we always copy ahead, we need to do it from the end and go 234 * Because we always copy ahead, we need to do it from the end and go
236 * backward in case the source and destination overlap. 235 * backward in case the source and destination overlap.
237 */ 236 */
238 /* Round up to next 256-byte boundary. */ 237 /*
239 add r10, r10, #256 238 * Bump to the next 256-byte boundary with the size of
239 * the relocation code added. This avoids overwriting
240 * ourself when the offset is small.
241 */
242 add r10, r10, #((reloc_code_end - restart + 256) & ~255)
240 bic r10, r10, #255 243 bic r10, r10, #255
241 244
245 /* Get start of code we want to copy and align it down. */
246 adr r5, restart
247 bic r5, r5, #31
248
242 sub r9, r6, r5 @ size to copy 249 sub r9, r6, r5 @ size to copy
243 add r9, r9, #31 @ rounded up to a multiple 250 add r9, r9, #31 @ rounded up to a multiple
244 bic r9, r9, #31 @ ... of 32 bytes 251 bic r9, r9, #31 @ ... of 32 bytes
@@ -346,7 +353,6 @@ not_relocated: mov r0, #0
346LC0: .word LC0 @ r1 353LC0: .word LC0 @ r1
347 .word __bss_start @ r2 354 .word __bss_start @ r2
348 .word _end @ r3 355 .word _end @ r3
349 .word _start @ r5
350 .word _edata @ r6 356 .word _edata @ r6
351 .word _image_size @ r9 357 .word _image_size @ r9
352 .word _got_start @ r11 358 .word _got_start @ r11
@@ -1075,6 +1081,7 @@ memdump: mov r12, r0
1075#endif 1081#endif
1076 1082
1077 .ltorg 1083 .ltorg
1084reloc_code_end:
1078 1085
1079 .align 1086 .align
1080 .section ".stack", "aw", %nobits 1087 .section ".stack", "aw", %nobits