Diffstat (limited to 'arch/arm/boot/compressed/head.S')
-rw-r--r--	arch/arm/boot/compressed/head.S	63
1 file changed, 42 insertions(+), 21 deletions(-)
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index adf583cd0c35..c363458a4e63 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -179,16 +179,29 @@ not_angel:
 		bl	cache_on
 
 restart:	adr	r0, LC0
-		ldmia	r0, {r1, r2, r3, r5, r6, r9, r11, r12}
-		ldr	sp, [r0, #32]
+		ldmia	r0, {r1, r2, r3, r6, r10, r11, r12}
+		ldr	sp, [r0, #28]
 
 		/*
 		 * We might be running at a different address. We need
 		 * to fix up various pointers.
 		 */
 		sub	r0, r0, r1		@ calculate the delta offset
-		add	r5, r5, r0		@ _start
 		add	r6, r6, r0		@ _edata
+		add	r10, r10, r0		@ inflated kernel size location
+
+		/*
+		 * The kernel build system appends the size of the
+		 * decompressed kernel at the end of the compressed data
+		 * in little-endian form.
+		 */
+		ldrb	r9, [r10, #0]
+		ldrb	lr, [r10, #1]
+		orr	r9, r9, lr, lsl #8
+		ldrb	lr, [r10, #2]
+		ldrb	r10, [r10, #3]
+		orr	r9, r9, lr, lsl #16
+		orr	r9, r9, r10, lsl #24
 
 #ifndef CONFIG_ZBOOT_ROM
 		/* malloc space is above the relocated stack (64k max) */
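
The four ldrb/orr pairs added above are simply an unaligned little-endian 32-bit load of the size word appended to the compressed data. A rough C equivalent, purely illustrative (the helper name read_le32 is not a kernel symbol):

#include <stdint.h>

/*
 * Read a 32-bit value stored least-significant byte first, one byte
 * at a time so that no aligned word access is required.
 */
static uint32_t read_le32(const uint8_t *p)
{
	return (uint32_t)p[0]
	     | ((uint32_t)p[1] << 8)
	     | ((uint32_t)p[2] << 16)
	     | ((uint32_t)p[3] << 24);
}

/* inflated_size = read_le32(input_data_end - 4);  -- what r9 ends up holding */
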
@@ -206,31 +219,40 @@ restart: adr r0, LC0
 /*
  * Check to see if we will overwrite ourselves.
  * r4 = final kernel address
- * r5 = start of this image
  * r9 = size of decompressed image
  * r10 = end of this image, including bss/stack/malloc space if non XIP
  * We basically want:
- *   r4 >= r10 -> OK
- *   r4 + image length <= r5 -> OK
+ *   r4 - 16k page directory >= r10 -> OK
+ *   r4 + image length <= current position (pc) -> OK
  */
+		add	r10, r10, #16384
 		cmp	r4, r10
 		bhs	wont_overwrite
 		add	r10, r4, r9
-		cmp	r10, r5
+ ARM(		cmp	r10, pc		)
+ THUMB(	mov	lr, pc		)
+ THUMB(	cmp	r10, lr		)
 		bls	wont_overwrite
 
 /*
  * Relocate ourselves past the end of the decompressed kernel.
- * r5 = start of this image
  * r6 = _edata
  * r10 = end of the decompressed kernel
  * Because we always copy ahead, we need to do it from the end and go
  * backward in case the source and destination overlap.
  */
-		/* Round up to next 256-byte boundary. */
-		add	r10, r10, #256
+		/*
+		 * Bump to the next 256-byte boundary with the size of
+		 * the relocation code added. This avoids overwriting
+		 * ourself when the offset is small.
+		 */
+		add	r10, r10, #((reloc_code_end - restart + 256) & ~255)
 		bic	r10, r10, #255
 
+		/* Get start of code we want to copy and align it down. */
+		adr	r5, restart
+		bic	r5, r5, #31
+
 		sub	r9, r6, r5		@ size to copy
 		add	r9, r9, #31		@ rounded up to a multiple
 		bic	r9, r9, #31		@ ... of 32 bytes
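
In C terms, the reworked overlap test and the new relocation-destination rounding look roughly like the sketch below; the variable and function names only mirror the registers and labels in the hunk above and are not kernel symbols:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Safe if the 16 KiB page directory placed just below the final kernel
 * address (and the kernel above it) starts at or after the end of this
 * image, or if the decompressed kernel ends before the code currently
 * executing.
 */
static bool wont_overwrite(uintptr_t final_kernel,	/* r4 */
			   uintptr_t image_end,		/* r10 before the add */
			   uint32_t  inflated_size,	/* r9 */
			   uintptr_t current_pc)	/* pc */
{
	if (final_kernel >= image_end + 16384)
		return true;
	return final_kernel + inflated_size <= current_pc;
}

/*
 * Relocation destination: past the end of the decompressed kernel, with
 * the size of the relocation code (rounded up to a multiple of 256)
 * added, then aligned down to a 256-byte boundary.
 */
static uintptr_t reloc_dest(uintptr_t kernel_end, size_t reloc_code_size)
{
	uintptr_t dest = kernel_end + ((reloc_code_size + 256) & ~(size_t)255);
	return dest & ~(uintptr_t)255;
}
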
@@ -245,6 +267,11 @@ restart: adr r0, LC0
 		/* Preserve offset to relocated code. */
 		sub	r6, r9, r6
 
+#ifndef CONFIG_ZBOOT_ROM
+		/* cache_clean_flush may use the stack, so relocate it */
+		add	sp, sp, r6
+#endif
+
 		bl	cache_clean_flush
 
 		adr	r0, BSYM(restart)
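
The new CONFIG_ZBOOT_ROM-guarded lines shift the stack pointer by the same delta that separates the running image from its relocated copy, so cache_clean_flush can push onto a stack that will still be valid once execution continues in the copy. A minimal sketch, with illustrative names only:

#include <stdint.h>

/*
 * Illustrative only: after the copy, every pointer into the image
 * (here, the stack pointer) moves by the same delta, which is what
 * r6 holds after "sub r6, r9, r6".
 */
static uintptr_t relocate_sp(uintptr_t sp, uintptr_t delta)
{
	return sp + delta;	/* equivalent of "add sp, sp, r6" */
}
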
@@ -333,12 +360,11 @@ not_relocated: mov r0, #0
 LC0:		.word	LC0			@ r1
 		.word	__bss_start		@ r2
 		.word	_end			@ r3
-		.word	_start			@ r5
 		.word	_edata			@ r6
-		.word	_image_size		@ r9
+		.word	input_data_end - 4	@ r10 (inflated size location)
 		.word	_got_start		@ r11
 		.word	_got_end		@ ip
-		.word	user_stack_end		@ sp
+		.word	.L_user_stack_end	@ sp
 		.size	LC0, . - LC0
 
 #ifdef CONFIG_ARCH_RPC
@@ -735,12 +761,6 @@ proc_types:
 		W(b)	__armv4_mmu_cache_off
 		W(b)	__armv6_mmu_cache_flush
 
-		.word	0x560f5810		@ Marvell PJ4 ARMv6
-		.word	0xff0ffff0
-		W(b)	__armv4_mmu_cache_on
-		W(b)	__armv4_mmu_cache_off
-		W(b)	__armv6_mmu_cache_flush
-
 		.word	0x000f0000		@ new CPU Id
 		.word	0x000f0000
 		W(b)	__armv7_mmu_cache_on
@@ -1062,8 +1082,9 @@ memdump: mov r12, r0
 #endif
 
 		.ltorg
+reloc_code_end:
 
 		.align
 		.section ".stack", "aw", %nobits
-user_stack:	.space	4096
-user_stack_end:
+.L_user_stack:	.space	4096
+.L_user_stack_end: