Diffstat (limited to 'arch/arm/boot/compressed/head.S')
-rw-r--r--  arch/arm/boot/compressed/head.S  121
1 file changed, 114 insertions(+), 7 deletions(-)
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index e95a5989602a..9f5ac11ccd8e 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -216,6 +216,103 @@ restart:	adr	r0, LC0
 		mov	r10, r6
 #endif
 
+		mov	r5, #0			@ init dtb size to 0
+#ifdef CONFIG_ARM_APPENDED_DTB
+/*
+ *   r0  = delta
+ *   r2  = BSS start
+ *   r3  = BSS end
+ *   r4  = final kernel address
+ *   r5  = appended dtb size (still unknown)
+ *   r6  = _edata
+ *   r7  = architecture ID
+ *   r8  = atags/device tree pointer
+ *   r9  = size of decompressed image
+ *   r10 = end of this image, including bss/stack/malloc space if non XIP
+ *   r11 = GOT start
+ *   r12 = GOT end
+ *   sp  = stack pointer
+ *
+ * if there are device trees (dtb) appended to zImage, advance r10 so that the
+ * dtb data will get relocated along with the kernel if necessary.
+ */
+
+		ldr	lr, [r6, #0]
+#ifndef __ARMEB__
+		ldr	r1, =0xedfe0dd0		@ sig is 0xd00dfeed big endian
+#else
+		ldr	r1, =0xd00dfeed
+#endif
+		cmp	lr, r1
+		bne	dtb_check_done		@ not found
+
+#ifdef CONFIG_ARM_ATAG_DTB_COMPAT
+		/*
+		 * OK... Let's do some funky business here.
+		 * If we do have a DTB appended to zImage, and we do have
+		 * an ATAG list around, we want the latter to be translated
+		 * and folded into the former here.  To be on the safe side,
+		 * let's temporarily move the stack away into the malloc
+		 * area.  No GOT fixup has occurred yet, but none of the
+		 * code we're about to call uses any global variable.
+		 */
+		add	sp, sp, #0x10000
+		stmfd	sp!, {r0-r3, ip, lr}
+		mov	r0, r8
+		mov	r1, r6
+		sub	r2, sp, r6
+		bl	atags_to_fdt
+
+		/*
+		 * If returned value is 1, there is no ATAG at the location
+		 * pointed by r8.  Try the typical 0x100 offset from start
+		 * of RAM and hope for the best.
+		 */
+		cmp	r0, #1
+		sub	r0, r4, #(TEXT_OFFSET - 0x100)
+		mov	r1, r6
+		sub	r2, sp, r6
+		blne	atags_to_fdt
+
+		ldmfd	sp!, {r0-r3, ip, lr}
+		sub	sp, sp, #0x10000
+#endif
+
+		mov	r8, r6			@ use the appended device tree
+
+		/*
+		 * Make sure that the DTB doesn't end up in the final
+		 * kernel's .bss area. To do so, we adjust the decompressed
+		 * kernel size to compensate if that .bss size is larger
+		 * than the relocated code.
+		 */
+		ldr	r5, =_kernel_bss_size
+		adr	r1, wont_overwrite
+		sub	r1, r6, r1
+		subs	r1, r5, r1
+		addhi	r9, r9, r1
+
+		/* Get the dtb's size */
+		ldr	r5, [r6, #4]
+#ifndef __ARMEB__
+		/* convert r5 (dtb size) to little endian */
+		eor	r1, r5, r5, ror #16
+		bic	r1, r1, #0x00ff0000
+		mov	r5, r5, ror #8
+		eor	r5, r5, r1, lsr #8
+#endif
+
+		/* preserve 64-bit alignment */
+		add	r5, r5, #7
+		bic	r5, r5, #7
+
+		/* relocate some pointers past the appended dtb */
+		add	r6, r6, r5
+		add	r10, r10, r5
+		add	sp, sp, r5
+dtb_check_done:
+#endif
+
 /*
  * Check to see if we will overwrite ourselves.
  *   r4  = final kernel address
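For reference, the appended-DTB probe added above boils down to the following C sketch; it is not part of the patch and the names are illustrative. It checks the FDT magic word at _edata, byte-swaps the big-endian totalsize field on a little-endian CPU (mirroring the eor/bic/ror/eor sequence), and rounds the result up to 8 bytes as the add/bic pair does:

        #include <stdint.h>

        /* byte swap, same result as the eor/bic/ror/eor sequence above */
        static uint32_t swab32_sketch(uint32_t x)
        {
                return (x >> 24) | ((x >> 8) & 0x0000ff00) |
                       ((x << 8) & 0x00ff0000) | (x << 24);
        }

        /* returns the rounded-up dtb size, or 0 if no dtb follows the image */
        uint32_t appended_dtb_size(const uint32_t *image_end)  /* r6 = _edata */
        {
                uint32_t size;

                /* little-endian CPU assumed (the #ifndef __ARMEB__ path) */
                if (image_end[0] != swab32_sketch(0xd00dfeed))  /* FDT magic */
                        return 0;                               /* dtb_check_done */

                size = swab32_sketch(image_end[1]);   /* big-endian totalsize */
                return (size + 7) & ~(uint32_t)7;     /* preserve 64-bit alignment */
        }

With CONFIG_ARM_ATAG_DTB_COMPAT, the r0/r1/r2 setup before the two atags_to_fdt calls passes the ATAG pointer, the DTB at _edata, and the free space up to the temporarily raised stack, so the ATAG list can be folded into the DTB before the kernel sees it.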
@@ -223,15 +320,14 @@ restart:	adr	r0, LC0
  *   r10 = end of this image, including bss/stack/malloc space if non XIP
  * We basically want:
  *   r4 - 16k page directory >= r10 -> OK
- *   r4 + image length <= current position (pc) -> OK
+ *   r4 + image length <= address of wont_overwrite -> OK
  */
 		add	r10, r10, #16384
 		cmp	r4, r10
 		bhs	wont_overwrite
 		add	r10, r4, r9
- ARM(		cmp	r10, pc		)
- THUMB(		mov	lr, pc		)
- THUMB(		cmp	r10, lr		)
+		adr	r9, wont_overwrite
+		cmp	r10, r9
 		bls	wont_overwrite
 
/*
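The reworked overlap test no longer compares against pc (which needs separate ARM and Thumb sequences) but against the address of wont_overwrite. As plain C logic it reads roughly as below; this is only a sketch with stand-in names, not code from the patch:

        #include <stdbool.h>
        #include <stdint.h>

        /* true if decompressing to final_addr would clobber the running zImage */
        bool must_relocate(uintptr_t final_addr,          /* r4  */
                           uintptr_t image_len,           /* r9  = decompressed size */
                           uintptr_t image_end,           /* r10 = end incl. bss/stack/malloc */
                           uintptr_t wont_overwrite_addr) /* adr r9, wont_overwrite */
        {
                /* room below the kernel for the 16 KiB page directory */
                if (final_addr >= image_end + 16384)
                        return false;                     /* bhs wont_overwrite */

                /* output ends before the code that keeps executing afterwards */
                if (final_addr + image_len <= wont_overwrite_addr)
                        return false;                     /* bls wont_overwrite */

                return true;                              /* relocate ourselves first */
        }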
@@ -285,14 +381,16 @@ wont_overwrite:
  *   r2  = BSS start
  *   r3  = BSS end
  *   r4  = kernel execution address
+ *   r5  = appended dtb size (0 if not present)
  *   r7  = architecture ID
  *   r8  = atags pointer
  *   r11 = GOT start
  *   r12 = GOT end
  *   sp  = stack pointer
  */
-		teq	r0, #0
+		orrs	r1, r0, r5
 		beq	not_relocated
+
 		add	r11, r11, r0
 		add	r12, r12, r0
 
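The switch from teq to orrs makes the fixup path run whenever either the load delta or an appended DTB makes adjustments necessary. A tiny C equivalent, sketch only with illustrative names:

        #include <stdbool.h>
        #include <stdint.h>

        /* orrs r1, r0, r5 ; beq not_relocated */
        static bool needs_fixup(uintptr_t delta, uint32_t dtb_size)
        {
                return (delta | dtb_size) != 0;
        }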
@@ -307,12 +405,21 @@ wont_overwrite:
 
 		/*
 		 * Relocate all entries in the GOT table.
+		 * Bump bss entries to _edata + dtb size
 		 */
 1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
-		add	r1, r1, r0		@ table.  This fixes up the
-		str	r1, [r11], #4		@ C references.
+		add	r1, r1, r0		@ This fixes up C references
+		cmp	r1, r2			@ if entry >= bss_start &&
+		cmphs	r3, r1			@       bss_end > entry
+		addhi	r1, r1, r5		@    entry += dtb size
+		str	r1, [r11], #4		@ next entry
 		cmp	r11, r12
 		blo	1b
+
+		/* bump our bss pointers too */
+		add	r2, r2, r5
+		add	r3, r3, r5
+
 #else
 
 		/*
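The extended GOT loop now pushes any entry that points into .bss past the appended DTB, after which r2/r3 themselves are bumped by the dtb size. A rough C rendering of the loop, again only a sketch with stand-in names:

        #include <stdint.h>

        void fixup_got(uint32_t *got_start, uint32_t *got_end, /* r11, r12 */
                       uint32_t delta,                          /* r0 */
                       uint32_t bss_start, uint32_t bss_end,    /* r2, r3 */
                       uint32_t dtb_size)                       /* r5, 8-byte aligned */
        {
                for (uint32_t *p = got_start; p < got_end; p++) {
                        uint32_t entry = *p + delta;            /* fix up C reference */

                        /* bss entries move past the dtb now sitting at _edata */
                        if (entry >= bss_start && entry < bss_end)
                                entry += dtb_size;              /* cmp/cmphs/addhi */
                        *p = entry;
                }
        }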