Diffstat (limited to 'arch/arm/boot/compressed/head.S')
-rw-r--r--	arch/arm/boot/compressed/head.S	122
1 file changed, 115 insertions, 7 deletions
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index e95a5989602..c2effc91725 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -216,6 +216,104 @@ restart: adr r0, LC0
 		mov	r10, r6
 #endif
 
+		mov	r5, #0			@ init dtb size to 0
+#ifdef CONFIG_ARM_APPENDED_DTB
+/*
+ *   r0  = delta
+ *   r2  = BSS start
+ *   r3  = BSS end
+ *   r4  = final kernel address
+ *   r5  = appended dtb size (still unknown)
+ *   r6  = _edata
+ *   r7  = architecture ID
+ *   r8  = atags/device tree pointer
+ *   r9  = size of decompressed image
+ *   r10 = end of this image, including bss/stack/malloc space if non XIP
+ *   r11 = GOT start
+ *   r12 = GOT end
+ *   sp  = stack pointer
+ *
+ * if there are device trees (dtb) appended to zImage, advance r10 so that the
+ * dtb data will get relocated along with the kernel if necessary.
+ */
+
+		ldr	lr, [r6, #0]
+#ifndef __ARMEB__
+		ldr	r1, =0xedfe0dd0		@ sig is 0xd00dfeed big endian
+#else
+		ldr	r1, =0xd00dfeed
+#endif
+		cmp	lr, r1
+		bne	dtb_check_done		@ not found
+
+#ifdef CONFIG_ARM_ATAG_DTB_COMPAT
+		/*
+		 * OK... Let's do some funky business here.
+		 * If we do have a DTB appended to zImage, and we do have
+		 * an ATAG list around, we want the latter to be translated
+		 * and folded into the former here. To be on the safe side,
+		 * let's temporarily move the stack away into the malloc
+		 * area. No GOT fixup has occurred yet, but none of the
+		 * code we're about to call uses any global variable.
+		 */
+		add	sp, sp, #0x10000
+		stmfd	sp!, {r0-r3, ip, lr}
+		mov	r0, r8
+		mov	r1, r6
+		sub	r2, sp, r6
+		bl	atags_to_fdt
+
+		/*
+		 * If returned value is 1, there is no ATAG at the location
+		 * pointed to by r8. Try the typical 0x100 offset from start
+		 * of RAM and hope for the best.
+		 */
+		cmp	r0, #1
+		sub	r0, r4, #TEXT_OFFSET
+		add	r0, r0, #0x100
+		mov	r1, r6
+		sub	r2, sp, r6
+		blne	atags_to_fdt
+
+		ldmfd	sp!, {r0-r3, ip, lr}
+		sub	sp, sp, #0x10000
+#endif
+
+		mov	r8, r6			@ use the appended device tree
+
+		/*
+		 * Make sure that the DTB doesn't end up in the final
+		 * kernel's .bss area. To do so, we adjust the decompressed
+		 * kernel size to compensate if that .bss size is larger
+		 * than the relocated code.
+		 */
+		ldr	r5, =_kernel_bss_size
+		adr	r1, wont_overwrite
+		sub	r1, r6, r1
+		subs	r1, r5, r1
+		addhi	r9, r9, r1
+
+		/* Get the dtb's size */
+		ldr	r5, [r6, #4]
+#ifndef __ARMEB__
+		/* convert r5 (dtb size) to little endian */
+		eor	r1, r5, r5, ror #16
+		bic	r1, r1, #0x00ff0000
+		mov	r5, r5, ror #8
+		eor	r5, r5, r1, lsr #8
+#endif
+
+		/* preserve 64-bit alignment */
+		add	r5, r5, #7
+		bic	r5, r5, #7
+
+		/* relocate some pointers past the appended dtb */
+		add	r6, r6, r5
+		add	r10, r10, r5
+		add	sp, sp, r5
+dtb_check_done:
+#endif
+
 /*
  * Check to see if we will overwrite ourselves.
  *   r4  = final kernel address
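
As an aside, here is a rough C sketch of what the appended-DTB probe in the hunk above does, following the little-endian (#ifndef __ARMEB__) path: check the FDT magic word at _edata, byte-swap the big-endian totalsize field, and round the size up to 8 bytes. This is illustrative only, not part of the patch, and the helper names are invented.

    #include <stdint.h>
    #include <stddef.h>

    #define FDT_MAGIC 0xd00dfeedU               /* stored big-endian in the blob */

    /* Full 32-bit byte swap: what the eor/bic/ror sequence computes. */
    static uint32_t be32_to_host(uint32_t x)
    {
        return (x >> 24) | ((x >> 8) & 0x0000ff00) |
               ((x << 8) & 0x00ff0000) | (x << 24);
    }

    /* Returns the appended dtb size rounded to 64-bit alignment, or 0 if none. */
    static size_t appended_dtb_size(const uint32_t *edata)
    {
        if (be32_to_host(edata[0]) != FDT_MAGIC)
            return 0;                           /* no DTB appended */
        size_t size = be32_to_host(edata[1]);   /* FDT header: totalsize at offset 4 */
        return (size + 7) & ~(size_t)7;         /* add r5, r5, #7; bic r5, r5, #7 */
    }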
@@ -223,15 +321,14 @@ restart: adr r0, LC0
  *   r10 = end of this image, including bss/stack/malloc space if non XIP
  * We basically want:
  *   r4 - 16k page directory >= r10 -> OK
- *   r4 + image length <= current position (pc)  -> OK
+ *   r4 + image length <= address of wont_overwrite -> OK
  */
 		add	r10, r10, #16384
 		cmp	r4, r10
 		bhs	wont_overwrite
 		add	r10, r4, r9
- ARM(		cmp	r10, pc		)
- THUMB(	mov	lr, pc		)
- THUMB(	cmp	r10, lr		)
+		adr	r9, wont_overwrite
+		cmp	r10, r9
 		bls	wont_overwrite
 
 /*
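
A minimal sketch of the overlap test in the hunk above (not part of the patch; names are invented): decompressing in place is safe if the final kernel either starts above the end of this image plus the 16 KiB page directory, or ends at or below wont_overwrite.

    #include <stdbool.h>
    #include <stdint.h>

    static bool decompress_in_place_is_safe(uintptr_t final_kernel,        /* r4  */
                                            uintptr_t image_end,           /* r10 */
                                            uintptr_t image_len,           /* r9  */
                                            uintptr_t wont_overwrite_addr) /* adr r9, wont_overwrite */
    {
        if (final_kernel >= image_end + 16384)          /* r4 - 16k page dir >= r10 -> OK */
            return true;
        if (final_kernel + image_len <= wont_overwrite_addr)
            return true;                                /* ends before code still to run */
        return false;                                   /* overlap: relocate ourselves first */
    }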
@@ -285,14 +382,16 @@ wont_overwrite:
  *   r2  = BSS start
  *   r3  = BSS end
  *   r4  = kernel execution address
+ *   r5  = appended dtb size (0 if not present)
  *   r7  = architecture ID
  *   r8  = atags pointer
  *   r11 = GOT start
  *   r12 = GOT end
  *   sp  = stack pointer
  */
-		teq	r0, #0
+		orrs	r1, r0, r5
 		beq	not_relocated
+
 		add	r11, r11, r0
 		add	r12, r12, r0
 
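
The changed entry test above can be sketched in C as follows (illustrative only, names invented): the fixup loop used to be skipped whenever the image had not moved, but GOT entries pointing into .bss must still be bumped past an appended DTB, so the loop is now skipped only when both the delta and the dtb size are zero.

    #include <stdbool.h>
    #include <stdint.h>

    static bool need_got_fixup(uintptr_t delta, uintptr_t dtb_size)
    {
        return (delta | dtb_size) != 0;     /* orrs r1, r0, r5; beq not_relocated */
    }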
@@ -307,12 +406,21 @@ wont_overwrite:
 
 		/*
 		 * Relocate all entries in the GOT table.
+		 * Bump bss entries to _edata + dtb size
 		 */
 1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
-		add	r1, r1, r0		@ table.  This fixes up the
-		str	r1, [r11], #4		@ C references.
+		add	r1, r1, r0		@ This fixes up C references
+		cmp	r1, r2			@ if entry >= bss_start &&
+		cmphs	r3, r1			@       bss_end > entry
+		addhi	r1, r1, r5		@    entry += dtb size
+		str	r1, [r11], #4		@ next entry
 		cmp	r11, r12
 		blo	1b
+
+		/* bump our bss pointers too */
+		add	r2, r2, r5
+		add	r3, r3, r5
+
 #else
 
 	/*
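
A C sketch of the fixup loop in the last hunk (illustrative, not part of the patch; names are invented): every GOT entry is shifted by the relocation delta, and entries that land inside .bss are additionally pushed past the appended DTB, which now occupies the memory right after _edata where .bss used to begin.

    #include <stdint.h>

    static void relocate_got(uintptr_t *got, uintptr_t *got_end, uintptr_t delta,
                             uintptr_t bss_start, uintptr_t bss_end, uintptr_t dtb_size)
    {
        for (; got < got_end; got++) {              /* cmp r11, r12; blo 1b */
            uintptr_t entry = *got + delta;         /* relocate the entry   */
            if (entry >= bss_start && entry < bss_end)
                entry += dtb_size;                  /* bump .bss references */
            *got = entry;
        }
        /* afterwards bss_start and bss_end themselves are advanced by
         * dtb_size, matching the "bump our bss pointers too" lines */
    }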