author	Ard Biesheuvel <ard.biesheuvel@linaro.org>	2015-03-17 03:59:53 -0400
committer	Will Deacon <will.deacon@arm.com>	2015-03-19 15:46:02 -0400
commit	8b0a95753a34b5c8b2e483e0e5b1d67761e32c5f (patch)
tree	b94cad9ea7b18ad3b28b56d76c6fb6093a2bbd68
parent	b1c98297fe0c6e2899ede03fc3b831f36e19fb76 (diff)
arm64: merge __enable_mmu and __turn_mmu_on
Enabling of the MMU is split into two functions, with an align and a branch in the middle. On arm64, the entire kernel Image is ID mapped, so this is not necessary, and we can simply merge it into a single function.

Also replace an open-coded adrp/add pair referencing __enable_mmu with adr_l.

Tested-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
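For context, adr_l is an assembler macro from arch/arm64/include/asm/assembler.h that wraps the same PC-relative addressing idiom as the open-coded sequence. A minimal sketch of its likely shape at the time (the in-tree definition may differ in detail):

	.macro	adr_l, dst, sym
	adrp	\dst, \sym			// page (4 KB-aligned) address of \sym
	add	\dst, \dst, :lo12:\sym		// add the low 12 bits of \sym
	.endm

The macro expands to the same adrp/add pair, so the substitution is a readability cleanup; the PC-relative, +/-4 GB addressing behaviour is unchanged.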
-rw-r--r--	arch/arm64/kernel/head.S	33
1 file changed, 7 insertions(+), 26 deletions(-)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 818213186dac..750403c62928 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -255,8 +255,7 @@ ENTRY(stext)
  */
 	ldr	x27, =__mmap_switched		// address to jump to after
 						// MMU has been enabled
-	adrp	lr, __enable_mmu		// return (PIC) address
-	add	lr, lr, #:lo12:__enable_mmu
+	adr_l	lr, __enable_mmu		// return (PIC) address
 	b	__cpu_setup			// initialise processor
 ENDPROC(stext)
 
@@ -615,11 +614,12 @@ ENDPROC(__secondary_switched)
 #endif	/* CONFIG_SMP */
 
 /*
- * Setup common bits before finally enabling the MMU. Essentially this is just
- * loading the page table pointer and vector base registers.
+ * Enable the MMU.
  *
- * On entry to this code, x0 must contain the SCTLR_EL1 value for turning on
- * the MMU.
+ *  x0  = SCTLR_EL1 value for turning on the MMU.
+ *  x27 = *virtual* address to jump to upon completion
+ *
+ * other registers depend on the function called upon completion
  */
 __enable_mmu:
 	ldr	x5, =vectors
@@ -627,29 +627,10 @@ __enable_mmu:
 	msr	ttbr0_el1, x25			// load TTBR0
 	msr	ttbr1_el1, x26			// load TTBR1
 	isb
-	b	__turn_mmu_on
-ENDPROC(__enable_mmu)
-
-/*
- * Enable the MMU. This completely changes the structure of the visible memory
- * space. You will not be able to trace execution through this.
- *
- * x0  = system control register
- * x27 = *virtual* address to jump to upon completion
- *
- * other registers depend on the function called upon completion
- *
- * We align the entire function to the smallest power of two larger than it to
- * ensure it fits within a single block map entry. Otherwise were PHYS_OFFSET
- * close to the end of a 512MB or 1GB block we might require an additional
- * table to map the entire function.
- */
-	.align	4
-__turn_mmu_on:
 	msr	sctlr_el1, x0
 	isb
 	br	x27
-ENDPROC(__turn_mmu_on)
+ENDPROC(__enable_mmu)
 
 /*
  * Calculate the start of physical memory.