about summary refs log tree commit diff stats
path: root/arch/arm64/kernel
diff options
context:
space:
mode:
authorArd Biesheuvel <ard.biesheuvel@linaro.org>2015-03-17 04:14:29 -0400
committerWill Deacon <will.deacon@arm.com>2015-03-19 15:46:02 -0400
commit6f4d57fa7021efbf135cfa068d56bc5035edffa1 (patch)
tree24f44e36f384fc88729185466cbcb5a2e0f67518 /arch/arm64/kernel
parent8b0a95753a34b5c8b2e483e0e5b1d67761e32c5f (diff)
arm64: remove __calc_phys_offset
This removes the function __calc_phys_offset and all open-coded virtual-to-physical address translations using the offset kept in x28. Instead, just use absolute or PC-relative symbol references as appropriate when referring to virtual or physical addresses, respectively.

Tested-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--arch/arm64/kernel/head.S47
1 file changed, 11 insertions, 36 deletions
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 750403c62928..f5ac337f9598 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -36,7 +36,7 @@
36#include <asm/page.h> 36#include <asm/page.h>
37#include <asm/virt.h> 37#include <asm/virt.h>
38 38
39#define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET) 39#define __PHYS_OFFSET (KERNEL_START - TEXT_OFFSET)
40 40
41#if (TEXT_OFFSET & 0xfff) != 0 41#if (TEXT_OFFSET & 0xfff) != 0
42#error TEXT_OFFSET must be at least 4KB aligned 42#error TEXT_OFFSET must be at least 4KB aligned
@@ -46,13 +46,6 @@
46#error TEXT_OFFSET must be less than 2MB 46#error TEXT_OFFSET must be less than 2MB
47#endif 47#endif
48 48
49 .macro pgtbl, ttb0, ttb1, virt_to_phys
50 ldr \ttb1, =swapper_pg_dir
51 ldr \ttb0, =idmap_pg_dir
52 add \ttb1, \ttb1, \virt_to_phys
53 add \ttb0, \ttb0, \virt_to_phys
54 .endm
55
56#ifdef CONFIG_ARM64_64K_PAGES 49#ifdef CONFIG_ARM64_64K_PAGES
57#define BLOCK_SHIFT PAGE_SHIFT 50#define BLOCK_SHIFT PAGE_SHIFT
58#define BLOCK_SIZE PAGE_SIZE 51#define BLOCK_SIZE PAGE_SIZE
@@ -63,7 +56,7 @@
63#define TABLE_SHIFT PUD_SHIFT 56#define TABLE_SHIFT PUD_SHIFT
64#endif 57#endif
65 58
66#define KERNEL_START KERNEL_RAM_VADDR 59#define KERNEL_START _text
67#define KERNEL_END _end 60#define KERNEL_END _end
68 61
69/* 62/*
@@ -242,7 +235,7 @@ section_table:
242ENTRY(stext) 235ENTRY(stext)
243 mov x21, x0 // x21=FDT 236 mov x21, x0 // x21=FDT
244 bl el2_setup // Drop to EL1, w20=cpu_boot_mode 237 bl el2_setup // Drop to EL1, w20=cpu_boot_mode
245 bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET 238 adrp x24, __PHYS_OFFSET
246 bl set_cpu_boot_mode_flag 239 bl set_cpu_boot_mode_flag
247 240
248 bl __vet_fdt 241 bl __vet_fdt
@@ -342,7 +335,8 @@ ENDPROC(__vet_fdt)
342 * - pgd entry for fixed mappings (TTBR1) 335 * - pgd entry for fixed mappings (TTBR1)
343 */ 336 */
344__create_page_tables: 337__create_page_tables:
345 pgtbl x25, x26, x28 // idmap_pg_dir and swapper_pg_dir addresses 338 adrp x25, idmap_pg_dir
339 adrp x26, swapper_pg_dir
346 mov x27, lr 340 mov x27, lr
347 341
348 /* 342 /*
@@ -371,12 +365,10 @@ __create_page_tables:
371 * Create the identity mapping. 365 * Create the identity mapping.
372 */ 366 */
373 mov x0, x25 // idmap_pg_dir 367 mov x0, x25 // idmap_pg_dir
374 ldr x3, =KERNEL_START 368 adrp x3, KERNEL_START // __pa(KERNEL_START)
375 add x3, x3, x28 // __pa(KERNEL_START)
376 create_pgd_entry x0, x3, x5, x6 369 create_pgd_entry x0, x3, x5, x6
377 ldr x6, =KERNEL_END
378 mov x5, x3 // __pa(KERNEL_START) 370 mov x5, x3 // __pa(KERNEL_START)
379 add x6, x6, x28 // __pa(KERNEL_END) 371 adr_l x6, KERNEL_END // __pa(KERNEL_END)
380 create_block_map x0, x7, x3, x5, x6 372 create_block_map x0, x7, x3, x5, x6
381 373
382 /* 374 /*
@@ -385,7 +377,7 @@ __create_page_tables:
385 mov x0, x26 // swapper_pg_dir 377 mov x0, x26 // swapper_pg_dir
386 mov x5, #PAGE_OFFSET 378 mov x5, #PAGE_OFFSET
387 create_pgd_entry x0, x5, x3, x6 379 create_pgd_entry x0, x5, x3, x6
388 ldr x6, =KERNEL_END 380 ldr x6, =KERNEL_END // __va(KERNEL_END)
389 mov x3, x24 // phys offset 381 mov x3, x24 // phys offset
390 create_block_map x0, x7, x3, x5, x6 382 create_block_map x0, x7, x3, x5, x6
391 383
@@ -537,8 +529,7 @@ ENDPROC(el2_setup)
537 * in x20. See arch/arm64/include/asm/virt.h for more info. 529 * in x20. See arch/arm64/include/asm/virt.h for more info.
538 */ 530 */
539ENTRY(set_cpu_boot_mode_flag) 531ENTRY(set_cpu_boot_mode_flag)
540 ldr x1, =__boot_cpu_mode // Compute __boot_cpu_mode 532 adr_l x1, __boot_cpu_mode
541 add x1, x1, x28
542 cmp w20, #BOOT_CPU_MODE_EL2 533 cmp w20, #BOOT_CPU_MODE_EL2
543 b.ne 1f 534 b.ne 1f
544 add x1, x1, #4 535 add x1, x1, #4
@@ -569,7 +560,6 @@ ENTRY(__boot_cpu_mode)
569 */ 560 */
570ENTRY(secondary_holding_pen) 561ENTRY(secondary_holding_pen)
571 bl el2_setup // Drop to EL1, w20=cpu_boot_mode 562 bl el2_setup // Drop to EL1, w20=cpu_boot_mode
572 bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
573 bl set_cpu_boot_mode_flag 563 bl set_cpu_boot_mode_flag
574 mrs x0, mpidr_el1 564 mrs x0, mpidr_el1
575 ldr x1, =MPIDR_HWID_BITMASK 565 ldr x1, =MPIDR_HWID_BITMASK
@@ -588,7 +578,6 @@ ENDPROC(secondary_holding_pen)
588 */ 578 */
589ENTRY(secondary_entry) 579ENTRY(secondary_entry)
590 bl el2_setup // Drop to EL1 580 bl el2_setup // Drop to EL1
591 bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
592 bl set_cpu_boot_mode_flag 581 bl set_cpu_boot_mode_flag
593 b secondary_startup 582 b secondary_startup
594ENDPROC(secondary_entry) 583ENDPROC(secondary_entry)
@@ -597,7 +586,8 @@ ENTRY(secondary_startup)
597 /* 586 /*
598 * Common entry point for secondary CPUs. 587 * Common entry point for secondary CPUs.
599 */ 588 */
600 pgtbl x25, x26, x28 // x25=TTBR0, x26=TTBR1 589 adrp x25, idmap_pg_dir
590 adrp x26, swapper_pg_dir
601 bl __cpu_setup // initialise processor 591 bl __cpu_setup // initialise processor
602 592
603 ldr x21, =secondary_data 593 ldr x21, =secondary_data
@@ -631,18 +621,3 @@ __enable_mmu:
631 isb 621 isb
632 br x27 622 br x27
633ENDPROC(__enable_mmu) 623ENDPROC(__enable_mmu)
634
635/*
636 * Calculate the start of physical memory.
637 */
638__calc_phys_offset:
639 adr x0, 1f
640 ldp x1, x2, [x0]
641 sub x28, x0, x1 // x28 = PHYS_OFFSET - PAGE_OFFSET
642 add x24, x2, x28 // x24 = PHYS_OFFSET
643 ret
644ENDPROC(__calc_phys_offset)
645
646 .align 3
6471: .quad .
648 .quad PAGE_OFFSET