author	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-04 15:31:53 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-04 15:31:53 -0400
commit	5167d09ffad5b16b574d35ce3047ed34caf1e837 (patch)
tree	fc45dd9cbd578f5010e7b8208ecdfc6534547989 /arch/arm64/kernel/head.S
parent	8533ce72718871fb528d853391746f36243273af (diff)
parent	ea1719672f59eeb85829073b567495c4f472ac9f (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Will Deacon:
 "Once again, Catalin's off on holiday and I'm looking after the arm64
  tree. Please can you pull the following arm64 updates for 3.17? Note
  that this branch also includes the new GICv3 driver (merged via a
  stable tag from Jason's irqchip tree), since there is a fix for older
  binutils on top.

  Changes include:

   - context tracking support (NO_HZ_FULL) which narrowly missed 3.16
   - vDSO layout rework following Andy's work on x86
   - TEXT_OFFSET fuzzing for bootloader testing
   - /proc/cpuinfo tidy-up
   - preliminary work to support 48-bit virtual addresses, but this is
     currently disabled until KVM has been ported to use it (the patches
     do, however, bring some nice clean-up)
   - boot-time CPU sanity checks (especially useful on heterogeneous
     systems)
   - support for syscall auditing
   - support for CC_STACKPROTECTOR
   - defconfig updates"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (55 commits)
  arm64: add newline to I-cache policy string
  Revert "arm64: dmi: Add SMBIOS/DMI support"
  arm64: fpsimd: fix a typo in fpsimd_save_partial_state ENDPROC
  arm64: don't call break hooks for BRK exceptions from EL0
  arm64: defconfig: enable devtmpfs mount option
  arm64: vdso: fix build error when switching from LE to BE
  arm64: defconfig: add virtio support for running as a kvm guest
  arm64: gicv3: Allow GICv3 compilation with older binutils
  arm64: fix soft lockup due to large tlb flush range
  arm64/crypto: fix makefile rule for aes-glue-%.o
  arm64: Do not invoke audit_syscall_* functions if !CONFIG_AUDIT_SYSCALL
  arm64: Fix barriers used for page table modifications
  arm64: Add support for 48-bit VA space with 64KB page configuration
  arm64: asm/pgtable.h pmd/pud definitions clean-up
  arm64: Determine the vmalloc/vmemmap space at build time based on VA_BITS
  arm64: Clean up the initial page table creation in head.S
  arm64: Remove asm/pgtable-*level-types.h files
  arm64: Remove asm/pgtable-*level-hwdef.h files
  arm64: Convert bool ARM64_x_LEVELS to int ARM64_PGTABLE_LEVELS
  arm64: mm: Implement 4 levels of translation tables
  ...
Diffstat (limited to 'arch/arm64/kernel/head.S')
-rw-r--r--	arch/arm64/kernel/head.S	121
1 file changed, 76 insertions(+), 45 deletions(-)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index a2c1195abb7f..144f10567f82 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -22,6 +22,7 @@
 
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <linux/irqchip/arm-gic-v3.h>
 
 #include <asm/assembler.h>
 #include <asm/ptrace.h>
@@ -35,37 +36,31 @@
 #include <asm/page.h>
 #include <asm/virt.h>
 
-/*
- * swapper_pg_dir is the virtual address of the initial page table. We place
- * the page tables 3 * PAGE_SIZE below KERNEL_RAM_VADDR. The idmap_pg_dir has
- * 2 pages and is placed below swapper_pg_dir.
- */
 #define KERNEL_RAM_VADDR	(PAGE_OFFSET + TEXT_OFFSET)
 
-#if (KERNEL_RAM_VADDR & 0xfffff) != 0x80000
-#error KERNEL_RAM_VADDR must start at 0xXXX80000
+#if (TEXT_OFFSET & 0xf) != 0
+#error TEXT_OFFSET must be at least 16B aligned
+#elif (PAGE_OFFSET & 0xfffff) != 0
+#error PAGE_OFFSET must be at least 2MB aligned
+#elif TEXT_OFFSET > 0xfffff
+#error TEXT_OFFSET must be less than 2MB
 #endif
 
-#define SWAPPER_DIR_SIZE	(3 * PAGE_SIZE)
-#define IDMAP_DIR_SIZE		(2 * PAGE_SIZE)
-
-	.globl	swapper_pg_dir
-	.equ	swapper_pg_dir, KERNEL_RAM_VADDR - SWAPPER_DIR_SIZE
-
-	.globl	idmap_pg_dir
-	.equ	idmap_pg_dir, swapper_pg_dir - IDMAP_DIR_SIZE
-
-	.macro	pgtbl, ttb0, ttb1, phys
-	add	\ttb1, \phys, #TEXT_OFFSET - SWAPPER_DIR_SIZE
-	sub	\ttb0, \ttb1, #IDMAP_DIR_SIZE
+	.macro	pgtbl, ttb0, ttb1, virt_to_phys
+	ldr	\ttb1, =swapper_pg_dir
+	ldr	\ttb0, =idmap_pg_dir
+	add	\ttb1, \ttb1, \virt_to_phys
+	add	\ttb0, \ttb0, \virt_to_phys
 	.endm
 
 #ifdef CONFIG_ARM64_64K_PAGES
 #define BLOCK_SHIFT	PAGE_SHIFT
 #define BLOCK_SIZE	PAGE_SIZE
+#define TABLE_SHIFT	PMD_SHIFT
 #else
 #define BLOCK_SHIFT	SECTION_SHIFT
 #define BLOCK_SIZE	SECTION_SIZE
+#define TABLE_SHIFT	PUD_SHIFT
 #endif
 
 #define KERNEL_START	KERNEL_RAM_VADDR
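The new guards above replace the single KERNEL_RAM_VADDR check with three independent constraints on TEXT_OFFSET and PAGE_OFFSET. A minimal C sketch of the same conditions, with illustrative values standing in for the build-provided ones:

    /* Illustrative values only; the real ones come from the kernel build. */
    #define PAGE_OFFSET	0xffffffc000000000UL
    #define TEXT_OFFSET	0x80000UL

    _Static_assert((TEXT_OFFSET & 0xf) == 0,
                   "TEXT_OFFSET must be at least 16B aligned");
    _Static_assert((PAGE_OFFSET & 0xfffff) == 0,
                   "PAGE_OFFSET must be at least 2MB aligned");
    _Static_assert(TEXT_OFFSET <= 0xfffff,
                   "TEXT_OFFSET must be less than 2MB");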
@@ -120,9 +115,9 @@ efi_head:
 	b	stext				// branch to kernel start, magic
 	.long	0				// reserved
 #endif
-	.quad	TEXT_OFFSET			// Image load offset from start of RAM
-	.quad	0				// reserved
-	.quad	0				// reserved
+	.quad	_kernel_offset_le		// Image load offset from start of RAM, little-endian
+	.quad	_kernel_size_le			// Effective size of kernel image, little-endian
+	.quad	_kernel_flags_le		// Informative flags, little-endian
 	.quad	0				// reserved
 	.quad	0				// reserved
 	.quad	0				// reserved
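The three rewritten .quad words are fields of the 64-byte arm64 Image header. A sketch of the full layout, following the shape documented in Documentation/arm64/booting.txt (the struct and its field names are the documentation's shorthand, not code from this file):

    #include <stdint.h>

    struct arm64_image_header {
        uint32_t code0;        /* executable code (branch to stext) */
        uint32_t code1;        /* executable code */
        uint64_t text_offset;  /* image load offset, little-endian */
        uint64_t image_size;   /* effective image size, little-endian */
        uint64_t flags;        /* informative flags, little-endian */
        uint64_t res2;         /* reserved */
        uint64_t res3;         /* reserved */
        uint64_t res4;         /* reserved */
        uint32_t magic;        /* 0x644d5241, "ARM\x64", little-endian */
        uint32_t res5;         /* reserved (PE/COFF header offset) */
    };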
@@ -295,6 +290,23 @@ CPU_LE(	bic	x0, x0, #(3 << 24)	)	// Clear the EE and E0E bits for EL1
 	msr	cnthctl_el2, x0
 	msr	cntvoff_el2, xzr		// Clear virtual offset
 
+#ifdef CONFIG_ARM_GIC_V3
+	/* GICv3 system register access */
+	mrs	x0, id_aa64pfr0_el1
+	ubfx	x0, x0, #24, #4
+	cmp	x0, #1
+	b.ne	3f
+
+	mrs_s	x0, ICC_SRE_EL2
+	orr	x0, x0, #ICC_SRE_EL2_SRE	// Set ICC_SRE_EL2.SRE==1
+	orr	x0, x0, #ICC_SRE_EL2_ENABLE	// Set ICC_SRE_EL2.Enable==1
+	msr_s	ICC_SRE_EL2, x0
+	isb					// Make sure SRE is now set
+	msr_s	ICH_HCR_EL2, xzr		// Reset ICH_HCR_EL2 to defaults
+
+3:
+#endif
+
 	/* Populate ID registers. */
 	mrs	x0, midr_el1
 	mrs	x1, mpidr_el1
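The added block runs only when the CPU advertises the GICv3 system register interface. A C sketch (illustrative, not kernel code) of the feature test the mrs/ubfx/cmp sequence performs:

    #include <stdint.h>

    /* ID_AA64PFR0_EL1 bits [27:24] hold the GIC field; the value 1 means
     * the GICv3 system register interface (ICC_*_ELx) is implemented. */
    static int gicv3_sysregs_present(uint64_t id_aa64pfr0)
    {
        return ((id_aa64pfr0 >> 24) & 0xf) == 1;  /* ubfx x0, x0, #24, #4 */
    }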
@@ -413,7 +425,7 @@ ENTRY(secondary_startup)
 	mov	x23, x0				// x23=current cpu_table
 	cbz	x23, __error_p			// invalid processor (x23=0)?
 
-	pgtbl	x25, x26, x24			// x25=TTBR0, x26=TTBR1
+	pgtbl	x25, x26, x28			// x25=TTBR0, x26=TTBR1
 	ldr	x12, [x23, #CPU_INFO_SETUP]
 	add	x12, x12, x28			// __virt_to_phys
 	blr	x12				// initialise processor
@@ -455,8 +467,13 @@ ENDPROC(__enable_mmu)
  * x27 = *virtual* address to jump to upon completion
  *
  * other registers depend on the function called upon completion
+ *
+ * We align the entire function to the smallest power of two larger than it to
+ * ensure it fits within a single block map entry. Otherwise were PHYS_OFFSET
+ * close to the end of a 512MB or 1GB block we might require an additional
+ * table to map the entire function.
  */
-	.align	6
+	.align	4
 __turn_mmu_on:
 	msr	sctlr_el1, x0
 	isb
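The comment's reasoning can be checked with a small helper. A sketch of the "fits within a single block map entry" condition (the function is illustrative, not from the kernel):

    #include <stdbool.h>
    #include <stdint.h>

    /* Does [start, start + size) sit inside one naturally aligned block? */
    static bool fits_one_block(uint64_t start, uint64_t size, uint64_t block)
    {
        return (start / block) == ((start + size - 1) / block);
    }

Aligning the function to the next power of two at or above its size makes this hold for any larger power-of-two block, including the 512MB (64KB pages) and 1GB (4KB pages) block sizes the comment mentions.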
@@ -479,17 +496,38 @@ ENDPROC(__calc_phys_offset)
 	.quad	PAGE_OFFSET
 
 /*
- * Macro to populate the PGD for the corresponding block entry in the next
- * level (tbl) for the given virtual address.
+ * Macro to create a table entry to the next page.
+ *
+ * tbl:	page table address
+ * virt:	virtual address
+ * shift:	#imm page table shift
+ * ptrs:	#imm pointers per table page
+ *
+ * Preserves:	virt
+ * Corrupts:	tmp1, tmp2
+ * Returns:	tbl -> next level table page address
+ */
+	.macro	create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
+	lsr	\tmp1, \virt, #\shift
+	and	\tmp1, \tmp1, #\ptrs - 1	// table index
+	add	\tmp2, \tbl, #PAGE_SIZE
+	orr	\tmp2, \tmp2, #PMD_TYPE_TABLE	// address of next table and entry type
+	str	\tmp2, [\tbl, \tmp1, lsl #3]
+	add	\tbl, \tbl, #PAGE_SIZE		// next level table page
+	.endm
+
+/*
+ * Macro to populate the PGD (and possibly PUD) for the corresponding
+ * block entry in the next level (tbl) for the given virtual address.
  *
- * Preserves:	pgd, tbl, virt
+ * Preserves:	tbl, next, virt
  * Corrupts:	tmp1, tmp2
  */
-	.macro	create_pgd_entry, pgd, tbl, virt, tmp1, tmp2
-	lsr	\tmp1, \virt, #PGDIR_SHIFT
-	and	\tmp1, \tmp1, #PTRS_PER_PGD - 1	// PGD index
-	orr	\tmp2, \tbl, #3			// PGD entry table type
-	str	\tmp2, [\pgd, \tmp1, lsl #3]
+	.macro	create_pgd_entry, tbl, virt, tmp1, tmp2
+	create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2
+#if SWAPPER_PGTABLE_LEVELS == 3
+	create_table_entry \tbl, \virt, TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2
+#endif
 	.endm
 
 /*
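For readers less fluent in assembler, a C rendering of what create_table_entry does (illustrative only; PAGE_SIZE and the descriptor type value are assumptions for a 4KB granule):

    #include <stdint.h>

    #define PAGE_SIZE       4096UL  /* assumed 4KB granule */
    #define PMD_TYPE_TABLE  3UL     /* descriptor bits [1:0] = 0b11: table */

    /* Point the entry for `virt` in `tbl` at the page that follows `tbl`
     * in memory, and hand back that next-level table page. */
    static uint64_t *create_table_entry(uint64_t *tbl, uint64_t virt,
                                        unsigned int shift, unsigned int ptrs)
    {
        unsigned int idx = (virt >> shift) & (ptrs - 1);  /* table index */
        uint64_t next = (uint64_t)tbl + PAGE_SIZE;        /* next table page */

        tbl[idx] = next | PMD_TYPE_TABLE;  /* next table address + entry type */
        return (uint64_t *)next;
    }

create_pgd_entry then applies this once at the PGD and, with three page-table levels, once more at the intermediate level.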
@@ -522,7 +560,7 @@ ENDPROC(__calc_phys_offset)
  *   - pgd entry for fixed mappings (TTBR1)
  */
 __create_page_tables:
-	pgtbl	x25, x26, x24			// idmap_pg_dir and swapper_pg_dir addresses
+	pgtbl	x25, x26, x28			// idmap_pg_dir and swapper_pg_dir addresses
 	mov	x27, lr
 
 	/*
@@ -550,10 +588,10 @@ __create_page_tables:
 	/*
 	 * Create the identity mapping.
 	 */
-	add	x0, x25, #PAGE_SIZE		// section table address
+	mov	x0, x25				// idmap_pg_dir
 	ldr	x3, =KERNEL_START
 	add	x3, x3, x28			// __pa(KERNEL_START)
-	create_pgd_entry x25, x0, x3, x5, x6
+	create_pgd_entry x0, x3, x5, x6
 	ldr	x6, =KERNEL_END
 	mov	x5, x3				// __pa(KERNEL_START)
 	add	x6, x6, x28			// __pa(KERNEL_END)
@@ -562,9 +600,9 @@ __create_page_tables:
 	/*
 	 * Map the kernel image (starting with PHYS_OFFSET).
 	 */
-	add	x0, x26, #PAGE_SIZE		// section table address
+	mov	x0, x26				// swapper_pg_dir
 	mov	x5, #PAGE_OFFSET
-	create_pgd_entry x26, x0, x5, x3, x6
+	create_pgd_entry x0, x5, x3, x6
 	ldr	x6, =KERNEL_END
 	mov	x3, x24				// phys offset
 	create_block_map x0, x7, x3, x5, x6
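The two create_pgd_entry calls build different views of the same physical bytes. An illustrative sketch (not kernel code) of the address relationships:

    #include <stdint.h>

    /* idmap: VA == PA, so the code enabling the MMU keeps executing. */
    static uint64_t idmap_va(uint64_t pa)
    {
        return pa;
    }

    /* swapper: the kernel image appears at its final virtual home. */
    static uint64_t swapper_va(uint64_t pa, uint64_t phys_offset,
                               uint64_t page_offset)
    {
        return page_offset + (pa - phys_offset);
    }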
@@ -586,13 +624,6 @@ __create_page_tables:
 	create_block_map x0, x7, x3, x5, x6
 1:
 	/*
-	 * Create the pgd entry for the fixed mappings.
-	 */
-	ldr	x5, =FIXADDR_TOP		// Fixed mapping virtual address
-	add	x0, x26, #2 * PAGE_SIZE		// section table address
-	create_pgd_entry x26, x0, x5, x6, x7
-
-	/*
 	 * Since the page tables have been populated with non-cacheable
 	 * accesses (MMU disabled), invalidate the idmap and swapper page
 	 * tables again to remove any speculatively loaded cache lines.
@@ -611,7 +642,7 @@ ENDPROC(__create_page_tables)
 __switch_data:
 	.quad	__mmap_switched
 	.quad	__bss_start			// x6
-	.quad	_end				// x7
+	.quad	__bss_stop			// x7
 	.quad	processor_id			// x4
 	.quad	__fdt_pointer			// x5
 	.quad	memstart_addr			// x6
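__bss_start and __bss_stop bound the zeroing performed in __mmap_switched. A sketch of that loop (illustrative, not the kernel's implementation); bounding it with __bss_stop instead of _end keeps the clear inside .bss proper, so anything the linker places between __bss_stop and _end survives:

    extern char __bss_start[], __bss_stop[];

    static void clear_bss(void)
    {
        for (char *p = __bss_start; p < __bss_stop; p++)
            *p = 0;
    }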