author     Linus Torvalds <torvalds@linux-foundation.org>  2016-03-17 23:03:47 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-03-17 23:03:47 -0400
commit     588ab3f9afdfa1a6b1e5761c858b2c4ab6098285 (patch)
tree       c9aa4c4f8a63d25c3cf05330c68948dceec79cc2 /arch/arm64/kernel/setup.c
parent     3d15cfdb1b77536c205d8e49c0312219ddf162ec (diff)
parent     2776e0e8ef683a42fe3e9a5facf576b73579700e (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Catalin Marinas:
 "Here are the main arm64 updates for 4.6. There are some relatively intrusive changes to support KASLR, the reworking of the kernel virtual memory layout and initial page table creation.

  Summary:

   - Initial page table creation reworked to avoid breaking large block mappings (huge pages) into smaller ones. The ARM architecture requires break-before-make in such cases to avoid TLB conflicts, but that's not always possible on live page tables

   - Kernel virtual memory layout: the kernel image is no longer linked to the bottom of the linear mapping (PAGE_OFFSET) but at the bottom of the vmalloc space, allowing the kernel to be loaded (nearly) anywhere in physical RAM

   - Kernel ASLR: position-independent kernel Image and modules randomly mapped in the vmalloc space, with the randomness provided by UEFI (efi_get_random_bytes() patches merged via the arm64 tree, acked by Matt Fleming)

   - Implement relative exception tables for arm64, required by KASLR (initial code for ARCH_HAS_RELATIVE_EXTABLE added to lib/extable.c, but the actual x86 conversion is deferred to 4.7 because of the merge dependencies)

   - Support for the User Access Override feature of ARMv8.2: this allows uaccess functions (get_user etc.) to be implemented using LDTR/STTR instructions. Such instructions, when run by the kernel, perform unprivileged accesses, adding an extra level of protection. The set_fs() macro is used to "upgrade" such instructions to privileged accesses via the UAO bit

   - Half-precision floating point support (part of ARMv8.2)

   - Optimisations for CPUs with or without a hardware prefetcher (using run-time code patching)

   - copy_page performance improvement to deal with 128 bytes at a time

   - Sanity checks on the CPU capabilities (via CPUID) to prevent incompatible secondary CPUs from being brought up (e.g. weird big.LITTLE configurations)

   - valid_user_regs() reworked for a better sanity check of the sigcontext information (restored pstate information)

   - ACPI parking protocol implementation

   - CONFIG_DEBUG_RODATA enabled by default

   - VDSO code marked as read-only

   - DEBUG_PAGEALLOC support

   - ARCH_HAS_UBSAN_SANITIZE_ALL enabled

   - Erratum workaround for Cavium ThunderX SoC

   - set_pte_at() fix for PROT_NONE mappings

   - Code clean-ups"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (99 commits)
  arm64: kasan: Fix zero shadow mapping overriding kernel image shadow
  arm64: kasan: Use actual memory node when populating the kernel image shadow
  arm64: Update PTE_RDONLY in set_pte_at() for PROT_NONE permission
  arm64: Fix misspellings in comments.
  arm64: efi: add missing frame pointer assignment
  arm64: make mrs_s prefixing implicit in read_cpuid
  arm64: enable CONFIG_DEBUG_RODATA by default
  arm64: Rework valid_user_regs
  arm64: mm: check at build time that PAGE_OFFSET divides the VA space evenly
  arm64: KVM: Move kvm_call_hyp back to its original localtion
  arm64: mm: treat memstart_addr as a signed quantity
  arm64: mm: list kernel sections in order
  arm64: lse: deal with clobbered IP registers after branch via PLT
  arm64: mm: dump: Use VA_START directly instead of private LOWEST_ADDR
  arm64: kconfig: add submenu for 8.2 architectural features
  arm64: kernel: acpi: fix ioremap in ACPI parking protocol cpu_postboot
  arm64: Add support for Half precision floating point
  arm64: Remove fixmap include fragility
  arm64: Add workaround for Cavium erratum 27456
  arm64: mm: Mark .rodata as RO
  ...
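For reference, the relative exception tables mentioned in the summary store each entry as a pair of 32-bit offsets relative to the entry itself, rather than absolute addresses, so the table stays valid wherever KASLR places the image. A minimal sketch of the idea in C (names follow the kernel's extable code, but this is an illustration, not the exact lib/extable.c source):

struct exception_table_entry {
	int insn;	/* offset to the faulting instruction, relative to this field */
	int fixup;	/* offset to the fixup code, relative to this field */
};

/* Resolve a relative entry back to an absolute address. */
static inline unsigned long ex_to_insn(const struct exception_table_entry *x)
{
	return (unsigned long)&x->insn + x->insn;
}

static inline unsigned long ex_to_fixup(const struct exception_table_entry *x)
{
	return (unsigned long)&x->fixup + x->fixup;
}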
Diffstat (limited to 'arch/arm64/kernel/setup.c')
-rw-r--r--  arch/arm64/kernel/setup.c | 36
1 file changed, 36 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 450987d99b9b..9dc67769b6a4 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -62,6 +62,7 @@
 #include <asm/memblock.h>
 #include <asm/efi.h>
 #include <asm/xen/hypervisor.h>
+#include <asm/mmu_context.h>
 
 phys_addr_t __fdt_pointer __initdata;
 
@@ -313,6 +314,12 @@ void __init setup_arch(char **cmdline_p)
 	 */
 	local_async_enable();
 
+	/*
+	 * TTBR0 is only used for the identity mapping at this stage. Make it
+	 * point to zero page to avoid speculatively fetching new entries.
+	 */
+	cpu_uninstall_idmap();
+
 	efi_init();
 	arm64_memblock_init();
 
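The cpu_uninstall_idmap() call above comes from the newly included <asm/mmu_context.h>: it points TTBR0_EL1 at the reserved zero page (as the comment says), flushes the local TLB, and switches back to the active mm if one is installed. A paraphrased sketch of the helper, not the verbatim header:

static inline void cpu_uninstall_idmap(void)
{
	struct mm_struct *mm = current->active_mm;

	cpu_set_reserved_ttbr0();	/* TTBR0_EL1 := empty_zero_page */
	local_flush_tlb_all();
	cpu_set_default_tcr_t0sz();	/* restore the default T0SZ */

	if (mm != &init_mm)
		cpu_switch_mm(mm->pgd, mm);
}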
@@ -381,3 +388,32 @@ static int __init topology_init(void)
 	return 0;
 }
 subsys_initcall(topology_init);
+
+/*
+ * Dump out kernel offset information on panic.
+ */
+static int dump_kernel_offset(struct notifier_block *self, unsigned long v,
+			      void *p)
+{
+	u64 const kaslr_offset = kimage_vaddr - KIMAGE_VADDR;
+
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset > 0) {
+		pr_emerg("Kernel Offset: 0x%llx from 0x%lx\n",
+			 kaslr_offset, KIMAGE_VADDR);
+	} else {
+		pr_emerg("Kernel Offset: disabled\n");
+	}
+	return 0;
+}
+
+static struct notifier_block kernel_offset_notifier = {
+	.notifier_call = dump_kernel_offset
+};
+
+static int __init register_kernel_offset_dumper(void)
+{
+	atomic_notifier_chain_register(&panic_notifier_list,
+				       &kernel_offset_notifier);
+	return 0;
+}
+__initcall(register_kernel_offset_dumper);
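The hunk above uses the standard panic-notifier pattern: a notifier_block is registered on panic_notifier_list and its callback runs from panic(), here printing the KASLR offset of the running image. The same mechanism is available to any other kernel code; a hypothetical, minimal example (the my_panic_* names and message are illustrative, not part of this patch):

#include <linux/kernel.h>
#include <linux/notifier.h>

/* Called from panic() via the atomic panic_notifier_list chain. */
static int my_panic_notifier(struct notifier_block *self, unsigned long ev,
			     void *data)
{
	pr_emerg("my_driver: panic notifier ran (event %lu)\n", ev);
	return NOTIFY_DONE;
}

static struct notifier_block my_panic_nb = {
	.notifier_call = my_panic_notifier,
};

static int __init my_panic_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &my_panic_nb);
	return 0;
}
__initcall(my_panic_init);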