From 021f653791ad17e03f98aaa7fb933816ae16f161 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 30 Jun 2014 16:01:31 +0100 Subject: irqchip: gic-v3: Initial support for GICv3 The Generic Interrupt Controller (version 3) offers services that are similar to GICv2, with a number of additional features: - Affinity routing based on the CPU MPIDR (ARE) - System register for the CPU interfaces (SRE) - Support for more that 8 CPUs - Locality-specific Peripheral Interrupts (LPIs) - Interrupt Translation Services (ITS) This patch adds preliminary support for GICv3 with ARE and SRE, non-secure mode only. It relies on higher exception levels to grant ARE and SRE access. Support for LPI and ITS will be added at a later time. Cc: Thomas Gleixner Cc: Jason Cooper Reviewed-by: Zi Shen Lim Reviewed-by: Christoffer Dall Reviewed-by: Tirumalesh Chalamarla Reviewed-by: Yun Wu Reviewed-by: Zhen Lei Tested-by: Tirumalesh Chalamarla Tested-by: Radha Mohan Chintakuntla Acked-by: Radha Mohan Chintakuntla Acked-by: Catalin Marinas Signed-off-by: Marc Zyngier Reviewed-by: Mark Rutland Link: https://lkml.kernel.org/r/1404140510-5382-3-git-send-email-marc.zyngier@arm.com Signed-off-by: Jason Cooper --- arch/arm64/kernel/head.S | 18 ++++++++++++++++++ arch/arm64/kernel/hyp-stub.S | 1 + 2 files changed, 19 insertions(+) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index a96d3a6a63f6..96623502519c 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -22,6 +22,7 @@ #include #include +#include #include #include @@ -296,6 +297,23 @@ CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1 msr cnthctl_el2, x0 msr cntvoff_el2, xzr // Clear virtual offset +#ifdef CONFIG_ARM_GIC_V3 + /* GICv3 system register access */ + mrs x0, id_aa64pfr0_el1 + ubfx x0, x0, #24, #4 + cmp x0, #1 + b.ne 3f + + mrs x0, ICC_SRE_EL2 + orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1 + orr x0, x0, #ICC_SRE_EL2_ENABLE // Set ICC_SRE_EL2.Enable==1 + msr ICC_SRE_EL2, x0 + isb // Make sure SRE is now set + msr ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults + +3: +#endif + /* Populate ID registers. */ mrs x0, midr_el1 mrs x1, mpidr_el1 diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S index 0959611d9ff1..a272f335c289 100644 --- a/arch/arm64/kernel/hyp-stub.S +++ b/arch/arm64/kernel/hyp-stub.S @@ -19,6 +19,7 @@ #include #include +#include #include #include -- cgit v1.2.2 From 4e6f7084096c08e37f909d7b075a91b72580405f Mon Sep 17 00:00:00 2001 From: Zi Shen Lim Date: Sat, 7 Jun 2014 01:55:27 +0100 Subject: arm64: topology: add MPIDR-based detection Create cpu topology based on MPIDR. When hardware sets MPIDR to sane values, this method will always work. Therefore it should also work well as the fallback method. [1] When we have multiple processing elements in the system, we create the cpu topology by mapping each affinity level (from lowest to highest) to threads (if they exist), cores, and clusters. 
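As a rough sketch of the mapping described above (illustrative only, not part of the patch; the bit positions are written out by hand and mirror the kernel's MPIDR_UP_BITMASK, MPIDR_MT_BITMASK and MPIDR_AFFINITY_LEVEL helpers):

    #include <stdio.h>

    #define UP_BIT          (1ULL << 30)    /* uniprocessor system */
    #define MT_BIT          (1ULL << 24)    /* threads at affinity level 0 */
    #define AFF(m, lvl)     (((m) >> ((lvl) * 8)) & 0xff)

    static void decode_mpidr(unsigned long long mpidr)
    {
        if (mpidr & UP_BIT) {
            printf("uniprocessor, default topology\n");
        } else if (mpidr & MT_BIT) {
            /* Aff0 = thread, Aff1 = core, Aff2 = cluster */
            printf("cluster %llu core %llu thread %llu\n",
                   AFF(mpidr, 2), AFF(mpidr, 1), AFF(mpidr, 0));
        } else {
            /* Aff0 = core, Aff1 = cluster, one thread per core */
            printf("cluster %llu core %llu\n",
                   AFF(mpidr, 1), AFF(mpidr, 0));
        }
    }

    int main(void)
    {
        decode_mpidr(0x102);                /* cluster 1, core 2 */
        decode_mpidr((1ULL << 24) | 0x102); /* MT set: cluster 0, core 1, thread 2 */
        return 0;
    }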
[1] http://www.spinics.net/lists/arm-kernel/msg317445.html Acked-by: Lorenzo Pieralisi Signed-off-by: Zi Shen Lim Signed-off-by: Mark Brown Signed-off-by: Catalin Marinas --- arch/arm64/kernel/topology.c | 47 +++++++++++++++++++++++++++++++------------- 1 file changed, 33 insertions(+), 14 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index 43514f905916..b6ee26b0939a 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -20,6 +20,7 @@ #include #include +#include #include static int __init get_cpu_for_node(struct device_node *node) @@ -188,13 +189,9 @@ static int __init parse_dt_topology(void) * Check that all cores are in the topology; the SMP code will * only mark cores described in the DT as possible. */ - for_each_possible_cpu(cpu) { - if (cpu_topology[cpu].cluster_id == -1) { - pr_err("CPU%d: No topology information specified\n", - cpu); + for_each_possible_cpu(cpu) + if (cpu_topology[cpu].cluster_id == -1) ret = -EINVAL; - } - } out_map: of_node_put(map); @@ -219,14 +216,6 @@ static void update_siblings_masks(unsigned int cpuid) struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid]; int cpu; - if (cpuid_topo->cluster_id == -1) { - /* - * DT does not contain topology information for this cpu. - */ - pr_debug("CPU%u: No topology information configured\n", cpuid); - return; - } - /* update core and thread sibling masks */ for_each_possible_cpu(cpu) { cpu_topo = &cpu_topology[cpu]; @@ -249,6 +238,36 @@ static void update_siblings_masks(unsigned int cpuid) void store_cpu_topology(unsigned int cpuid) { + struct cpu_topology *cpuid_topo = &cpu_topology[cpuid]; + u64 mpidr; + + if (cpuid_topo->cluster_id != -1) + goto topology_populated; + + mpidr = read_cpuid_mpidr(); + + /* Uniprocessor systems can rely on default topology values */ + if (mpidr & MPIDR_UP_BITMASK) + return; + + /* Create cpu topology mapping based on MPIDR. */ + if (mpidr & MPIDR_MT_BITMASK) { + /* Multiprocessor system : Multi-threads per core */ + cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); + cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); + cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2); + } else { + /* Multiprocessor system : Single-thread per core */ + cpuid_topo->thread_id = -1; + cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); + cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); + } + + pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n", + cpuid, cpuid_topo->cluster_id, cpuid_topo->core_id, + cpuid_topo->thread_id, mpidr); + +topology_populated: update_siblings_masks(cpuid); } -- cgit v1.2.2 From c0c264ae5112d1cdb7d37d4e208b7a7e766a7418 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Wed, 25 Jun 2014 23:55:03 +0100 Subject: arm64: Add CONFIG_CC_STACKPROTECTOR arm64 currently lacks support for -fstack-protector. Add similar functionality to arm to detect stack corruption. 
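For background, -fstack-protector makes the compiler spill a canary value onto the stack on function entry and re-check it before returning, calling a failure hook if it changed. A hand-written userspace sketch of that behaviour follows; the names guard and guard_fail are stand-ins, while the compiler-expected kernel symbol added by this patch is __stack_chk_guard (checked failures land in __stack_chk_fail()):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Stand-ins for the canary and the failure hook. */
    static unsigned long guard = 0x00beefcafe5a5aa5UL;

    static void guard_fail(void)
    {
        fprintf(stderr, "stack smashing detected\n");
        abort();
    }

    /* Roughly what the compiler emits around a protected function. */
    void copy_name(const char *src)
    {
        unsigned long canary = guard;   /* canary copy taken on entry */
        char buf[32];

        strncpy(buf, src, sizeof(buf) - 1);
        buf[sizeof(buf) - 1] = '\0';

        if (canary != guard)            /* compared before returning */
            guard_fail();
    }

    int main(int argc, char **argv)
    {
        copy_name(argc > 1 ? argv[1] : "arm64");
        return 0;
    }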
Acked-by: Will Deacon Acked-by: Kees Cook Signed-off-by: Laura Abbott Signed-off-by: Catalin Marinas --- arch/arm64/kernel/process.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 43b7c34f92cb..1309d64aa926 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -51,6 +51,12 @@ #include #include +#ifdef CONFIG_CC_STACKPROTECTOR +#include +unsigned long __stack_chk_guard __read_mostly; +EXPORT_SYMBOL(__stack_chk_guard); +#endif + static void setup_restart(void) { /* -- cgit v1.2.2 From 6ab6463aeb5fbc75fa3227befb508fc33b34dbf1 Mon Sep 17 00:00:00 2001 From: Larry Bassel Date: Fri, 30 May 2014 20:34:14 +0100 Subject: arm64: adjust el0_sync so that a function can be called To implement the context tracker properly on arm64, a function call needs to be made after debugging and interrupts are turned on, but before the lr is changed to point to ret_to_user(). If the function call is made after the lr is changed the function will not return to the correct place. For similar reasons, defer the setting of x0 so that it doesn't need to be saved around the function call (save far_el1 in x26 temporarily instead). Acked-by: Will Deacon Reviewed-by: Kevin Hilman Tested-by: Kevin Hilman Signed-off-by: Larry Bassel Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/kernel/entry.S | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 9ce04ba6bcb0..d7230bf68ad1 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -353,7 +353,6 @@ el0_sync: lsr x24, x25, #ESR_EL1_EC_SHIFT // exception class cmp x24, #ESR_EL1_EC_SVC64 // SVC in 64-bit state b.eq el0_svc - adr lr, ret_to_user cmp x24, #ESR_EL1_EC_DABT_EL0 // data abort in EL0 b.eq el0_da cmp x24, #ESR_EL1_EC_IABT_EL0 // instruction abort in EL0 @@ -382,7 +381,6 @@ el0_sync_compat: lsr x24, x25, #ESR_EL1_EC_SHIFT // exception class cmp x24, #ESR_EL1_EC_SVC32 // SVC in 32-bit state b.eq el0_svc_compat - adr lr, ret_to_user cmp x24, #ESR_EL1_EC_DABT_EL0 // data abort in EL0 b.eq el0_da cmp x24, #ESR_EL1_EC_IABT_EL0 // instruction abort in EL0 @@ -425,22 +423,25 @@ el0_da: /* * Data abort handling */ - mrs x0, far_el1 - bic x0, x0, #(0xff << 56) + mrs x26, far_el1 // enable interrupts before calling the main handler enable_dbg_and_irq + bic x0, x26, #(0xff << 56) mov x1, x25 mov x2, sp + adr lr, ret_to_user b do_mem_abort el0_ia: /* * Instruction abort handling */ - mrs x0, far_el1 + mrs x26, far_el1 // enable interrupts before calling the main handler enable_dbg_and_irq + mov x0, x26 orr x1, x25, #1 << 24 // use reserved ISS bit for instruction aborts mov x2, sp + adr lr, ret_to_user b do_mem_abort el0_fpsimd_acc: /* @@ -449,6 +450,7 @@ el0_fpsimd_acc: enable_dbg mov x0, x25 mov x1, sp + adr lr, ret_to_user b do_fpsimd_acc el0_fpsimd_exc: /* @@ -457,16 +459,19 @@ el0_fpsimd_exc: enable_dbg mov x0, x25 mov x1, sp + adr lr, ret_to_user b do_fpsimd_exc el0_sp_pc: /* * Stack or PC alignment exception handling */ - mrs x0, far_el1 + mrs x26, far_el1 // enable interrupts before calling the main handler enable_dbg_and_irq + mov x0, x26 mov x1, x25 mov x2, sp + adr lr, ret_to_user b do_sp_pc_abort el0_undef: /* @@ -475,6 +480,7 @@ el0_undef: // enable interrupts before calling the main handler enable_dbg_and_irq mov x0, sp + adr lr, ret_to_user b do_undefinstr el0_dbg: /* @@ 
-492,6 +498,7 @@ el0_inv: mov x0, sp mov x1, #BAD_SYNC mrs x2, esr_el1 + adr lr, ret_to_user b bad_mode ENDPROC(el0_sync) -- cgit v1.2.2 From 6c81fe7925cc4c42de49e17be21eb86d1173c3a7 Mon Sep 17 00:00:00 2001 From: Larry Bassel Date: Fri, 30 May 2014 12:34:15 -0700 Subject: arm64: enable context tracking Make calls to ct_user_enter when the kernel is exited and ct_user_exit when the kernel is entered (in el0_da, el0_ia, el0_svc, el0_irq and all of the "error" paths). These macros expand to function calls which will only work properly if el0_sync and related code has been rearranged (in a previous patch of this series). The calls to ct_user_exit are made after hw debugging has been enabled (enable_dbg_and_irq). The call to ct_user_enter is made at the beginning of the kernel_exit macro. This patch is based on earlier work by Kevin Hilman. Save/restore optimizations were also done by Kevin. Acked-by: Will Deacon Reviewed-by: Kevin Hilman Tested-by: Kevin Hilman Signed-off-by: Larry Bassel Signed-off-by: Kevin Hilman Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/kernel/entry.S | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index d7230bf68ad1..93ac58dbf07c 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -29,6 +29,32 @@ #include #include +/* + * Context tracking subsystem. Used to instrument transitions + * between user and kernel mode. + */ + .macro ct_user_exit, syscall = 0 +#ifdef CONFIG_CONTEXT_TRACKING + bl context_tracking_user_exit + .if \syscall == 1 + /* + * Save/restore needed during syscalls. Restore syscall arguments from + * the values already saved on stack during kernel_entry. 
+ */ + ldp x0, x1, [sp] + ldp x2, x3, [sp, #S_X2] + ldp x4, x5, [sp, #S_X4] + ldp x6, x7, [sp, #S_X6] + .endif +#endif + .endm + + .macro ct_user_enter +#ifdef CONFIG_CONTEXT_TRACKING + bl context_tracking_user_enter +#endif + .endm + /* * Bad Abort numbers *----------------- @@ -91,6 +117,7 @@ .macro kernel_exit, el, ret = 0 ldp x21, x22, [sp, #S_PC] // load ELR, SPSR .if \el == 0 + ct_user_enter ldr x23, [sp, #S_SP] // load return stack pointer .endif .if \ret @@ -426,6 +453,7 @@ el0_da: mrs x26, far_el1 // enable interrupts before calling the main handler enable_dbg_and_irq + ct_user_exit bic x0, x26, #(0xff << 56) mov x1, x25 mov x2, sp @@ -438,6 +466,7 @@ el0_ia: mrs x26, far_el1 // enable interrupts before calling the main handler enable_dbg_and_irq + ct_user_exit mov x0, x26 orr x1, x25, #1 << 24 // use reserved ISS bit for instruction aborts mov x2, sp @@ -448,6 +477,7 @@ el0_fpsimd_acc: * Floating Point or Advanced SIMD access */ enable_dbg + ct_user_exit mov x0, x25 mov x1, sp adr lr, ret_to_user @@ -457,6 +487,7 @@ el0_fpsimd_exc: * Floating Point or Advanced SIMD exception */ enable_dbg + ct_user_exit mov x0, x25 mov x1, sp adr lr, ret_to_user @@ -479,6 +510,7 @@ el0_undef: */ // enable interrupts before calling the main handler enable_dbg_and_irq + ct_user_exit mov x0, sp adr lr, ret_to_user b do_undefinstr @@ -492,9 +524,11 @@ el0_dbg: mov x2, sp bl do_debug_exception enable_dbg + ct_user_exit b ret_to_user el0_inv: enable_dbg + ct_user_exit mov x0, sp mov x1, #BAD_SYNC mrs x2, esr_el1 @@ -511,6 +545,7 @@ el0_irq_naked: bl trace_hardirqs_off #endif + ct_user_exit irq_handler #ifdef CONFIG_TRACE_IRQFLAGS @@ -615,6 +650,7 @@ el0_svc: el0_svc_naked: // compat entry point stp x0, scno, [sp, #S_ORIG_X0] // save the original x0 and syscall number enable_dbg_and_irq + ct_user_exit 1 ldr x16, [tsk, #TI_FLAGS] // check for syscall hooks tst x16, #_TIF_SYSCALL_WORK -- cgit v1.2.2 From f3e5c847ec3d12b4de7898662024ee25622b25d7 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Thu, 30 Jan 2014 17:56:56 +0000 Subject: arm64: Add __NR_* definitions for compat syscalls This patch adds __NR_* definitions to asm/unistd32.h, moves the __NR_compat_* definitions to asm/unistd.h and removes all the explicit unistd32.h includes apart from the one building the compat syscall table. The aim is to have the compat __NR_* definitions available but without colliding with the native syscall definitions (required by lib/compat_audit.c to avoid duplicating the audit header files between native and compat). Signed-off-by: Catalin Marinas --- arch/arm64/kernel/entry.S | 1 - arch/arm64/kernel/kuser32.S | 2 +- arch/arm64/kernel/signal32.c | 2 +- arch/arm64/kernel/sys_compat.c | 2 +- 4 files changed, 3 insertions(+), 4 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 93ac58dbf07c..f0b5e5120a87 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -27,7 +27,6 @@ #include #include #include -#include /* * Context tracking subsystem. Used to instrument transitions diff --git a/arch/arm64/kernel/kuser32.S b/arch/arm64/kernel/kuser32.S index 7787208e8cc6..997e6b27ff6a 100644 --- a/arch/arm64/kernel/kuser32.S +++ b/arch/arm64/kernel/kuser32.S @@ -28,7 +28,7 @@ * See Documentation/arm/kernel_user_helpers.txt for formal definitions. 
*/ -#include +#include .align 5 .globl __kuser_helper_start diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index 3491c638f172..c5ee208321c3 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c @@ -27,7 +27,7 @@ #include #include #include -#include +#include struct compat_sigcontext { /* We always set these two fields to 0 */ diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c index 26e9c4eeaba8..de2b0226e06d 100644 --- a/arch/arm64/kernel/sys_compat.c +++ b/arch/arm64/kernel/sys_compat.c @@ -26,7 +26,7 @@ #include #include -#include +#include static inline void do_compat_cache_op(unsigned long start, unsigned long end, int flags) -- cgit v1.2.2 From 5701ede884c2221e6ebbb54aec83dc433287bc50 Mon Sep 17 00:00:00 2001 From: AKASHI Takahiro Date: Fri, 4 Jul 2014 08:28:31 +0100 Subject: arm64: audit: Add audit hook in syscall_trace_enter/exit() This patch adds auditing functions on entry to or exit from every system call invocation. Acked-by: Richard Guy Briggs Acked-by Will Deacon Signed-off-by: AKASHI Takahiro Signed-off-by: Catalin Marinas --- arch/arm64/kernel/ptrace.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 9fde010c945f..70526cfda056 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -19,6 +19,7 @@ * along with this program. If not, see . */ +#include #include #include #include @@ -39,6 +40,7 @@ #include #include #include +#include #include #include @@ -1113,11 +1115,16 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs) if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) trace_sys_enter(regs, regs->syscallno); + audit_syscall_entry(syscall_get_arch(), regs->syscallno, + regs->orig_x0, regs->regs[1], regs->regs[2], regs->regs[3]); + return regs->syscallno; } asmlinkage void syscall_trace_exit(struct pt_regs *regs) { + audit_syscall_exit(regs); + if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) trace_sys_exit(regs, regs_return_value(regs)); -- cgit v1.2.2 From 909a4069da65a5cfca8c968edf9f0d99f694d2f3 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 24 Jun 2014 16:51:34 +0100 Subject: arm64: head.S: remove unnecessary function alignment Currently __turn_mmu_on is aligned to 64 bytes to ensure that it doesn't span any page boundary, which simplifies the idmap and spares us requiring an additional page table to map half of the function. In keeping with other important requirements in architecture code, this fact is undocumented. Additionally, as the function consists of three instructions totalling 12 bytes with no literal pool data, a smaller alignment of 16 bytes would be sufficient. This patch reduces the alignment to 16 bytes and documents the underlying reason for the alignment. This reduces the required alignment of the entire .head.text section from 64 bytes to 16 bytes, though it may still be aligned to a larger value depending on TEXT_OFFSET. 
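The alignment argument can be checked with a small worked example (illustrative only): a region of size S placed at an address aligned to the smallest power of two A >= S sits inside a single A-aligned slot, and every larger power-of-two block boundary (for instance a 512MB or 1GB section boundary) is itself A-aligned, so the region cannot straddle one:

    #include <assert.h>

    static unsigned long pow2_at_least(unsigned long size)
    {
        unsigned long a = 1;

        while (a < size)
            a <<= 1;
        return a;
    }

    static void check_no_straddle(unsigned long addr, unsigned long size,
                                  unsigned long block)
    {
        assert((addr & (pow2_at_least(size) - 1)) == 0);
        /* first and last byte of the region fall in the same block */
        assert(addr / block == (addr + size - 1) / block);
    }

    int main(void)
    {
        unsigned long size = 12;                    /* __turn_mmu_on is 12 bytes */
        unsigned long align = pow2_at_least(size);  /* 16 */
        unsigned long addr;

        /* any power-of-two block size works; 2MB keeps the loop short */
        for (addr = 0; addr < (1UL << 21); addr += align)
            check_no_straddle(addr, size, 1UL << 21);
        return 0;
    }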
Signed-off-by: Mark Rutland Tested-by: Laura Abbott Acked-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/kernel/head.S | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index a2c1195abb7f..6dfb21e6e105 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -455,8 +455,13 @@ ENDPROC(__enable_mmu) * x27 = *virtual* address to jump to upon completion * * other registers depend on the function called upon completion + * + * We align the entire function to the smallest power of two larger than it to + * ensure it fits within a single block map entry. Otherwise were PHYS_OFFSET + * close to the end of a 512MB or 1GB block we might require an additional + * table to map the entire function. */ - .align 6 + .align 4 __turn_mmu_on: msr sctlr_el1, x0 isb -- cgit v1.2.2 From bd00cd5f8c8c3c282bb1e1eac6a6679a4f808091 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 24 Jun 2014 16:51:35 +0100 Subject: arm64: place initial page tables above the kernel Currently we place swapper_pg_dir and idmap_pg_dir below the kernel image, between PHYS_OFFSET and (PHYS_OFFSET + TEXT_OFFSET). However, bootloaders may use portions of this memory below the kernel and we do not parse the memory reservation list until after the MMU has been enabled. As such we may clobber some memory a bootloader wishes to have preserved. To enable the use of all of this memory by bootloaders (when the required memory reservations are communicated to the kernel) it is necessary to move our initial page tables elsewhere. As we currently have an effectively unbound requirement for memory at the end of the kernel image for .bss, we can place the page tables here. This patch moves the initial page table to the end of the kernel image, after the BSS. As they do not consist of any initialised data they will be stripped from the kernel Image as with the BSS. The BSS clearing routine is updated to stop at __bss_stop rather than _end so as to not clobber the page tables, and memory reservations made redundant by the new organisation are removed. Signed-off-by: Mark Rutland Tested-by: Laura Abbott Acked-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/kernel/head.S | 28 ++++++++-------------------- arch/arm64/kernel/vmlinux.lds.S | 7 +++++++ 2 files changed, 15 insertions(+), 20 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 6dfb21e6e105..15d3c02a38d0 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -35,29 +35,17 @@ #include #include -/* - * swapper_pg_dir is the virtual address of the initial page table. We place - * the page tables 3 * PAGE_SIZE below KERNEL_RAM_VADDR. The idmap_pg_dir has - * 2 pages and is placed below swapper_pg_dir. 
- */ #define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET) #if (KERNEL_RAM_VADDR & 0xfffff) != 0x80000 #error KERNEL_RAM_VADDR must start at 0xXXX80000 #endif -#define SWAPPER_DIR_SIZE (3 * PAGE_SIZE) -#define IDMAP_DIR_SIZE (2 * PAGE_SIZE) - - .globl swapper_pg_dir - .equ swapper_pg_dir, KERNEL_RAM_VADDR - SWAPPER_DIR_SIZE - - .globl idmap_pg_dir - .equ idmap_pg_dir, swapper_pg_dir - IDMAP_DIR_SIZE - - .macro pgtbl, ttb0, ttb1, phys - add \ttb1, \phys, #TEXT_OFFSET - SWAPPER_DIR_SIZE - sub \ttb0, \ttb1, #IDMAP_DIR_SIZE + .macro pgtbl, ttb0, ttb1, virt_to_phys + ldr \ttb1, =swapper_pg_dir + ldr \ttb0, =idmap_pg_dir + add \ttb1, \ttb1, \virt_to_phys + add \ttb0, \ttb0, \virt_to_phys .endm #ifdef CONFIG_ARM64_64K_PAGES @@ -413,7 +401,7 @@ ENTRY(secondary_startup) mov x23, x0 // x23=current cpu_table cbz x23, __error_p // invalid processor (x23=0)? - pgtbl x25, x26, x24 // x25=TTBR0, x26=TTBR1 + pgtbl x25, x26, x28 // x25=TTBR0, x26=TTBR1 ldr x12, [x23, #CPU_INFO_SETUP] add x12, x12, x28 // __virt_to_phys blr x12 // initialise processor @@ -527,7 +515,7 @@ ENDPROC(__calc_phys_offset) * - pgd entry for fixed mappings (TTBR1) */ __create_page_tables: - pgtbl x25, x26, x24 // idmap_pg_dir and swapper_pg_dir addresses + pgtbl x25, x26, x28 // idmap_pg_dir and swapper_pg_dir addresses mov x27, lr /* @@ -616,7 +604,7 @@ ENDPROC(__create_page_tables) __switch_data: .quad __mmap_switched .quad __bss_start // x6 - .quad _end // x7 + .quad __bss_stop // x7 .quad processor_id // x4 .quad __fdt_pointer // x5 .quad memstart_addr // x6 diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index f1e6d5c032e1..c6648d301adf 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -104,6 +104,13 @@ SECTIONS _edata = .; BSS_SECTION(0, 0, 0) + + . = ALIGN(PAGE_SIZE); + idmap_pg_dir = .; + . += IDMAP_DIR_SIZE; + swapper_pg_dir = .; + . += SWAPPER_DIR_SIZE; + _end = .; STABS_DEBUG -- cgit v1.2.2 From a2c1d73b94ed49f5fac12e95052d7b140783f800 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 24 Jun 2014 16:51:36 +0100 Subject: arm64: Update the Image header Currently the kernel Image is stripped of everything past the initial stack, and at runtime the memory is initialised and used by the kernel. This makes the effective minimum memory footprint of the kernel larger than the size of the loaded binary, though bootloaders have no mechanism to identify how large this minimum memory footprint is. This makes it difficult to choose safe locations to place both the kernel and other binaries required at boot (DTB, initrd, etc), such that the kernel won't clobber said binaries or other reserved memory during initialisation. Additionally when big endian support was added the image load offset was overlooked, and is currently of an arbitrary endianness, which makes it difficult for bootloaders to make use of it. It seems that bootloaders aren't respecting the image load offset at present anyway, and are assuming that offset 0x80000 will always be correct. This patch adds an effective image size to the kernel header which describes the amount of memory from the start of the kernel Image binary which the kernel expects to use before detecting memory and handling any memory reservations. This can be used by bootloaders to choose suitable locations to load the kernel and/or other binaries such that the kernel will not clobber any memory unexpectedly. As before, memory reservations are required to prevent the kernel from clobbering these locations later. 
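For reference, a bootloader-side view of the header being extended here, assuming the field layout documented in Documentation/arm64/booting.txt (the struct below is a sketch, not a definitive definition):

    #include <stdint.h>

    struct arm64_image_header {
        uint32_t code0;        /* executable code */
        uint32_t code1;        /* executable code */
        uint64_t text_offset;  /* image load offset, little-endian */
        uint64_t image_size;   /* effective image size, little-endian */
        uint64_t flags;        /* informative flags, little-endian */
        uint64_t res2;         /* reserved */
        uint64_t res3;         /* reserved */
        uint64_t res4;         /* reserved */
        uint32_t magic;        /* 0x644d5241, "ARM\x64" */
        uint32_t res5;         /* reserved */
    };

A loader can reserve [load address, load address + image_size) for the kernel; a zero image_size identifies an older kernel where this field was still reserved.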
Both the image load offset and the effective image size are forced to be little-endian regardless of the native endianness of the kernel to enable bootloaders to load a kernel of arbitrary endianness. Bootloaders which wish to make use of the load offset can inspect the effective image size field for a non-zero value to determine if the offset is of a known endianness. To enable software to determine the endinanness of the kernel as may be required for certain use-cases, a new flags field (also little-endian) is added to the kernel header to export this information. The documentation is updated to clarify these details. To discourage future assumptions regarding the value of text_offset, the value at this point in time is removed from the main flow of the documentation (though kept as a compatibility note). Some minor formatting issues in the documentation are also corrected. Signed-off-by: Mark Rutland Acked-by: Tom Rini Cc: Geoff Levand Cc: Kevin Hilman Acked-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/kernel/head.S | 6 ++-- arch/arm64/kernel/image.h | 62 +++++++++++++++++++++++++++++++++++++++++ arch/arm64/kernel/vmlinux.lds.S | 4 +++ 3 files changed, 69 insertions(+), 3 deletions(-) create mode 100644 arch/arm64/kernel/image.h (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 15d3c02a38d0..3ba0fc02c0de 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -108,9 +108,9 @@ efi_head: b stext // branch to kernel start, magic .long 0 // reserved #endif - .quad TEXT_OFFSET // Image load offset from start of RAM - .quad 0 // reserved - .quad 0 // reserved + .quad _kernel_offset_le // Image load offset from start of RAM, little-endian + .quad _kernel_size_le // Effective size of kernel image, little-endian + .quad _kernel_flags_le // Informative flags, little-endian .quad 0 // reserved .quad 0 // reserved .quad 0 // reserved diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h new file mode 100644 index 000000000000..8fae0756e175 --- /dev/null +++ b/arch/arm64/kernel/image.h @@ -0,0 +1,62 @@ +/* + * Linker script macros to generate Image header fields. + * + * Copyright (C) 2014 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef __ASM_IMAGE_H +#define __ASM_IMAGE_H + +#ifndef LINKER_SCRIPT +#error This file should only be included in vmlinux.lds.S +#endif + +/* + * There aren't any ELF relocations we can use to endian-swap values known only + * at link time (e.g. the subtraction of two symbol addresses), so we must get + * the linker to endian-swap certain values before emitting them. 
+ */ +#ifdef CONFIG_CPU_BIG_ENDIAN +#define DATA_LE64(data) \ + ((((data) & 0x00000000000000ff) << 56) | \ + (((data) & 0x000000000000ff00) << 40) | \ + (((data) & 0x0000000000ff0000) << 24) | \ + (((data) & 0x00000000ff000000) << 8) | \ + (((data) & 0x000000ff00000000) >> 8) | \ + (((data) & 0x0000ff0000000000) >> 24) | \ + (((data) & 0x00ff000000000000) >> 40) | \ + (((data) & 0xff00000000000000) >> 56)) +#else +#define DATA_LE64(data) ((data) & 0xffffffffffffffff) +#endif + +#ifdef CONFIG_CPU_BIG_ENDIAN +#define __HEAD_FLAG_BE 1 +#else +#define __HEAD_FLAG_BE 0 +#endif + +#define __HEAD_FLAGS (__HEAD_FLAG_BE << 0) + +/* + * These will output as part of the Image header, which should be little-endian + * regardless of the endianness of the kernel. While constant values could be + * endian swapped in head.S, all are done here for consistency. + */ +#define HEAD_SYMBOLS \ + _kernel_size_le = DATA_LE64(_end - _text); \ + _kernel_offset_le = DATA_LE64(TEXT_OFFSET); \ + _kernel_flags_le = DATA_LE64(__HEAD_FLAGS); + +#endif /* __ASM_IMAGE_H */ diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index c6648d301adf..a814768ae148 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -9,6 +9,8 @@ #include #include +#include "image.h" + #define ARM_EXIT_KEEP(x) #define ARM_EXIT_DISCARD(x) x @@ -114,6 +116,8 @@ SECTIONS _end = .; STABS_DEBUG + + HEAD_SYMBOLS } /* -- cgit v1.2.2 From da57a369d3bc5cd61db90f7e9555840381db9b09 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 24 Jun 2014 16:51:37 +0100 Subject: arm64: Enable TEXT_OFFSET fuzzing The arm64 Image header contains a text_offset field which bootloaders are supposed to read to determine the offset (from a 2MB aligned "start of memory" per booting.txt) at which to load the kernel. The offset is not well respected by bootloaders at present, and due to the lack of variation there is little incentive to support it. This is unfortunate for the sake of future kernels where we may wish to vary the text offset (even zeroing it). This patch adds options to arm64 to enable fuzz-testing of text_offset. CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET forces the text offset to a random 16-byte aligned value value in the range [0..2MB) upon a build of the kernel. It is recommended that distribution kernels enable randomization to test bootloaders such that any compliance issues can be fixed early. 
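The effect can be pictured with a short sketch that draws an offset meeting the same constraints the new head.S checks enforce (illustrative only; this is not the kernel's build-time implementation):

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    #define SZ_2M   (2UL * 1024 * 1024)

    int main(void)
    {
        unsigned long text_offset;

        srand(time(NULL));
        /* random multiple of 16 in [0, 2MB) */
        text_offset = ((unsigned long)rand() % (SZ_2M / 16)) * 16;

        /* same constraints as the #error checks added below */
        if ((text_offset & 0xf) != 0 || text_offset >= SZ_2M)
            return 1;

        printf("TEXT_OFFSET := %#lx\n", text_offset);
        return 0;
    }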
Signed-off-by: Mark Rutland Acked-by: Tom Rini Acked-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/kernel/head.S | 8 ++++++-- arch/arm64/kernel/vmlinux.lds.S | 5 +++++ 2 files changed, 11 insertions(+), 2 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 3ba0fc02c0de..69dafe9621fd 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -37,8 +37,12 @@ #define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET) -#if (KERNEL_RAM_VADDR & 0xfffff) != 0x80000 -#error KERNEL_RAM_VADDR must start at 0xXXX80000 +#if (TEXT_OFFSET & 0xf) != 0 +#error TEXT_OFFSET must be at least 16B aligned +#elif (PAGE_OFFSET & 0xfffff) != 0 +#error PAGE_OFFSET must be at least 2MB aligned +#elif TEXT_OFFSET > 0xfffff +#error TEXT_OFFSET must be less than 2MB #endif .macro pgtbl, ttb0, ttb1, virt_to_phys diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index a814768ae148..97f0c0429dfa 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -125,3 +125,8 @@ SECTIONS */ ASSERT(((__hyp_idmap_text_start + PAGE_SIZE) > __hyp_idmap_text_end), "HYP init code too big") + +/* + * If padding is applied before .head.text, virt<->phys conversions will fail. + */ +ASSERT(_text == (PAGE_OFFSET + TEXT_OFFSET), "HEAD is misaligned") -- cgit v1.2.2 From ac7b406c1a9d50ddbf5e5cbce8ca4d68d36ac2db Mon Sep 17 00:00:00 2001 From: Jungseok Lee Date: Mon, 12 May 2014 10:40:30 +0100 Subject: arm64: Use pr_* instead of printk This patch fixed the following checkpatch complaint as using pr_* instead of printk. WARNING: printk() should include KERN_ facility level Signed-off-by: Jungseok Lee Reviewed-by: Sungjinn Chung Acked-by: Kukjin Kim Signed-off-by: Catalin Marinas --- arch/arm64/kernel/traps.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index c43cfa9b8304..506f7814e305 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -156,7 +156,7 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) frame.pc = thread_saved_pc(tsk); } - printk("Call trace:\n"); + pr_emerg("Call trace:\n"); while (1) { unsigned long where = frame.pc; int ret; @@ -331,17 +331,17 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr) void __pte_error(const char *file, int line, unsigned long val) { - printk("%s:%d: bad pte %016lx.\n", file, line, val); + pr_crit("%s:%d: bad pte %016lx.\n", file, line, val); } void __pmd_error(const char *file, int line, unsigned long val) { - printk("%s:%d: bad pmd %016lx.\n", file, line, val); + pr_crit("%s:%d: bad pmd %016lx.\n", file, line, val); } void __pgd_error(const char *file, int line, unsigned long val) { - printk("%s:%d: bad pgd %016lx.\n", file, line, val); + pr_crit("%s:%d: bad pgd %016lx.\n", file, line, val); } void __init trap_init(void) -- cgit v1.2.2 From 8715493852783358ef8656a0054a14bf822509cf Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 9 Jul 2014 19:22:11 +0100 Subject: arm64: vdso: put vdso datapage in a separate vma The VDSO datapage doesn't need to be executable (no code there) or CoW-able (the kernel writes the page, so a private copy is totally useless). This patch moves the datapage into its own VMA, identified as "[vvar]" in /proc//maps. 
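Once applied, the new mapping can be observed from userspace, for example:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char line[256];
        FILE *f = fopen("/proc/self/maps", "r");

        if (!f)
            return 1;

        /* print this process's vDSO code and data mappings */
        while (fgets(line, sizeof(line), f))
            if (strstr(line, "[vdso]") || strstr(line, "[vvar]"))
                fputs(line, stdout);

        fclose(f);
        return 0;
    }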
Cc: Andy Lutomirski Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/kernel/vdso.c | 34 ++++++++++++++++++++++++++-------- 1 file changed, 26 insertions(+), 8 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index 50384fec56c4..84cafbc3eb54 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -138,11 +138,12 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mm_struct *mm = current->mm; - unsigned long vdso_base, vdso_mapping_len; + unsigned long vdso_base, vdso_text_len, vdso_mapping_len; int ret; + vdso_text_len = vdso_pages << PAGE_SHIFT; /* Be sure to map the data page */ - vdso_mapping_len = (vdso_pages + 1) << PAGE_SHIFT; + vdso_mapping_len = vdso_text_len + PAGE_SIZE; down_write(&mm->mmap_sem); vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0); @@ -152,35 +153,52 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, } mm->context.vdso = (void *)vdso_base; - ret = install_special_mapping(mm, vdso_base, vdso_mapping_len, + ret = install_special_mapping(mm, vdso_base, vdso_text_len, VM_READ|VM_EXEC| VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, vdso_pagelist); - if (ret) { - mm->context.vdso = NULL; + if (ret) + goto up_fail; + + vdso_base += vdso_text_len; + ret = install_special_mapping(mm, vdso_base, PAGE_SIZE, + VM_READ|VM_MAYREAD, + vdso_pagelist + vdso_pages); + if (ret) goto up_fail; - } -up_fail: up_write(&mm->mmap_sem); + return 0; +up_fail: + mm->context.vdso = NULL; + up_write(&mm->mmap_sem); return ret; } const char *arch_vma_name(struct vm_area_struct *vma) { + unsigned long vdso_text; + + if (!vma->vm_mm) + return NULL; + + vdso_text = (unsigned long)vma->vm_mm->context.vdso; + /* * We can re-use the vdso pointer in mm_context_t for identifying * the vectors page for compat applications. The vDSO will always * sit above TASK_UNMAPPED_BASE and so we don't need to worry about * it conflicting with the vectors base. */ - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) { + if (vma->vm_start == vdso_text) { #ifdef CONFIG_COMPAT if (vma->vm_start == AARCH32_VECTORS_BASE) return "[vectors]"; #endif return "[vdso]"; + } else if (vma->vm_start == (vdso_text + (vdso_pages << PAGE_SHIFT))) { + return "[vvar]"; } return NULL; -- cgit v1.2.2 From 2fea7f6c98f5957e539eb8aa0ce849729b900342 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 9 Jul 2014 19:22:12 +0100 Subject: arm64: vdso: move to _install_special_mapping and remove arch_vma_name _install_special_mapping replaces install_special_mapping and removes the need to detect special VMA in arch_vma_name. This patch moves the vdso and compat vectors page code over to the new API. 
Cc: Andy Lutomirski Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/kernel/vdso.c | 80 +++++++++++++++++++++--------------------------- 1 file changed, 35 insertions(+), 45 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index 84cafbc3eb54..60ae12087d9f 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -88,22 +88,29 @@ int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp) { struct mm_struct *mm = current->mm; unsigned long addr = AARCH32_VECTORS_BASE; - int ret; + static struct vm_special_mapping spec = { + .name = "[vectors]", + .pages = vectors_page, + + }; + void *ret; down_write(&mm->mmap_sem); current->mm->context.vdso = (void *)addr; /* Map vectors page at the high address. */ - ret = install_special_mapping(mm, addr, PAGE_SIZE, - VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC, - vectors_page); + ret = _install_special_mapping(mm, addr, PAGE_SIZE, + VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC, + &spec); up_write(&mm->mmap_sem); - return ret; + return PTR_ERR_OR_ZERO(ret); } #endif /* CONFIG_COMPAT */ +static struct vm_special_mapping vdso_spec[2]; + static int __init vdso_init(void) { int i; @@ -130,6 +137,17 @@ static int __init vdso_init(void) /* Grab the vDSO data page. */ vdso_pagelist[i] = virt_to_page(vdso_data); + /* Populate the special mapping structures */ + vdso_spec[0] = (struct vm_special_mapping) { + .name = "[vdso]", + .pages = vdso_pagelist, + }; + + vdso_spec[1] = (struct vm_special_mapping) { + .name = "[vvar]", + .pages = vdso_pagelist + vdso_pages, + }; + return 0; } arch_initcall(vdso_init); @@ -139,7 +157,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, { struct mm_struct *mm = current->mm; unsigned long vdso_base, vdso_text_len, vdso_mapping_len; - int ret; + void *ret; vdso_text_len = vdso_pages << PAGE_SHIFT; /* Be sure to map the data page */ @@ -148,23 +166,23 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, down_write(&mm->mmap_sem); vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0); if (IS_ERR_VALUE(vdso_base)) { - ret = vdso_base; + ret = ERR_PTR(vdso_base); goto up_fail; } mm->context.vdso = (void *)vdso_base; - ret = install_special_mapping(mm, vdso_base, vdso_text_len, - VM_READ|VM_EXEC| - VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, - vdso_pagelist); - if (ret) + ret = _install_special_mapping(mm, vdso_base, vdso_text_len, + VM_READ|VM_EXEC| + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, + &vdso_spec[0]); + if (IS_ERR(ret)) goto up_fail; vdso_base += vdso_text_len; - ret = install_special_mapping(mm, vdso_base, PAGE_SIZE, - VM_READ|VM_MAYREAD, - vdso_pagelist + vdso_pages); - if (ret) + ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE, + VM_READ|VM_MAYREAD, + &vdso_spec[1]); + if (IS_ERR(ret)) goto up_fail; up_write(&mm->mmap_sem); @@ -173,35 +191,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, up_fail: mm->context.vdso = NULL; up_write(&mm->mmap_sem); - return ret; -} - -const char *arch_vma_name(struct vm_area_struct *vma) -{ - unsigned long vdso_text; - - if (!vma->vm_mm) - return NULL; - - vdso_text = (unsigned long)vma->vm_mm->context.vdso; - - /* - * We can re-use the vdso pointer in mm_context_t for identifying - * the vectors page for compat applications. The vDSO will always - * sit above TASK_UNMAPPED_BASE and so we don't need to worry about - * it conflicting with the vectors base. 
- */ - if (vma->vm_start == vdso_text) { -#ifdef CONFIG_COMPAT - if (vma->vm_start == AARCH32_VECTORS_BASE) - return "[vectors]"; -#endif - return "[vdso]"; - } else if (vma->vm_start == (vdso_text + (vdso_pages << PAGE_SHIFT))) { - return "[vvar]"; - } - - return NULL; + return PTR_ERR(ret); } /* -- cgit v1.2.2 From 601255ae3c98fdeeee3a8bb4696425e4f868b4f1 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 9 Jul 2014 19:22:13 +0100 Subject: arm64: vdso: move data page before code pages Andy pointed out that binutils generates additional sections in the vdso image (e.g. section string table) which, if our .text section gets big enough, could cross a page boundary and end up screwing up the location where the kernel expects to put the data page. This patch solves the issue in the same manner as x86_32, by moving the data page before the code pages. Cc: Andy Lutomirski Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/kernel/vdso.c | 34 +++++++++++++++++----------------- arch/arm64/kernel/vdso/vdso.lds.S | 4 +--- 2 files changed, 18 insertions(+), 20 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index 60ae12087d9f..24f2e8c62479 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -121,8 +121,8 @@ static int __init vdso_init(void) } vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT; - pr_info("vdso: %ld pages (%ld code, %ld data) at base %p\n", - vdso_pages + 1, vdso_pages, 1L, &vdso_start); + pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n", + vdso_pages + 1, vdso_pages, &vdso_start, 1L, vdso_data); /* Allocate the vDSO pagelist, plus a page for the data. */ vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *), @@ -130,22 +130,22 @@ static int __init vdso_init(void) if (vdso_pagelist == NULL) return -ENOMEM; + /* Grab the vDSO data page. */ + vdso_pagelist[0] = virt_to_page(vdso_data); + /* Grab the vDSO code pages. */ for (i = 0; i < vdso_pages; i++) - vdso_pagelist[i] = virt_to_page(&vdso_start + i * PAGE_SIZE); - - /* Grab the vDSO data page. 
*/ - vdso_pagelist[i] = virt_to_page(vdso_data); + vdso_pagelist[i + 1] = virt_to_page(&vdso_start + i * PAGE_SIZE); /* Populate the special mapping structures */ vdso_spec[0] = (struct vm_special_mapping) { - .name = "[vdso]", + .name = "[vvar]", .pages = vdso_pagelist, }; vdso_spec[1] = (struct vm_special_mapping) { - .name = "[vvar]", - .pages = vdso_pagelist + vdso_pages, + .name = "[vdso]", + .pages = &vdso_pagelist[1], }; return 0; @@ -169,22 +169,22 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, ret = ERR_PTR(vdso_base); goto up_fail; } - mm->context.vdso = (void *)vdso_base; - - ret = _install_special_mapping(mm, vdso_base, vdso_text_len, - VM_READ|VM_EXEC| - VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, + ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE, + VM_READ|VM_MAYREAD, &vdso_spec[0]); if (IS_ERR(ret)) goto up_fail; - vdso_base += vdso_text_len; - ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE, - VM_READ|VM_MAYREAD, + vdso_base += PAGE_SIZE; + mm->context.vdso = (void *)vdso_base; + ret = _install_special_mapping(mm, vdso_base, vdso_text_len, + VM_READ|VM_EXEC| + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, &vdso_spec[1]); if (IS_ERR(ret)) goto up_fail; + up_write(&mm->mmap_sem); return 0; diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S index 8154b8d1c826..beca249bc2f3 100644 --- a/arch/arm64/kernel/vdso/vdso.lds.S +++ b/arch/arm64/kernel/vdso/vdso.lds.S @@ -28,6 +28,7 @@ OUTPUT_ARCH(aarch64) SECTIONS { + PROVIDE(_vdso_data = . - PAGE_SIZE); . = VDSO_LBASE + SIZEOF_HEADERS; .hash : { *(.hash) } :text @@ -57,9 +58,6 @@ SECTIONS _end = .; PROVIDE(end = .); - . = ALIGN(PAGE_SIZE); - PROVIDE(_vdso_data = .); - /DISCARD/ : { *(.note.GNU-stack) *(.data .data.* .gnu.linkonce.d.* .sdata*) -- cgit v1.2.2 From ad789ba5f7086138461420d2156478d33fb61077 Mon Sep 17 00:00:00 2001 From: Ian Campbell Date: Tue, 15 Jul 2014 08:38:08 +0100 Subject: arm64: Align the kbuild output for VDSOL and VDSOA Signed-off-by: Ian Campbell Cc: Catalin Marinas Cc: Will Deacon Cc: Michal Marek Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kbuild@vger.kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/vdso/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile index 6d20b7d162d8..84b942612051 100644 --- a/arch/arm64/kernel/vdso/Makefile +++ b/arch/arm64/kernel/vdso/Makefile @@ -47,9 +47,9 @@ $(obj-vdso): %.o: %.S $(call if_changed_dep,vdsoas) # Actual build commands -quiet_cmd_vdsold = VDSOL $@ +quiet_cmd_vdsold = VDSOL $@ cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@ -quiet_cmd_vdsoas = VDSOA $@ +quiet_cmd_vdsoas = VDSOA $@ cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $< # Install commands for the unstripped file -- cgit v1.2.2 From 756854d9b99a735f86bc3b86df5c19be12e8746e Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Thu, 17 Jul 2014 18:19:18 +0100 Subject: arm64: kernel: enable PSCI cpu operations on UP systems PSCI CPU operations have to be enabled on UP kernels so that calls like eg cpu_suspend can be made functional on UP too. This patch reworks the PSCI CPU operations so that they can be enabled on UP systems. 
Acked-by: Mark Rutland Signed-off-by: Lorenzo Pieralisi Signed-off-by: Catalin Marinas --- arch/arm64/kernel/cpu_ops.c | 2 +- arch/arm64/kernel/psci.c | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c index d62d12fb36c8..cce952440c64 100644 --- a/arch/arm64/kernel/cpu_ops.c +++ b/arch/arm64/kernel/cpu_ops.c @@ -30,8 +30,8 @@ const struct cpu_operations *cpu_ops[NR_CPUS]; static const struct cpu_operations *supported_cpu_ops[] __initconst = { #ifdef CONFIG_SMP &smp_spin_table_ops, - &cpu_psci_ops, #endif + &cpu_psci_ops, NULL, }; diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c index 9e9798f91172..a623c44a6bc4 100644 --- a/arch/arm64/kernel/psci.c +++ b/arch/arm64/kernel/psci.c @@ -434,9 +434,11 @@ static int cpu_psci_cpu_kill(unsigned int cpu) return 0; } #endif +#endif const struct cpu_operations cpu_psci_ops = { .name = "psci", +#ifdef CONFIG_SMP .cpu_init = cpu_psci_cpu_init, .cpu_prepare = cpu_psci_cpu_prepare, .cpu_boot = cpu_psci_cpu_boot, @@ -445,6 +447,6 @@ const struct cpu_operations cpu_psci_ops = { .cpu_die = cpu_psci_cpu_die, .cpu_kill = cpu_psci_cpu_kill, #endif +#endif }; -#endif -- cgit v1.2.2 From b9e97ef93c630404f305350d88d09391d1a55648 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Thu, 17 Jul 2014 18:19:19 +0100 Subject: arm64: kernel: add __init marker to PSCI init functions PSCI init functions must be marked as __init so that they are freed by the kernel upon boot. This patch marks the PSCI init functions as such since they need not be persistent in the kernel address space after the kernel has booted. Signed-off-by: Lorenzo Pieralisi Signed-off-by: Catalin Marinas --- arch/arm64/kernel/psci.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c index a623c44a6bc4..553954771a67 100644 --- a/arch/arm64/kernel/psci.c +++ b/arch/arm64/kernel/psci.c @@ -235,7 +235,7 @@ static void psci_sys_poweroff(void) * PSCI Function IDs for v0.2+ are well defined so use * standard values. */ -static int psci_0_2_init(struct device_node *np) +static int __init psci_0_2_init(struct device_node *np) { int err, ver; @@ -296,7 +296,7 @@ out_put_node: /* * PSCI < v0.2 get PSCI Function IDs via DT. */ -static int psci_0_1_init(struct device_node *np) +static int __init psci_0_1_init(struct device_node *np) { u32 id; int err; -- cgit v1.2.2 From 18ab7db6b749ac27aac08d572afbbd2f4d937934 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Thu, 17 Jul 2014 18:19:20 +0100 Subject: arm64: kernel: add missing __init section marker to cpu_suspend_init Suspend init function must be marked as __init, since it is not needed after the kernel has booted. This patch moves the cpu_suspend_init() function to the __init section. 
Signed-off-by: Lorenzo Pieralisi Signed-off-by: Catalin Marinas --- arch/arm64/kernel/suspend.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c index 1fa9ce4afd8f..55a99b9a97e0 100644 --- a/arch/arm64/kernel/suspend.c +++ b/arch/arm64/kernel/suspend.c @@ -119,7 +119,7 @@ int cpu_suspend(unsigned long arg) extern struct sleep_save_sp sleep_save_sp; extern phys_addr_t sleep_idmap_phys; -static int cpu_suspend_init(void) +static int __init cpu_suspend_init(void) { void *ctx_ptr; -- cgit v1.2.2 From df857416a13734ed9356f6e4f0152d55e4fb748a Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 16 Jul 2014 16:32:44 +0100 Subject: arm64: cpuinfo: record cpu system register values Several kernel subsystems need to know details about CPU system register values, sometimes for CPUs other than that they are executing on. Rather than hard-coding system register accesses and cross-calls for these cases, this patch adds logic to record various system register values at boot-time. This may be used for feature reporting, firmware bug detection, etc. Separate hooks are added for the boot and hotplug paths to enable one-time intialisation and cold/warm boot value mismatch detection in later patches. Signed-off-by: Mark Rutland Reviewed-by: Will Deacon Reviewed-by: Catalin Marinas Signed-off-by: Catalin Marinas --- arch/arm64/kernel/Makefile | 3 +- arch/arm64/kernel/cpuinfo.c | 73 +++++++++++++++++++++++++++++++++++++++++++++ arch/arm64/kernel/setup.c | 7 +++-- arch/arm64/kernel/smp.c | 6 ++++ 4 files changed, 85 insertions(+), 4 deletions(-) create mode 100644 arch/arm64/kernel/cpuinfo.c (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index cdaedad3afe5..27c72ef4fd7a 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -15,7 +15,8 @@ CFLAGS_REMOVE_return_address.o = -pg arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \ entry-fpsimd.o process.o ptrace.o setup.o signal.o \ sys.o stacktrace.o time.o traps.o io.o vdso.o \ - hyp-stub.o psci.o cpu_ops.o insn.o return_address.o + hyp-stub.o psci.o cpu_ops.o insn.o return_address.o \ + cpuinfo.o arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ sys_compat.o diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c new file mode 100644 index 000000000000..1f350fe5b318 --- /dev/null +++ b/arch/arm64/kernel/cpuinfo.c @@ -0,0 +1,73 @@ +/* + * Record and handle CPU attributes. + * + * Copyright (C) 2014 ARM Ltd. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include +#include +#include + +#include +#include + +/* + * In case the boot CPU is hotpluggable, we record its initial state and + * current state separately. Certain system registers may contain different + * values depending on configuration at or after reset. 
+ */ +DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data); +static struct cpuinfo_arm64 boot_cpu_data; + +static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) +{ + info->reg_cntfrq = arch_timer_get_cntfrq(); + info->reg_ctr = read_cpuid_cachetype(); + info->reg_dczid = read_cpuid(DCZID_EL0); + info->reg_midr = read_cpuid_id(); + + info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1); + info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1); + info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1); + info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1); + info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1); + info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1); + + info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1); + info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1); + info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1); + info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1); + info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1); + info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1); + info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1); + info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1); + info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1); + info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1); + info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1); + info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1); +} + +void cpuinfo_store_cpu(void) +{ + struct cpuinfo_arm64 *info = this_cpu_ptr(&cpu_data); + __cpuinfo_store_cpu(info); +} + +void __init cpuinfo_store_boot_cpu(void) +{ + struct cpuinfo_arm64 *info = &per_cpu(cpu_data, 0); + __cpuinfo_store_cpu(info); + + boot_cpu_data = *info; +} diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 46d1125571f6..edb146d01857 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -45,6 +45,7 @@ #include #include +#include #include #include #include @@ -219,6 +220,8 @@ static void __init setup_processor(void) sprintf(init_utsname()->machine, ELF_PLATFORM); elf_hwcap = 0; + cpuinfo_store_boot_cpu(); + /* * Check for sane CTR_EL0.CWG value. */ @@ -417,14 +420,12 @@ static int __init arm64_device_init(void) } arch_initcall_sync(arm64_device_init); -static DEFINE_PER_CPU(struct cpu, cpu_data); - static int __init topology_init(void) { int i; for_each_possible_cpu(i) { - struct cpu *cpu = &per_cpu(cpu_data, i); + struct cpu *cpu = &per_cpu(cpu_data.cpu, i); cpu->hotpluggable = 1; register_cpu(cpu, i); } diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 40f38f46c8e0..3e2f5ebbf63e 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -39,6 +39,7 @@ #include #include +#include #include #include #include @@ -154,6 +155,11 @@ asmlinkage void secondary_start_kernel(void) if (cpu_ops[cpu]->cpu_postboot) cpu_ops[cpu]->cpu_postboot(); + /* + * Log the CPU info before it is marked online and might get read. + */ + cpuinfo_store_cpu(); + /* * Enable GIC and timers. */ -- cgit v1.2.2 From 59ccc0d41b7a60c82c636cd056021f522c30557e Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 16 Jul 2014 16:32:45 +0100 Subject: arm64: cachetype: report weakest cache policy In big.LITTLE systems, the I-cache policy may differ across CPUs, and thus we must always meet the most stringent maintenance requirements of any I-cache in the system when performing maintenance to ensure correctness. Unfortunately this requirement is not met as we always look at the current CPU's cache type register to determine the maintenance requirements. This patch causes the I-cache policy of all CPUs to be taken into account for icache_is_aliasing and icache_is_aivivt. 
If any I-cache in the system is aliasing or AIVIVT, the respective function will return true. At boot each CPU may set flags to identify that at least one I-cache in the system is aliasing and/or AIVIVT. The now unused and potentially misleading icache_policy function is removed. Signed-off-by: Mark Rutland Acked-by: Will Deacon Reviewed-by: Will Deacon Reviewed-by: Catalin Marinas Signed-off-by: Catalin Marinas --- arch/arm64/kernel/cpuinfo.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 1f350fe5b318..3ce99fc1fde1 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -19,7 +19,9 @@ #include #include +#include #include +#include #include /* @@ -30,6 +32,28 @@ DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data); static struct cpuinfo_arm64 boot_cpu_data; +static char *icache_policy_str[] = { + [ICACHE_POLICY_RESERVED] = "RESERVED/UNKNOWN", + [ICACHE_POLICY_AIVIVT] = "AIVIVT", + [ICACHE_POLICY_VIPT] = "VIPT", + [ICACHE_POLICY_PIPT] = "PIPT", +}; + +unsigned long __icache_flags; + +static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info) +{ + unsigned int cpu = smp_processor_id(); + u32 l1ip = CTR_L1IP(info->reg_ctr); + + if (l1ip != ICACHE_POLICY_PIPT) + set_bit(ICACHEF_ALIASING, &__icache_flags); + if (l1ip == ICACHE_POLICY_AIVIVT); + set_bit(ICACHEF_AIVIVT, &__icache_flags); + + pr_info("Detected %s I-cache on CPU%d", icache_policy_str[l1ip], cpu); +} + static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) { info->reg_cntfrq = arch_timer_get_cntfrq(); @@ -56,6 +80,8 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1); info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1); info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1); + + cpuinfo_detect_icache_policy(info); } void cpuinfo_store_cpu(void) -- cgit v1.2.2 From 127161aaf0fcd37680984f79e1451b870b3e310a Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 16 Jul 2014 16:32:46 +0100 Subject: arm64: add runtime system sanity checks Unexpected variation in certain system register values across CPUs is an indicator of potential problems with a system. The kernel expects CPUs to be mostly identical in terms of supported features, even in systems with heterogeneous CPUs, with uniform instruction set support being critical for the correct operation of userspace. To help detect issues early where hardware violates the expectations of the kernel, this patch adds simple runtime sanity checks on important ID registers in the bring up path of each CPU. Where CPUs are fundamentally mismatched, set TAINT_CPU_OUT_OF_SPEC. Given that the kernel assumes CPUs are identical feature wise, let's not pretend that we expect such configurations to work. Supporting such configurations would require massive rework, and hopefully they will never exist. 
Signed-off-by: Mark Rutland Reviewed-by: Will Deacon Reviewed-by: Catalin Marinas Signed-off-by: Catalin Marinas --- arch/arm64/kernel/cpuinfo.c | 93 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 93 insertions(+) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 3ce99fc1fde1..f82f7d1c468e 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -21,6 +21,7 @@ #include #include +#include #include #include @@ -54,6 +55,97 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info) pr_info("Detected %s I-cache on CPU%d", icache_policy_str[l1ip], cpu); } +static int check_reg_mask(char *name, u64 mask, u64 boot, u64 cur, int cpu) +{ + if ((boot & mask) == (cur & mask)) + return 0; + + pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016lx, CPU%d: %#016lx\n", + name, (unsigned long)boot, cpu, (unsigned long)cur); + + return 1; +} + +#define CHECK_MASK(field, mask, boot, cur, cpu) \ + check_reg_mask(#field, mask, (boot)->reg_ ## field, (cur)->reg_ ## field, cpu) + +#define CHECK(field, boot, cur, cpu) \ + CHECK_MASK(field, ~0ULL, boot, cur, cpu) + +/* + * Verify that CPUs don't have unexpected differences that will cause problems. + */ +static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur) +{ + unsigned int cpu = smp_processor_id(); + struct cpuinfo_arm64 *boot = &boot_cpu_data; + unsigned int diff = 0; + + /* + * The kernel can handle differing I-cache policies, but otherwise + * caches should look identical. Userspace JITs will make use of + * *minLine. + */ + diff |= CHECK_MASK(ctr, 0xffff3fff, boot, cur, cpu); + + /* + * Userspace may perform DC ZVA instructions. Mismatched block sizes + * could result in too much or too little memory being zeroed if a + * process is preempted and migrated between CPUs. + */ + diff |= CHECK(dczid, boot, cur, cpu); + + /* If different, timekeeping will be broken (especially with KVM) */ + diff |= CHECK(cntfrq, boot, cur, cpu); + + /* + * Even in big.LITTLE, processors should be identical instruction-set + * wise. + */ + diff |= CHECK(id_aa64isar0, boot, cur, cpu); + diff |= CHECK(id_aa64isar1, boot, cur, cpu); + + /* + * Differing PARange support is fine as long as all peripherals and + * memory are mapped within the minimum PARange of all CPUs. + * Linux should not care about secure memory. + * ID_AA64MMFR1 is currently RES0. + */ + diff |= CHECK_MASK(id_aa64mmfr0, 0xffffffffffff0ff0, boot, cur, cpu); + diff |= CHECK(id_aa64mmfr1, boot, cur, cpu); + + /* + * EL3 is not our concern. + * ID_AA64PFR1 is currently RES0. + */ + diff |= CHECK_MASK(id_aa64pfr0, 0xffffffffffff0fff, boot, cur, cpu); + diff |= CHECK(id_aa64pfr1, boot, cur, cpu); + + /* + * If we have AArch32, we care about 32-bit features for compat. These + * registers should be RES0 otherwise. + */ + diff |= CHECK(id_isar0, boot, cur, cpu); + diff |= CHECK(id_isar1, boot, cur, cpu); + diff |= CHECK(id_isar2, boot, cur, cpu); + diff |= CHECK(id_isar3, boot, cur, cpu); + diff |= CHECK(id_isar4, boot, cur, cpu); + diff |= CHECK(id_isar5, boot, cur, cpu); + diff |= CHECK(id_mmfr0, boot, cur, cpu); + diff |= CHECK(id_mmfr1, boot, cur, cpu); + diff |= CHECK(id_mmfr2, boot, cur, cpu); + diff |= CHECK(id_mmfr3, boot, cur, cpu); + diff |= CHECK(id_pfr0, boot, cur, cpu); + diff |= CHECK(id_pfr1, boot, cur, cpu); + + /* + * Mismatched CPU features are a recipe for disaster. Don't even + * pretend to support them. 
+ */ + WARN_TAINT_ONCE(diff, TAINT_CPU_OUT_OF_SPEC, + "Unsupported CPU feature variation."); +} + static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) { info->reg_cntfrq = arch_timer_get_cntfrq(); @@ -88,6 +180,7 @@ void cpuinfo_store_cpu(void) { struct cpuinfo_arm64 *info = this_cpu_ptr(&cpu_data); __cpuinfo_store_cpu(info); + cpuinfo_sanity_check(info); } void __init cpuinfo_store_boot_cpu(void) -- cgit v1.2.2 From d7a49086f263164a2c4c178eb76412d48cd671d7 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 18 Jul 2014 14:57:38 +0100 Subject: arm64: cpuinfo: print info for all CPUs Currently reading /proc/cpuinfo will result in information being read out of the MIDR_EL1 of the current CPU, and the information is not associated with any particular logical CPU number. This is problematic for systems with heterogeneous CPUs (i.e. big.LITTLE) where MIDR fields will vary across CPUs, and the output will differ depending on the executing CPU. This patch reorganises the code responsible for /proc/cpuinfo to print information per-cpu. In the process, we perform several cleanups: * Property names are coerced to lower-case (to match "processor" as per glibc's expectations). * Property names are simplified and made to match the MIDR field names. * Revision is changed to hex as with every other field. * The meaningless Architecture property is removed. * The ripe-for-abuse Machine field is removed. The features field (a human-readable representation of the hwcaps) remains printed once, as this is expected to remain in use as the globally support CPU features. To enable the possibility of the addition of per-cpu HW feature information later, this is printed before any CPU-specific information. Comments are added to guide userspace developers in the right direction (using the hwcaps provided in auxval). Hopefully where userspace applications parse /proc/cpuinfo rather than using the readily available hwcaps, they limit themselves to reading said first line. If CPU features differ from each other, the previously installed sanity checks will give us some advance notice with warnings and TAINT_CPU_OUT_OF_SPEC. If we are lucky, we will never see such systems. Rework will be required in many places to support such systems anyway. Signed-off-by: Mark Rutland Cc: Ard Biesheuvel Cc: Catalin Marinas Cc: Marcus Shawcroft Cc: Peter Maydell Acked-by: Will Deacon [catalin.marinas@arm.com: remove machine_name as it is no longer reported] Signed-off-by: Catalin Marinas --- arch/arm64/kernel/setup.c | 40 ++++++++++++++++++---------------------- 1 file changed, 18 insertions(+), 22 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index edb146d01857..f6f0ccf35ae6 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -78,7 +78,6 @@ unsigned int compat_elf_hwcap2 __read_mostly; #endif static const char *cpu_name; -static const char *machine_name; phys_addr_t __fdt_pointer __initdata; /* @@ -310,8 +309,6 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys) while (true) cpu_relax(); } - - machine_name = of_flat_dt_get_machine_name(); } /* @@ -450,10 +447,21 @@ static int c_show(struct seq_file *m, void *v) { int i; - seq_printf(m, "Processor\t: %s rev %d (%s)\n", - cpu_name, read_cpuid_id() & 15, ELF_PLATFORM); + /* + * Dump out the common processor features in a single line. Userspace + * should read the hwcaps with getauxval(AT_HWCAP) rather than + * attempting to parse this. 
+ */ + seq_puts(m, "features\t:"); + for (i = 0; hwcap_str[i]; i++) + if (elf_hwcap & (1 << i)) + seq_printf(m, " %s", hwcap_str[i]); + seq_puts(m, "\n\n"); for_each_online_cpu(i) { + struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i); + u32 midr = cpuinfo->reg_midr; + /* * glibc reads /proc/cpuinfo to determine the number of * online processors, looking for lines beginning with @@ -462,25 +470,13 @@ static int c_show(struct seq_file *m, void *v) #ifdef CONFIG_SMP seq_printf(m, "processor\t: %d\n", i); #endif + seq_printf(m, "implementer\t: 0x%02x\n", + MIDR_IMPLEMENTOR(midr)); + seq_printf(m, "variant\t\t: 0x%x\n", MIDR_VARIANT(midr)); + seq_printf(m, "partnum\t\t: 0x%03x\n", MIDR_PARTNUM(midr)); + seq_printf(m, "revision\t: 0x%x\n\n", MIDR_REVISION(midr)); } - /* dump out the processor features */ - seq_puts(m, "Features\t: "); - - for (i = 0; hwcap_str[i]; i++) - if (elf_hwcap & (1 << i)) - seq_printf(m, "%s ", hwcap_str[i]); - - seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24); - seq_printf(m, "CPU architecture: AArch64\n"); - seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) & 15); - seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) & 0xfff); - seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15); - - seq_puts(m, "\n"); - - seq_printf(m, "Hardware\t: %s\n", machine_name); - return 0; } -- cgit v1.2.2 From a28e3f4b90543f7c249a956e3ca518e243a04618 Mon Sep 17 00:00:00 2001 From: Yi Li Date: Fri, 11 Jul 2014 12:46:50 +0100 Subject: arm64: dmi: Add SMBIOS/DMI support SMbios is important for server hardware vendors. It implements a spec for providing descriptive information about the platform. Things like serial numbers, physical layout of the ports, build configuration data, and the like. This has been tested by dmidecode and lshw tools. Signed-off-by: Yi Li Signed-off-by: Ard Biesheuvel Reviewed-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/kernel/setup.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index f6f0ccf35ae6..35339a0e1592 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -43,6 +43,7 @@ #include #include #include +#include #include #include @@ -413,6 +414,7 @@ void __init setup_arch(char **cmdline_p) static int __init arm64_device_init(void) { of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); + dmi_scan_machine(); return 0; } arch_initcall_sync(arm64_device_init); -- cgit v1.2.2 From 7edd88ad7e59c2b7b49da0e00f251884fb785d4f Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Wed, 16 Jul 2014 12:07:17 +0100 Subject: arm64: Do not initialise the fixmap page tables in head.S The early_ioremap_init() function already handles fixmap pte initialisation, so upgrade this to cover all of pud/pmd/pte and remove one page from swapper_pg_dir. Signed-off-by: Catalin Marinas Tested-by: Jungseok Lee --- arch/arm64/kernel/head.S | 7 ------- 1 file changed, 7 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 69dafe9621fd..fa3b7fb8a77a 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -582,13 +582,6 @@ __create_page_tables: sub x6, x6, #1 // inclusive range create_block_map x0, x7, x3, x5, x6 1: - /* - * Create the pgd entry for the fixed mappings. 
- */ - ldr x5, =FIXADDR_TOP // Fixed mapping virtual address - add x0, x26, #2 * PAGE_SIZE // section table address - create_pgd_entry x26, x0, x5, x6, x7 - /* * Since the page tables have been populated with non-cacheable * accesses (MMU disabled), invalidate the idmap and swapper page -- cgit v1.2.2 From c79b954bf6c006f2d3dd9d01f231abeead13a410 Mon Sep 17 00:00:00 2001 From: Jungseok Lee Date: Mon, 12 May 2014 18:40:51 +0900 Subject: arm64: mm: Implement 4 levels of translation tables This patch implements 4 levels of translation tables since 3 levels of page tables with 4KB pages cannot support 40-bit physical address space described in [1] due to the following issue. It is a restriction that kernel logical memory map with 4KB + 3 levels (0xffffffc000000000-0xffffffffffffffff) cannot cover RAM region from 544GB to 1024GB in [1]. Specifically, ARM64 kernel fails to create mapping for this region in map_mem function since __phys_to_virt for this region reaches to address overflow. If SoC design follows the document, [1], over 32GB RAM would be placed from 544GB. Even 64GB system is supposed to use the region from 544GB to 576GB for only 32GB RAM. Naturally, it would reach to enable 4 levels of page tables to avoid hacking __virt_to_phys and __phys_to_virt. However, it is recommended 4 levels of page table should be only enabled if memory map is too sparse or there is about 512GB RAM. References ---------- [1]: Principles of ARM Memory Maps, White Paper, Issue C Signed-off-by: Jungseok Lee Reviewed-by: Sungjinn Chung Acked-by: Kukjin Kim Reviewed-by: Christoffer Dall Reviewed-by: Steve Capper [catalin.marinas@arm.com: MEMBLOCK_INITIAL_LIMIT removed, same as PUD_SIZE] [catalin.marinas@arm.com: early_ioremap_init() updated for 4 levels] [catalin.marinas@arm.com: 48-bit VA depends on BROKEN until KVM is fixed] Signed-off-by: Catalin Marinas Tested-by: Jungseok Lee --- arch/arm64/kernel/head.S | 42 ++++++++++++++++++++++++++++++++++-------- arch/arm64/kernel/traps.c | 5 +++++ 2 files changed, 39 insertions(+), 8 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index fa3b7fb8a77a..847b99daad79 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -476,16 +476,42 @@ ENDPROC(__calc_phys_offset) .quad PAGE_OFFSET /* - * Macro to populate the PGD for the corresponding block entry in the next - * level (tbl) for the given virtual address. + * Macro to populate the PUD for the corresponding block entry in the next + * level (tbl) for the given virtual address in case of 4 levels. * - * Preserves: pgd, tbl, virt - * Corrupts: tmp1, tmp2 + * Preserves: pgd, virt + * Corrupts: tbl, tmp1, tmp2 + * Returns: pud */ - .macro create_pgd_entry, pgd, tbl, virt, tmp1, tmp2 + .macro create_pud_entry, pgd, tbl, virt, pud, tmp1, tmp2 +#ifdef CONFIG_ARM64_4_LEVELS + add \tbl, \tbl, #PAGE_SIZE // bump tbl 1 page up. + // to make room for pud + add \pud, \pgd, #PAGE_SIZE // pgd points to pud which + // follows pgd + lsr \tmp1, \virt, #PUD_SHIFT + and \tmp1, \tmp1, #PTRS_PER_PUD - 1 // PUD index + orr \tmp2, \tbl, #3 // PUD entry table type + str \tmp2, [\pud, \tmp1, lsl #3] +#else + mov \pud, \tbl +#endif + .endm + +/* + * Macro to populate the PGD (and possibily PUD) for the corresponding + * block entry in the next level (tbl) for the given virtual address. 
+ * + * Preserves: pgd, virt + * Corrupts: tmp1, tmp2, tmp3 + * Returns: tbl -> page where block mappings can be placed + * (changed to make room for pud with 4 levels, preserved otherwise) + */ + .macro create_pgd_entry, pgd, tbl, virt, tmp1, tmp2, tmp3 + create_pud_entry \pgd, \tbl, \virt, \tmp3, \tmp1, \tmp2 lsr \tmp1, \virt, #PGDIR_SHIFT and \tmp1, \tmp1, #PTRS_PER_PGD - 1 // PGD index - orr \tmp2, \tbl, #3 // PGD entry table type + orr \tmp2, \tmp3, #3 // PGD entry table type str \tmp2, [\pgd, \tmp1, lsl #3] .endm @@ -550,7 +576,7 @@ __create_page_tables: add x0, x25, #PAGE_SIZE // section table address ldr x3, =KERNEL_START add x3, x3, x28 // __pa(KERNEL_START) - create_pgd_entry x25, x0, x3, x5, x6 + create_pgd_entry x25, x0, x3, x1, x5, x6 ldr x6, =KERNEL_END mov x5, x3 // __pa(KERNEL_START) add x6, x6, x28 // __pa(KERNEL_END) @@ -561,7 +587,7 @@ __create_page_tables: */ add x0, x26, #PAGE_SIZE // section table address mov x5, #PAGE_OFFSET - create_pgd_entry x26, x0, x5, x3, x6 + create_pgd_entry x26, x0, x5, x1, x3, x6 ldr x6, =KERNEL_END mov x3, x24 // phys offset create_block_map x0, x7, x3, x5, x6 diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 506f7814e305..02cd3f023e9a 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -339,6 +339,11 @@ void __pmd_error(const char *file, int line, unsigned long val) pr_crit("%s:%d: bad pmd %016lx.\n", file, line, val); } +void __pud_error(const char *file, int line, unsigned long val) +{ + pr_crit("%s:%d: bad pud %016lx.\n", file, line, val); +} + void __pgd_error(const char *file, int line, unsigned long val) { pr_crit("%s:%d: bad pgd %016lx.\n", file, line, val); -- cgit v1.2.2 From abe669d7e1a8f9163eb7e8e153e7257d38c1ba3e Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 15 Jul 2014 15:37:21 +0100 Subject: arm64: Convert bool ARM64_x_LEVELS to int ARM64_PGTABLE_LEVELS Rather than having several Kconfig options, define int ARM64_PGTABLE_LEVELS which will be also useful in converting some of the pgtable macros. Signed-off-by: Catalin Marinas Tested-by: Jungseok Lee --- arch/arm64/kernel/head.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 847b99daad79..019f81d9f1d5 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -484,7 +484,7 @@ ENDPROC(__calc_phys_offset) * Returns: pud */ .macro create_pud_entry, pgd, tbl, virt, pud, tmp1, tmp2 -#ifdef CONFIG_ARM64_4_LEVELS +#if CONFIG_ARM64_PGTABLE_LEVELS == 4 add \tbl, \tbl, #PAGE_SIZE // bump tbl 1 page up. // to make room for pud add \pud, \pgd, #PAGE_SIZE // pgd points to pud which -- cgit v1.2.2 From b4a0d8b37797663b0e523c102dbb3e4b712a720c Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Wed, 16 Jul 2014 12:10:33 +0100 Subject: arm64: Clean up the initial page table creation in head.S This patch adds a create_table_entry macro which is used to populate pgd and pud entries, also reducing the number of arguments for create_pgd_entry. 
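(Illustration, not part of the patch: in C, the work create_table_entry does for one level looks roughly like the sketch below — compute the table index from the virtual address, write a table descriptor pointing at the page that follows, and hand back that next page. PAGE_SIZE, the shift/ptrs values and the descriptor type bits are illustrative, and pointer values stand in for physical addresses.)

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE	4096
#define PMD_TYPE_TABLE	3ULL		/* bits 1:0 = 0b11: next level is a table */

static uint64_t *create_table_entry(uint64_t *tbl, uint64_t virt,
				    unsigned int shift, unsigned int ptrs)
{
	uint64_t index = (virt >> shift) & (ptrs - 1);	/* table index           */
	uintptr_t next = (uintptr_t)tbl + PAGE_SIZE;	/* table page after tbl  */

	tbl[index] = (uint64_t)next | PMD_TYPE_TABLE;	/* table descriptor      */
	return (uint64_t *)next;			/* next level table page */
}

int main(void)
{
	/* Three consecutive zeroed "table pages" standing in for pgd/pud/pmd. */
	uint64_t *pgd = aligned_alloc(PAGE_SIZE, 3 * PAGE_SIZE);
	uint64_t virt = 0xffffffc000000000ULL;	/* hypothetical kernel VA */
	uint64_t *pud, *pmd;

	memset(pgd, 0, 3 * PAGE_SIZE);
	pud = create_table_entry(pgd, virt, 39, 512);	/* PGDIR level (values illustrative) */
	pmd = create_table_entry(pud, virt, 30, 512);	/* PUD level                         */
	(void)pmd;
	free(pgd);
	return 0;
}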
Signed-off-by: Catalin Marinas Tested-by: Jungseok Lee --- arch/arm64/kernel/head.S | 59 ++++++++++++++++++++++-------------------------- 1 file changed, 27 insertions(+), 32 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 019f81d9f1d5..a6db505411bc 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -476,43 +476,38 @@ ENDPROC(__calc_phys_offset) .quad PAGE_OFFSET /* - * Macro to populate the PUD for the corresponding block entry in the next - * level (tbl) for the given virtual address in case of 4 levels. + * Macro to create a table entry to the next page. * - * Preserves: pgd, virt - * Corrupts: tbl, tmp1, tmp2 - * Returns: pud + * tbl: page table address + * virt: virtual address + * shift: #imm page table shift + * ptrs: #imm pointers per table page + * + * Preserves: virt + * Corrupts: tmp1, tmp2 + * Returns: tbl -> next level table page address */ - .macro create_pud_entry, pgd, tbl, virt, pud, tmp1, tmp2 -#if CONFIG_ARM64_PGTABLE_LEVELS == 4 - add \tbl, \tbl, #PAGE_SIZE // bump tbl 1 page up. - // to make room for pud - add \pud, \pgd, #PAGE_SIZE // pgd points to pud which - // follows pgd - lsr \tmp1, \virt, #PUD_SHIFT - and \tmp1, \tmp1, #PTRS_PER_PUD - 1 // PUD index - orr \tmp2, \tbl, #3 // PUD entry table type - str \tmp2, [\pud, \tmp1, lsl #3] -#else - mov \pud, \tbl -#endif + .macro create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2 + lsr \tmp1, \virt, #\shift + and \tmp1, \tmp1, #\ptrs - 1 // table index + add \tmp2, \tbl, #PAGE_SIZE + orr \tmp2, \tmp2, #PMD_TYPE_TABLE // address of next table and entry type + str \tmp2, [\tbl, \tmp1, lsl #3] + add \tbl, \tbl, #PAGE_SIZE // next level table page .endm /* * Macro to populate the PGD (and possibily PUD) for the corresponding * block entry in the next level (tbl) for the given virtual address. * - * Preserves: pgd, virt - * Corrupts: tmp1, tmp2, tmp3 - * Returns: tbl -> page where block mappings can be placed - * (changed to make room for pud with 4 levels, preserved otherwise) + * Preserves: tbl, next, virt + * Corrupts: tmp1, tmp2 */ - .macro create_pgd_entry, pgd, tbl, virt, tmp1, tmp2, tmp3 - create_pud_entry \pgd, \tbl, \virt, \tmp3, \tmp1, \tmp2 - lsr \tmp1, \virt, #PGDIR_SHIFT - and \tmp1, \tmp1, #PTRS_PER_PGD - 1 // PGD index - orr \tmp2, \tmp3, #3 // PGD entry table type - str \tmp2, [\pgd, \tmp1, lsl #3] + .macro create_pgd_entry, tbl, virt, tmp1, tmp2 + create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2 +#if CONFIG_ARM64_PGTABLE_LEVELS == 4 + create_table_entry \tbl, \virt, PUD_SHIFT, PTRS_PER_PUD, \tmp1, \tmp2 +#endif .endm /* @@ -573,10 +568,10 @@ __create_page_tables: /* * Create the identity mapping. */ - add x0, x25, #PAGE_SIZE // section table address + mov x0, x25 // idmap_pg_dir ldr x3, =KERNEL_START add x3, x3, x28 // __pa(KERNEL_START) - create_pgd_entry x25, x0, x3, x1, x5, x6 + create_pgd_entry x0, x3, x5, x6 ldr x6, =KERNEL_END mov x5, x3 // __pa(KERNEL_START) add x6, x6, x28 // __pa(KERNEL_END) @@ -585,9 +580,9 @@ __create_page_tables: /* * Map the kernel image (starting with PHYS_OFFSET). 
*/ - add x0, x26, #PAGE_SIZE // section table address + mov x0, x26 // swapper_pg_dir mov x5, #PAGE_OFFSET - create_pgd_entry x26, x0, x5, x1, x3, x6 + create_pgd_entry x0, x5, x3, x6 ldr x6, =KERNEL_END mov x3, x24 // phys offset create_block_map x0, x7, x3, x5, x6 -- cgit v1.2.2 From 383c2799113b00a5f12c820ff0fd3dfca9e5be89 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Mon, 21 Jul 2014 15:54:50 +0100 Subject: arm64: Add support for 48-bit VA space with 64KB page configuration This patch allows support for 3 levels of page tables with 64KB page configuration allowing 48-bit VA space. The pgd is no longer a full PAGE_SIZE (PTRS_PER_PGD is 64) and (swapper|idmap)_pg_dir are not fully populated (pgd_alloc falls back to kzalloc). Signed-off-by: Catalin Marinas Tested-by: Jungseok Lee --- arch/arm64/kernel/head.S | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index a6db505411bc..0bce493495e9 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -55,9 +55,11 @@ #ifdef CONFIG_ARM64_64K_PAGES #define BLOCK_SHIFT PAGE_SHIFT #define BLOCK_SIZE PAGE_SIZE +#define TABLE_SHIFT PMD_SHIFT #else #define BLOCK_SHIFT SECTION_SHIFT #define BLOCK_SIZE SECTION_SIZE +#define TABLE_SHIFT PUD_SHIFT #endif #define KERNEL_START KERNEL_RAM_VADDR @@ -505,8 +507,8 @@ ENDPROC(__calc_phys_offset) */ .macro create_pgd_entry, tbl, virt, tmp1, tmp2 create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2 -#if CONFIG_ARM64_PGTABLE_LEVELS == 4 - create_table_entry \tbl, \virt, PUD_SHIFT, PTRS_PER_PUD, \tmp1, \tmp2 +#if SWAPPER_PGTABLE_LEVELS == 3 + create_table_entry \tbl, \virt, TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2 #endif .endm -- cgit v1.2.2 From 2a8f45b040bcb9b2ad2845f061499d1b6f41cc7b Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Thu, 24 Jul 2014 14:12:11 +0100 Subject: arm64: Do not invoke audit_syscall_* functions if !CONFIG_AUDIT_SYSCALL This is a temporary patch to be able to compile the kernel in linux-next where the audit_syscall_* API has been changed. To be reverted once the proper arm64 fix can be applied. Signed-off-by: Catalin Marinas --- arch/arm64/kernel/ptrace.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 70526cfda056..0310811bd77d 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -1115,15 +1115,19 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs) if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) trace_sys_enter(regs, regs->syscallno); +#ifdef CONFIG_AUDITSYSCALL audit_syscall_entry(syscall_get_arch(), regs->syscallno, regs->orig_x0, regs->regs[1], regs->regs[2], regs->regs[3]); +#endif return regs->syscallno; } asmlinkage void syscall_trace_exit(struct pt_regs *regs) { +#ifdef CONFIG_AUDITSYSCALL audit_syscall_exit(regs); +#endif if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) trace_sys_exit(regs, regs_return_value(regs)); -- cgit v1.2.2 From 72c5839515260dce966cd24f54436e6583288e6c Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Thu, 24 Jul 2014 14:14:42 +0100 Subject: arm64: gicv3: Allow GICv3 compilation with older binutils GICv3 introduces new system registers accessible with the full msr/mrs syntax (e.g. mrs x0, Sop0_op1_CRm_CRn_op2). However, only recent binutils understand the new syntax. 
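(Illustration, not the macros this patch adds: the generic encoded names map onto fixed bit positions in the MRS/MSR instruction word, so the instruction can be emitted numerically when the assembler does not recognise the name. The C sketch below just computes that word; MIDR_EL1, encoded S3_0_C0_C0_0, is used only because the result for "mrs x0, midr_el1" — 0xd5380000 — is easy to cross-check.)

#include <stdint.h>
#include <stdio.h>

/*
 * MRS Xt, <sysreg>: op0 (2 or 3), op1, CRn, CRm, op2 and the destination
 * register packed into the instruction word; MSR uses base 0xd5100000.
 */
static uint32_t mrs_word(unsigned int op0, unsigned int op1, unsigned int crn,
			 unsigned int crm, unsigned int op2, unsigned int rt)
{
	return 0xd5300000 | ((op0 & 1) << 19) | (op1 << 16) |
	       (crn << 12) | (crm << 8) | (op2 << 5) | rt;
}

int main(void)
{
	/* MIDR_EL1 = S3_0_C0_C0_0, read into x0: prints d5380000 */
	printf("%08x\n", mrs_word(3, 0, 0, 0, 0, 0));
	return 0;
}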
This patch introduces msr_s/mrs_s assembly macros which generate the equivalent instructions above and converts the existing GICv3 code (both drivers/irqchip/ and arch/arm64/kernel/). Signed-off-by: Catalin Marinas Reported-by: Olof Johansson Tested-by: Olof Johansson Suggested-by: Mark Rutland Acked-by: Mark Rutland Acked-by: Jason Cooper Cc: Will Deacon Cc: Marc Zyngier --- arch/arm64/kernel/head.S | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index c99e3a879ebc..144f10567f82 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -297,12 +297,12 @@ CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1 cmp x0, #1 b.ne 3f - mrs x0, ICC_SRE_EL2 + mrs_s x0, ICC_SRE_EL2 orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1 orr x0, x0, #ICC_SRE_EL2_ENABLE // Set ICC_SRE_EL2.Enable==1 - msr ICC_SRE_EL2, x0 + msr_s ICC_SRE_EL2, x0 isb // Make sure SRE is now set - msr ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults + msr_s ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults 3: #endif -- cgit v1.2.2 From 1915e2ad1cf548217c963121e4076b3d44dd0169 Mon Sep 17 00:00:00 2001 From: Arun Chandran Date: Thu, 26 Jun 2014 15:16:03 +0530 Subject: arm64: vdso: fix build error when switching from LE to BE Building a kernel with CPU_BIG_ENDIAN fails if there are stale objects from a !CPU_BIG_ENDIAN build. Due to a missing FORCE prerequisite on an if_changed rule in the VDSO Makefile, we attempt to link a stale LE object into the new BE kernel. According to Documentation/kbuild/makefiles.txt, FORCE is required for if_changed rules and forgetting it is a common mistake, so fix it by 'Forcing' the build of vdso. This patch fixes build errors like these: arch/arm64/kernel/vdso/note.o: compiled for a little endian system and target is big endian failed to merge target specific data of file arch/arm64/kernel/vdso/note.o arch/arm64/kernel/vdso/sigreturn.o: compiled for a little endian system and target is big endian failed to merge target specific data of file arch/arm64/kernel/vdso/sigreturn.o Tested-by: Mark Rutland Signed-off-by: Arun Chandran Signed-off-by: Will Deacon --- arch/arm64/kernel/vdso/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile index 84b942612051..ff3bddea482d 100644 --- a/arch/arm64/kernel/vdso/Makefile +++ b/arch/arm64/kernel/vdso/Makefile @@ -43,7 +43,7 @@ $(obj)/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE $(call if_changed,vdsosym) # Assembly rules for the .S files -$(obj-vdso): %.o: %.S +$(obj-vdso): %.o: %.S FORCE $(call if_changed_dep,vdsoas) # Actual build commands -- cgit v1.2.2 From c878e0cff5c5e56b216951cbe75f7a3dd500a736 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 31 Jul 2014 11:36:08 +0100 Subject: arm64: don't call break hooks for BRK exceptions from EL0 Our break hooks are used to handle brk exceptions from kgdb (and potentially kprobes if that code ever resurfaces), so don't bother calling them if the BRK exception comes from userspace. This prevents userspace from trapping to a kdb shell on systems where kgdb is enabled and active. 
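(Illustration of the user-visible effect, not part of the patch: with the break hooks no longer consulted for EL0, a BRK issued by a process should always be delivered as SIGTRAP, along the lines of this hypothetical test.)

#include <signal.h>
#include <unistd.h>

static void trap_handler(int sig)
{
	(void)sig;
	write(STDOUT_FILENO, "got SIGTRAP\n", 12);
	_exit(0);		/* the BRK is not stepped over, so do not return */
}

int main(void)
{
	signal(SIGTRAP, trap_handler);
	asm volatile("brk #0");	/* AArch64 only: breakpoint instruction from EL0 */
	return 1;		/* not reached */
}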
Cc: Reported-by: Omar Sandoval Signed-off-by: Will Deacon --- arch/arm64/kernel/debug-monitors.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index a7fb874b595e..fe5b94078d82 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -315,20 +315,20 @@ static int brk_handler(unsigned long addr, unsigned int esr, { siginfo_t info; - if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED) - return 0; + if (user_mode(regs)) { + info = (siginfo_t) { + .si_signo = SIGTRAP, + .si_errno = 0, + .si_code = TRAP_BRKPT, + .si_addr = (void __user *)instruction_pointer(regs), + }; - if (!user_mode(regs)) + force_sig_info(SIGTRAP, &info, current); + } else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) { + pr_warning("Unexpected kernel BRK exception at EL1\n"); return -EFAULT; + } - info = (siginfo_t) { - .si_signo = SIGTRAP, - .si_errno = 0, - .si_code = TRAP_BRKPT, - .si_addr = (void __user *)instruction_pointer(regs), - }; - - force_sig_info(SIGTRAP, &info, current); return 0; } -- cgit v1.2.2 From e4aa297a490e16c44dc464361daab145a7c451c5 Mon Sep 17 00:00:00 2001 From: "byungchul.park" Date: Thu, 31 Jul 2014 11:05:36 +0100 Subject: arm64: fpsimd: fix a typo in fpsimd_save_partial_state ENDPROC Commit 190f1ca85d07 ("arm64: add support for kernel mode NEON in interrupt context") introduced a typing error in fpsimd_save_partial_state ENDPROC. This patch fixes the typing error. Acked-by: Ard Biesheuvel Signed-off-by: byungchul.park Signed-off-by: Will Deacon --- arch/arm64/kernel/entry-fpsimd.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S index d358ccacfc00..c44a82f146b1 100644 --- a/arch/arm64/kernel/entry-fpsimd.S +++ b/arch/arm64/kernel/entry-fpsimd.S @@ -52,7 +52,7 @@ ENDPROC(fpsimd_load_state) ENTRY(fpsimd_save_partial_state) fpsimd_save_partial x0, 1, 8, 9 ret -ENDPROC(fpsimd_load_partial_state) +ENDPROC(fpsimd_save_partial_state) /* * Load the bottom n FP registers. -- cgit v1.2.2 From 94156675847c14a9b16e91b035da32e35e98ef79 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 31 Jul 2014 14:00:03 +0100 Subject: Revert "arm64: dmi: Add SMBIOS/DMI support" This reverts commit a28e3f4b90543f7c249a956e3ca518e243a04618. Ard and Yi Li report that this patch is broken by design, so revert it and let them sort it out for 3.18 instead. 
Reported-by: Ard Biesheuvel Signed-off-by: Will Deacon --- arch/arm64/kernel/setup.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 35339a0e1592..f6f0ccf35ae6 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -43,7 +43,6 @@ #include #include #include -#include #include #include @@ -414,7 +413,6 @@ void __init setup_arch(char **cmdline_p) static int __init arm64_device_init(void) { of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); - dmi_scan_machine(); return 0; } arch_initcall_sync(arm64_device_init); -- cgit v1.2.2 From ea1719672f59eeb85829073b567495c4f472ac9f Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 1 Aug 2014 10:23:20 +0100 Subject: arm64: add newline to I-cache policy string Due to a missing newline in the I-cache policy detection log output, it's possible to get some ratehr unfortunate output at boot time: CPU1: Booted secondary processor Detected VIPT I-cache on CPU1CPU2: Booted secondary processor Detected VIPT I-cache on CPU2CPU3: Booted secondary processor Detected VIPT I-cache on CPU3CPU4: Booted secondary processor Detected PIPT I-cache on CPU4CPU5: Booted secondary processor Detected PIPT I-cache on CPU5Brought up 6 CPUs SMP: Total of 6 processors activated. This patch adds the missing newline to the format string, cleaning up the output. Fixes: 59ccc0d41b7a ("arm64: cachetype: report weakest cache policy") Signed-off-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/kernel/cpuinfo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm64/kernel') diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index f82f7d1c468e..f798f66634af 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -52,7 +52,7 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info) if (l1ip == ICACHE_POLICY_AIVIVT); set_bit(ICACHEF_AIVIVT, &__icache_flags); - pr_info("Detected %s I-cache on CPU%d", icache_policy_str[l1ip], cpu); + pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu); } static int check_reg_mask(char *name, u64 mask, u64 boot, u64 cur, int cpu) -- cgit v1.2.2