author    Linus Torvalds <torvalds@linux-foundation.org>  2015-11-12 18:33:11 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-11-12 18:33:11 -0500
commit    a18e2fa5e670a1b84e66522b221c42875b02028a
tree      30d7724f3f8e82c9408e8bd32b141f114d46b1c9
parent    7dac7102afbeb99daa454f555f1ea1f42fad2f78
parent    01b305a234943c25c336a6f2f77932a4eaf125fa
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes and clean-ups from Catalin Marinas:
 "Here's a second pull request for this merging window with some
  fixes/clean-ups:

  - __cmpxchg_double*() return type fix to avoid truncation of a long
    to int and subsequent logical "not" in cmpxchg_double()
    misinterpreting the operation success/failure

  - BPF fixes for mod and div by zero

  - Fix compilation with STRICT_MM_TYPECHECKS enabled

  - VDSO build fix without libgcov

  - Some static and __maybe_unused annotations

  - Kconfig clean-up (FRAME_POINTER)

  - defconfig update for CRYPTO_CRC32_ARM64"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: suspend: make hw_breakpoint_restore static
  arm64: mmu: make split_pud and fixup_executable static
  arm64: smp: make of_parse_and_init_cpus static
  arm64: use linux/types.h in kvm.h
  arm64: build vdso without libgcov
  arm64: mark cpus_have_hwcap as __maybe_unused
  arm64: remove redundant FRAME_POINTER kconfig option and force to select it
  arm64: fix R/O permissions of FDT mapping
  arm64: fix STRICT_MM_TYPECHECKS issue in PTE_CONT manipulation
  arm64: bpf: fix mod-by-zero case
  arm64: bpf: fix div-by-zero case
  arm64: Enable CRYPTO_CRC32_ARM64 in defconfig
  arm64: cmpxchg_dbl: fix return value type
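The cmpxchg_double() return-type fix deserves a closer look. Below is a
minimal user-space sketch of the truncation (plain C for illustration,
not the kernel code; the XOR/OR expression stands in for the result the
LL/SC assembly computes):

/* __cmpxchg_double*() produces a 64-bit value that is zero only when
 * both old values matched; cmpxchg_double() then applies logical "not"
 * to it.  Returning int discards the upper 32 bits before the "not". */
#include <stdio.h>

static long cmpxchg_double_result(unsigned long cur1, unsigned long old1,
				  unsigned long cur2, unsigned long old2)
{
	return (cur1 ^ old1) | (cur2 ^ old2);	/* non-zero on mismatch */
}

int main(void)
{
	/* mismatch confined to bit 40, i.e. above the low 32 bits */
	long ret = cmpxchg_double_result(1UL << 40, 0, 0, 0);

	printf("long: success=%d\n", !ret);	 /* 0: failure, correct */
	printf("int:  success=%d\n", !(int)ret); /* 1: bogus "success"  */
	return 0;
}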
-rw-r--r--  arch/arm64/Kconfig                      |  1
-rw-r--r--  arch/arm64/Kconfig.debug                |  4
-rw-r--r--  arch/arm64/configs/defconfig            |  1
-rw-r--r--  arch/arm64/include/asm/atomic_ll_sc.h   |  2
-rw-r--r--  arch/arm64/include/asm/atomic_lse.h     |  2
-rw-r--r--  arch/arm64/include/asm/pgtable.h        |  1
-rw-r--r--  arch/arm64/include/uapi/asm/kvm.h       |  2
-rw-r--r--  arch/arm64/kernel/cpufeature.c          |  2
-rw-r--r--  arch/arm64/kernel/smp.c                 |  2
-rw-r--r--  arch/arm64/kernel/suspend.c             |  2
-rw-r--r--  arch/arm64/kernel/vdso/Makefile         |  3
-rw-r--r--  arch/arm64/mm/mmu.c                     |  8
-rw-r--r--  arch/arm64/net/bpf_jit.h                |  3
-rw-r--r--  arch/arm64/net/bpf_jit_comp.c           | 54
14 files changed, 54 insertions(+), 33 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 851fe11c6069..9ac16a482ff1 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -27,6 +27,7 @@ config ARM64
 	select CPU_PM if (SUSPEND || CPU_IDLE)
 	select DCACHE_WORD_ACCESS
 	select EDAC_SUPPORT
+	select FRAME_POINTER
 	select GENERIC_ALLOCATOR
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_CLOCKEVENTS_BROADCAST
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index c24d6adc0420..04fb73b973f1 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -2,10 +2,6 @@ menu "Kernel hacking"
 
 source "lib/Kconfig.debug"
 
-config FRAME_POINTER
-	bool
-	default y
-
 config ARM64_PTDUMP
 	bool "Export kernel pagetable layout to userspace via debugfs"
 	depends on DEBUG_KERNEL
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 2f71f9cdd39c..bdd7aa358d2a 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -224,3 +224,4 @@ CONFIG_CRYPTO_GHASH_ARM64_CE=y
 CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
 CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
 CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_CRYPTO_CRC32_ARM64=y
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index 74d0b8eb0799..f61c84f6ba02 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -233,7 +233,7 @@ __CMPXCHG_CASE( , , mb_8, dmb ish, , l, "memory")
 #undef __CMPXCHG_CASE
 
 #define __CMPXCHG_DBL(name, mb, rel, cl)				\
-__LL_SC_INLINE int							\
+__LL_SC_INLINE long							\
 __LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1,		\
 				      unsigned long old2,		\
 				      unsigned long new1,		\
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 1fce7908e690..197e06afbf71 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -387,7 +387,7 @@ __CMPXCHG_CASE(x, , mb_8, al, "memory")
 #define __LL_SC_CMPXCHG_DBL(op)	__LL_SC_CALL(__cmpxchg_double##op)
 
 #define __CMPXCHG_DBL(name, mb, cl...)					\
-static inline int __cmpxchg_double##name(unsigned long old1,		\
+static inline long __cmpxchg_double##name(unsigned long old1,		\
 					 unsigned long old2,		\
 					 unsigned long new1,		\
 					 unsigned long new2,		\
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index f3acf421ded4..9819a9426b69 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -80,6 +80,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 #define _PAGE_DEFAULT		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
 
 #define PAGE_KERNEL		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
+#define PAGE_KERNEL_RO		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
 #define PAGE_KERNEL_EXEC	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_EXEC_CONT	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
 
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 0cd7b5947dfc..2d4ca4bb0dd3 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -32,7 +32,7 @@
 
 #ifndef __ASSEMBLY__
 #include <linux/psci.h>
-#include <asm/types.h>
+#include <linux/types.h>
 #include <asm/ptrace.h>
 
 #define __KVM_HAVE_GUEST_DEBUG
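Switching from <asm/types.h> to <linux/types.h> matters because kvm.h is
a uapi header: <linux/types.h> is the canonical include that exports the
double-underscore fixed-width types to userspace. A trivial sketch of
the pattern (hypothetical struct, for illustration only):

#include <linux/types.h>

/* uapi structures use __u32/__u64 rather than u32/u64 or uint32_t so
 * the ABI is identical for kernel and userspace builds. */
struct example_uapi_regs {
	__u32 flags;
	__u64 addr;
};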
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 52f0d7a5a1c2..c8cf89223b5a 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -696,7 +696,7 @@ static void cap_set_hwcap(const struct arm64_cpu_capabilities *cap)
 }
 
 /* Check if we have a particular HWCAP enabled */
-static bool cpus_have_hwcap(const struct arm64_cpu_capabilities *cap)
+static bool __maybe_unused cpus_have_hwcap(const struct arm64_cpu_capabilities *cap)
 {
 	bool rc;
 
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 2bbdc0e4fd14..b1adc51b2c2e 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -473,7 +473,7 @@ acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
  * cpu logical map array containing MPIDR values related to logical
  * cpus. Assumes that cpu_logical_map(0) has already been initialized.
  */
-void __init of_parse_and_init_cpus(void)
+static void __init of_parse_and_init_cpus(void)
 {
 	struct device_node *dn = NULL;
 
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 40f7b33a22da..fce95e17cf7f 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -41,7 +41,7 @@ void notrace __cpu_suspend_save(struct cpu_suspend_ctx *ptr,
  * time the notifier runs debug exceptions might have been enabled already,
  * with HW breakpoints registers content still in an unknown state.
  */
-void (*hw_breakpoint_restore)(void *);
+static void (*hw_breakpoint_restore)(void *);
 void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
 {
 	/* Prevent multiple restore hook initializations */
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index f6fe17d88da5..b467fd0a384b 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -15,6 +15,9 @@ ccflags-y := -shared -fno-common -fno-builtin
 ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \
 		$(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
 
+# Disable gcov profiling for VDSO code
+GCOV_PROFILE := n
+
 # Workaround for bare-metal (ELF) toolchains that neglect to pass -shared
 # down to collect2, resulting in silent corruption of the vDSO image.
 ccflags-y += -Wl,-shared
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index c2fa6b56613c..e3f563c81c48 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -146,7 +146,7 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
 		if (((addr | next | phys) & ~CONT_MASK) == 0) {
 			/* a block of CONT_PTES */
 			__populate_init_pte(pte, addr, next, phys,
-					    prot | __pgprot(PTE_CONT));
+					    __pgprot(pgprot_val(prot) | PTE_CONT));
 		} else {
 			/*
 			 * If the range being split is already inside of a
@@ -165,7 +165,7 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
 	} while (addr != end);
 }
 
-void split_pud(pud_t *old_pud, pmd_t *pmd)
+static void split_pud(pud_t *old_pud, pmd_t *pmd)
 {
 	unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
 	pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
@@ -447,7 +447,7 @@ static void __init map_mem(void)
 	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
 }
 
-void __init fixup_executable(void)
+static void __init fixup_executable(void)
 {
 #ifdef CONFIG_DEBUG_RODATA
 	/* now that we are actually fully mapped, make the start/end more fine grained */
@@ -691,7 +691,7 @@ void __set_fixmap(enum fixed_addresses idx,
 void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
 {
 	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
-	pgprot_t prot = PAGE_KERNEL | PTE_RDONLY;
+	pgprot_t prot = PAGE_KERNEL_RO;
 	int size, offset;
 	void *dt_virt;
 
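The PTE_CONT hunk above is needed because pgprot_t is a plain integer
only when STRICT_MM_TYPECHECKS is disabled. A minimal sketch of why the
old expression breaks, using simplified definitions that mirror the
strict-typecheck variants (assumed here for illustration):

/* Under STRICT_MM_TYPECHECKS, pgprot_t is a wrapper struct, so C's
 * bitwise operators no longer apply to it directly. */
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) })
#define PTE_CONT	(1UL << 52)	/* contiguous-hint bit in the arm64 PTE */

static pgprot_t mark_cont(pgprot_t prot)
{
	/* "prot | __pgprot(PTE_CONT)" does not compile here: '|' is
	 * undefined for struct operands.  Unwrap, OR, re-wrap instead. */
	return __pgprot(pgprot_val(prot) | PTE_CONT);
}

The fixmap_remap_fdt() hunk fixes the same class of problem and also
tightens the mapping: "PAGE_KERNEL | PTE_RDONLY" still carried
PAGE_KERNEL's PTE_WRITE bit, whereas PAGE_KERNEL_RO (added in the
pgtable.h hunk above) is genuinely read-only.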
diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
index 98a26ce82d26..aee5637ea436 100644
--- a/arch/arm64/net/bpf_jit.h
+++ b/arch/arm64/net/bpf_jit.h
@@ -1,7 +1,7 @@
 /*
  * BPF JIT compiler for ARM64
  *
- * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
+ * Copyright (C) 2014-2015 Zi Shen Lim <zlim.lnx@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -35,6 +35,7 @@
 	aarch64_insn_gen_comp_branch_imm(0, offset, Rt, A64_VARIANT(sf), \
 		AARCH64_INSN_BRANCH_COMP_##type)
 #define A64_CBZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, ZERO)
+#define A64_CBNZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, NONZERO)
 
 /* Conditional branch (immediate) */
 #define A64_COND_BRANCH(cond, offset)	\
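A64_CBNZ mirrors the existing A64_CBZ one line above it. The "<< 2" in
both converts the JIT's instruction-granular offset into the byte offset
that aarch64_insn_gen_comp_branch_imm() expects; a rough sketch of the
arithmetic (standalone helpers assumed for illustration, not the
kernel's API):

#include <stdint.h>
#include <stdbool.h>

/* AArch64 instructions are a fixed 4 bytes; CBZ/CBNZ encode a signed
 * 19-bit offset in units of instructions (words). */
static bool fits_imm19(int32_t insns)
{
	return insns >= -(1 << 18) && insns < (1 << 18);
}

static int32_t to_byte_offset(int32_t insns)
{
	return insns << 2;	/* what "(imm19) << 2" above does */
}

This is also why the JIT pairs every computed jmp_offset with a
check_imm19()/check_imm26() range check before encoding, as seen in the
bpf_jit_comp.c diff below.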
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index a44e5293c6f5..cf3c7d4a1b58 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -1,7 +1,7 @@
 /*
  * BPF JIT compiler for ARM64
  *
- * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
+ * Copyright (C) 2014-2015 Zi Shen Lim <zlim.lnx@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -225,6 +225,17 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 	u8 jmp_cond;
 	s32 jmp_offset;
 
+#define check_imm(bits, imm) do {				\
+	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
+	    (((imm) < 0) && (~(imm) >> (bits)))) {		\
+		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
+			i, imm, imm);				\
+		return -EINVAL;					\
+	}							\
+} while (0)
+#define check_imm19(imm) check_imm(19, imm)
+#define check_imm26(imm) check_imm(26, imm)
+
 	switch (code) {
 	/* dst = src */
 	case BPF_ALU | BPF_MOV | BPF_X:
@@ -258,15 +269,33 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 		break;
 	case BPF_ALU | BPF_DIV | BPF_X:
 	case BPF_ALU64 | BPF_DIV | BPF_X:
-		emit(A64_UDIV(is64, dst, dst, src), ctx);
-		break;
 	case BPF_ALU | BPF_MOD | BPF_X:
 	case BPF_ALU64 | BPF_MOD | BPF_X:
-		ctx->tmp_used = 1;
-		emit(A64_UDIV(is64, tmp, dst, src), ctx);
-		emit(A64_MUL(is64, tmp, tmp, src), ctx);
-		emit(A64_SUB(is64, dst, dst, tmp), ctx);
+	{
+		const u8 r0 = bpf2a64[BPF_REG_0];
+
+		/* if (src == 0) return 0 */
+		jmp_offset = 3; /* skip ahead to else path */
+		check_imm19(jmp_offset);
+		emit(A64_CBNZ(is64, src, jmp_offset), ctx);
+		emit(A64_MOVZ(1, r0, 0, 0), ctx);
+		jmp_offset = epilogue_offset(ctx);
+		check_imm26(jmp_offset);
+		emit(A64_B(jmp_offset), ctx);
+		/* else */
+		switch (BPF_OP(code)) {
+		case BPF_DIV:
+			emit(A64_UDIV(is64, dst, dst, src), ctx);
+			break;
+		case BPF_MOD:
+			ctx->tmp_used = 1;
+			emit(A64_UDIV(is64, tmp, dst, src), ctx);
+			emit(A64_MUL(is64, tmp, tmp, src), ctx);
+			emit(A64_SUB(is64, dst, dst, tmp), ctx);
+			break;
+		}
 		break;
+	}
 	case BPF_ALU | BPF_LSH | BPF_X:
 	case BPF_ALU64 | BPF_LSH | BPF_X:
 		emit(A64_LSLV(is64, dst, dst, src), ctx);
@@ -393,17 +422,6 @@ emit_bswap_uxt:
 		emit(A64_ASR(is64, dst, dst, imm), ctx);
 		break;
 
-#define check_imm(bits, imm) do {				\
-	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
-	    (((imm) < 0) && (~(imm) >> (bits)))) {		\
-		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
-			i, imm, imm);				\
-		return -EINVAL;					\
-	}							\
-} while (0)
-#define check_imm19(imm) check_imm(19, imm)
-#define check_imm26(imm) check_imm(26, imm)
-
 	/* JUMP off */
 	case BPF_JMP | BPF_JA:
 		jmp_offset = bpf2a64_offset(i + off, i, ctx);
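For reference, the control flow the JIT now emits for register-operand
DIV and MOD corresponds to this C sketch (eBPF semantics of the time:
dividing by zero makes the program return 0 rather than trap; plain C
standing in for the emitted AArch64 sequence):

/* CBNZ skips the early exit when src != 0; otherwise MOVZ sets r0 to 0
 * and B jumps to the epilogue, so the whole program returns 0. */
static unsigned long bpf_div_x(unsigned long dst, unsigned long src)
{
	if (src == 0)
		return 0;
	return dst / src;		/* UDIV */
}

static unsigned long bpf_mod_x(unsigned long dst, unsigned long src)
{
	if (src == 0)
		return 0;
	/* AArch64 has no integer remainder instruction, hence the
	 * UDIV + MUL + SUB sequence in the JIT. */
	return dst - (dst / src) * src;
}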