 arch/arm/boot/compressed/Makefile    |   8
 arch/arm/boot/compressed/head.S      |  20
 arch/arm/include/asm/assembler.h     |  18
 arch/arm/include/asm/barrier.h       |  32
 arch/arm/include/asm/bugs.h          |   6
 arch/arm/include/asm/cp15.h          |   3
 arch/arm/include/asm/cputype.h       |   8
 arch/arm/include/asm/kvm_asm.h       |   2
 arch/arm/include/asm/kvm_host.h      |  14
 arch/arm/include/asm/kvm_mmu.h       |  23
 arch/arm/include/asm/proc-fns.h      |   4
 arch/arm/include/asm/system_misc.h   |  15
 arch/arm/include/uapi/asm/siginfo.h  |  13
 arch/arm/kernel/Makefile             |   1
 arch/arm/kernel/bugs.c               |  18
 arch/arm/kernel/entry-common.S       |  18
 arch/arm/kernel/entry-header.S       |  25
 arch/arm/kernel/machine_kexec.c      |  36
 arch/arm/kernel/smp.c                |   4
 arch/arm/kernel/suspend.c            |   2
 arch/arm/kernel/traps.c              |   5
 arch/arm/kvm/hyp/hyp-entry.S         | 112
 arch/arm/lib/getuser.S               |  10
 arch/arm/mm/Kconfig                  |  23
 arch/arm/mm/Makefile                 |   2
 arch/arm/mm/fault.c                  |   3
 arch/arm/mm/proc-macros.S            |   3
 arch/arm/mm/proc-v7-2level.S         |   6
 arch/arm/mm/proc-v7-bugs.c           | 174
 arch/arm/mm/proc-v7.S                | 154
 arch/arm/probes/kprobes/opt-arm.c    |   4
 arch/arm/vfp/vfpmodule.c             |   2
 32 files changed, 663 insertions(+), 105 deletions(-)
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 6a5c4ac97703..a3c5fbcad4ab 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -117,11 +117,9 @@ ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin -I$(obj)
 asflags-y := -DZIMAGE
 
 # Supply kernel BSS size to the decompressor via a linker symbol.
-KBSS_SZ = $(shell $(CROSS_COMPILE)nm $(obj)/../../../../vmlinux | \
-	perl -e 'while (<>) { \
-	  $$bss_start=hex($$1) if /^([[:xdigit:]]+) B __bss_start$$/; \
-	  $$bss_end=hex($$1) if /^([[:xdigit:]]+) B __bss_stop$$/; \
-	}; printf "%d\n", $$bss_end - $$bss_start;')
+KBSS_SZ = $(shell echo $$(($$($(CROSS_COMPILE)nm $(obj)/../../../../vmlinux | \
+		sed -n -e 's/^\([^ ]*\) [AB] __bss_start$$/-0x\1/p' \
+		       -e 's/^\([^ ]*\) [AB] __bss_stop$$/+0x\1/p') )) )
 LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ)
 # Supply ZRELADDR to the decompressor via a linker symbol.
 ifneq ($(CONFIG_AUTO_ZRELADDR),y)
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 45c8823c3750..517e0e18f0b8 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -29,19 +29,19 @@
 #if defined(CONFIG_DEBUG_ICEDCC)
 
 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
-		.macro	loadsp, rb, tmp
+		.macro	loadsp, rb, tmp1, tmp2
 		.endm
 		.macro	writeb, ch, rb
 		mcr	p14, 0, \ch, c0, c5, 0
 		.endm
 #elif defined(CONFIG_CPU_XSCALE)
-		.macro	loadsp, rb, tmp
+		.macro	loadsp, rb, tmp1, tmp2
 		.endm
 		.macro	writeb, ch, rb
 		mcr	p14, 0, \ch, c8, c0, 0
 		.endm
 #else
-		.macro	loadsp, rb, tmp
+		.macro	loadsp, rb, tmp1, tmp2
 		.endm
 		.macro	writeb, ch, rb
 		mcr	p14, 0, \ch, c1, c0, 0
@@ -57,7 +57,7 @@
 		.endm
 
 #if defined(CONFIG_ARCH_SA1100)
-		.macro	loadsp, rb, tmp
+		.macro	loadsp, rb, tmp1, tmp2
 		mov	\rb, #0x80000000	@ physical base address
 #ifdef CONFIG_DEBUG_LL_SER3
 		add	\rb, \rb, #0x00050000	@ Ser3
@@ -66,8 +66,8 @@
 #endif
 		.endm
 #else
-		.macro	loadsp, rb, tmp
-		addruart \rb, \tmp
+		.macro	loadsp, rb, tmp1, tmp2
+		addruart \rb, \tmp1, \tmp2
 		.endm
 #endif
 #endif
@@ -561,8 +561,6 @@ not_relocated: mov r0, #0
 		bl	decompress_kernel
 		bl	cache_clean_flush
 		bl	cache_off
-		mov	r1, r7			@ restore architecture number
-		mov	r2, r8			@ restore atags pointer
 
 #ifdef CONFIG_ARM_VIRT_EXT
 		mrs	r0, spsr		@ Get saved CPU boot mode
@@ -1297,7 +1295,7 @@ phex: adr r3, phexbuf
 		b	1b
 
 @ puts corrupts {r0, r1, r2, r3}
-puts:		loadsp	r3, r1
+puts:		loadsp	r3, r2, r1
 1:		ldrb	r2, [r0], #1
 		teq	r2, #0
 		moveq	pc, lr
@@ -1314,8 +1312,8 @@ puts: loadsp r3, r1
 @ putc corrupts {r0, r1, r2, r3}
 putc:
 		mov	r2, r0
+		loadsp	r3, r1, r0
 		mov	r0, #0
-		loadsp	r3, r1
 		b	2b
 
 @ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr}
@@ -1365,6 +1363,8 @@ __hyp_reentry_vectors:
 
 __enter_kernel:
 		mov	r0, #0			@ must be 0
+		mov	r1, r7			@ restore architecture number
+		mov	r2, r8			@ restore atags pointer
 ARM(		mov	pc, r4		)	@ call kernel
 M_CLASS(	add	r4, r4, #1	)	@ enter in Thumb mode for M class
 THUMB(		bx	r4	)		@ entry point is always ARM for A/R classes
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index bc8d4bbd82e2..0cd4dccbae78 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -447,6 +447,14 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
 	.size \name , . - \name
 	.endm
 
+	.macro	csdb
+#ifdef CONFIG_THUMB2_KERNEL
+	.inst.w	0xf3af8014
+#else
+	.inst	0xe320f014
+#endif
+	.endm
+
 	.macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
 #ifndef CONFIG_CPU_USE_DOMAINS
 	adds	\tmp, \addr, #\size - 1
@@ -536,4 +544,14 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
 #endif
 	.endm
 
+#ifdef CONFIG_KPROBES
+#define _ASM_NOKPROBE(entry)				\
+	.pushsection "_kprobe_blacklist", "aw" ;	\
+	.balign 4 ;					\
+	.long entry;					\
+	.popsection
+#else
+#define _ASM_NOKPROBE(entry)
+#endif
+
 #endif /* __ASM_ASSEMBLER_H__ */
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 40f5c410fd8c..69772e742a0a 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -17,6 +17,12 @@
 #define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory")
 #define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory")
 #define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory")
+#ifdef CONFIG_THUMB2_KERNEL
+#define CSDB	".inst.w 0xf3af8014"
+#else
+#define CSDB	".inst	0xe320f014"
+#endif
+#define csdb() __asm__ __volatile__(CSDB : : : "memory")
 #elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
 #define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
 				    : : "r" (0) : "memory")
@@ -37,6 +43,13 @@
 #define dmb(x) __asm__ __volatile__ ("" : : : "memory")
 #endif
 
+#ifndef CSDB
+#define CSDB
+#endif
+#ifndef csdb
+#define csdb()
+#endif
+
 #ifdef CONFIG_ARM_HEAVY_MB
 extern void (*soc_mb)(void);
 extern void arm_heavy_mb(void);
@@ -63,6 +76,25 @@ extern void arm_heavy_mb(void);
 #define __smp_rmb()	__smp_mb()
 #define __smp_wmb()	dmb(ishst)
 
+#ifdef CONFIG_CPU_SPECTRE
+static inline unsigned long array_index_mask_nospec(unsigned long idx,
+						    unsigned long sz)
+{
+	unsigned long mask;
+
+	asm volatile(
+		"cmp	%1, %2\n"
+	"	sbc	%0, %1, %1\n"
+	CSDB
+	: "=r" (mask)
+	: "r" (idx), "Ir" (sz)
+	: "cc");
+
+	return mask;
+}
+#define array_index_mask_nospec array_index_mask_nospec
+#endif
+
 #include <asm-generic/barrier.h>
 
 #endif /* !__ASSEMBLY__ */
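
For reference: array_index_mask_nospec() returns ~0UL when the index is in bounds and 0UL otherwise. The cmp sets the carry flag only when idx >= sz, and sbc then computes idx - idx - !C, i.e. C - 1, so the mask is all-ones exactly when idx < sz; the CSDB keeps speculation from running ahead with an unmasked value. A minimal caller sketch (hypothetical helper name, not part of this patch):

	/* Hypothetical caller, not in this patch: clamp an untrusted index. */
	static inline unsigned long clamp_index_nospec(unsigned long idx,
						       unsigned long sz)
	{
		/* mask is ~0UL when idx < sz, 0UL otherwise */
		unsigned long mask = array_index_mask_nospec(idx, sz);

		return idx & mask;	/* out-of-range indices collapse to 0 */
	}
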
diff --git a/arch/arm/include/asm/bugs.h b/arch/arm/include/asm/bugs.h
index a97f1ea708d1..73a99c72a930 100644
--- a/arch/arm/include/asm/bugs.h
+++ b/arch/arm/include/asm/bugs.h
@@ -10,12 +10,14 @@
 #ifndef __ASM_BUGS_H
 #define __ASM_BUGS_H
 
-#ifdef CONFIG_MMU
 extern void check_writebuffer_bugs(void);
 
-#define check_bugs() check_writebuffer_bugs()
+#ifdef CONFIG_MMU
+extern void check_bugs(void);
+extern void check_other_bugs(void);
 #else
 #define check_bugs() do { } while (0)
+#define check_other_bugs() do { } while (0)
 #endif
 
 #endif
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
index 4c9fa72b59f5..07e27f212dc7 100644
--- a/arch/arm/include/asm/cp15.h
+++ b/arch/arm/include/asm/cp15.h
@@ -65,6 +65,9 @@
 #define __write_sysreg(v, r, w, c, t)	asm volatile(w " " c : : "r" ((t)(v)))
 #define write_sysreg(v, ...)		__write_sysreg(v, __VA_ARGS__)
 
+#define BPIALL				__ACCESS_CP15(c7, 0, c5, 6)
+#define ICIALLU				__ACCESS_CP15(c7, 0, c5, 0)
+
 extern unsigned long cr_alignment;	/* defined in entry-armv.S */
 
 static inline unsigned long get_cr(void)
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index cb546425da8a..26021980504d 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -77,8 +77,16 @@
 #define ARM_CPU_PART_CORTEX_A12		0x4100c0d0
 #define ARM_CPU_PART_CORTEX_A17		0x4100c0e0
 #define ARM_CPU_PART_CORTEX_A15		0x4100c0f0
+#define ARM_CPU_PART_CORTEX_A53		0x4100d030
+#define ARM_CPU_PART_CORTEX_A57		0x4100d070
+#define ARM_CPU_PART_CORTEX_A72		0x4100d080
+#define ARM_CPU_PART_CORTEX_A73		0x4100d090
+#define ARM_CPU_PART_CORTEX_A75		0x4100d0a0
 #define ARM_CPU_PART_MASK		0xff00fff0
 
+/* Broadcom cores */
+#define ARM_CPU_PART_BRAHMA_B15		0x420000f0
+
 /* DEC implemented cores */
 #define ARM_CPU_PART_SA1100		0x4400a110
 
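
The new part constants are compared after masking the MIDR with ARM_CPU_PART_MASK, which keeps the implementer (bits 31:24) and primary part number (bits 15:4) and discards the variant, architecture, and revision fields. A standalone model of that match (the example MIDR value is illustrative, not taken from this patch):

	#include <stdio.h>

	#define ARM_CPU_PART_CORTEX_A72	0x4100d080
	#define ARM_CPU_PART_MASK	0xff00fff0

	int main(void)
	{
		unsigned int midr = 0x410fd083;	/* e.g. a Cortex-A72 r0p3 */

		/* models read_cpuid_part(): mask off variant/revision */
		if ((midr & ARM_CPU_PART_MASK) == ARM_CPU_PART_CORTEX_A72)
			puts("Cortex-A72: firmware Spectre v2 workaround applies");
		return 0;
	}
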
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 5a953ecb0d78..231e87ad45d5 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -61,8 +61,6 @@ struct kvm_vcpu;
 extern char __kvm_hyp_init[];
 extern char __kvm_hyp_init_end[];
 
-extern char __kvm_hyp_vector[];
-
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index c6a749568dd6..8467e05360d7 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -21,6 +21,7 @@
 
 #include <linux/types.h>
 #include <linux/kvm_types.h>
+#include <asm/cputype.h>
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
@@ -308,8 +309,17 @@ static inline void kvm_arm_vhe_guest_exit(void) {}
 
 static inline bool kvm_arm_harden_branch_predictor(void)
 {
-	/* No way to detect it yet, pretend it is not there. */
-	return false;
+	switch(read_cpuid_part()) {
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+	case ARM_CPU_PART_BRAHMA_B15:
+	case ARM_CPU_PART_CORTEX_A12:
+	case ARM_CPU_PART_CORTEX_A15:
+	case ARM_CPU_PART_CORTEX_A17:
+		return true;
+#endif
+	default:
+		return false;
+	}
 }
 
 static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 707a1f06dc5d..cf2eae51f9a0 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -311,7 +311,28 @@ static inline unsigned int kvm_get_vmid_bits(void)
 
 static inline void *kvm_get_hyp_vector(void)
 {
-	return kvm_ksym_ref(__kvm_hyp_vector);
+	switch(read_cpuid_part()) {
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+	case ARM_CPU_PART_CORTEX_A12:
+	case ARM_CPU_PART_CORTEX_A17:
+	{
+		extern char __kvm_hyp_vector_bp_inv[];
+		return kvm_ksym_ref(__kvm_hyp_vector_bp_inv);
+	}
+
+	case ARM_CPU_PART_BRAHMA_B15:
+	case ARM_CPU_PART_CORTEX_A15:
+	{
+		extern char __kvm_hyp_vector_ic_inv[];
+		return kvm_ksym_ref(__kvm_hyp_vector_ic_inv);
+	}
+#endif
+	default:
+	{
+		extern char __kvm_hyp_vector[];
+		return kvm_ksym_ref(__kvm_hyp_vector);
+	}
+	}
 }
 
 static inline int kvm_map_vectors(void)
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index f2e1af45bd6f..e25f4392e1b2 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -37,6 +37,10 @@ extern struct processor {
 	 */
 	void (*_proc_init)(void);
 	/*
+	 * Check for processor bugs
+	 */
+	void (*check_bugs)(void);
+	/*
 	 * Disable any processor specifics
 	 */
 	void (*_proc_fin)(void);
diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h
index 78f6db114faf..8e76db83c498 100644
--- a/arch/arm/include/asm/system_misc.h
+++ b/arch/arm/include/asm/system_misc.h
@@ -8,6 +8,7 @@
 #include <linux/linkage.h>
 #include <linux/irqflags.h>
 #include <linux/reboot.h>
+#include <linux/percpu.h>
 
 extern void cpu_init(void);
 
@@ -15,6 +16,20 @@ void soft_restart(unsigned long);
 extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
 extern void (*arm_pm_idle)(void);
 
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+typedef void (*harden_branch_predictor_fn_t)(void);
+DECLARE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
+static inline void harden_branch_predictor(void)
+{
+	harden_branch_predictor_fn_t fn = per_cpu(harden_branch_predictor_fn,
+						  smp_processor_id());
+	if (fn)
+		fn();
+}
+#else
+#define harden_branch_predictor() do { } while (0)
+#endif
+
 #define UDBG_UNDEFINED	(1 << 0)
 #define UDBG_SYSCALL	(1 << 1)
 #define UDBG_BADABORT	(1 << 2)
diff --git a/arch/arm/include/uapi/asm/siginfo.h b/arch/arm/include/uapi/asm/siginfo.h
deleted file mode 100644
index d0513880be21..000000000000
--- a/arch/arm/include/uapi/asm/siginfo.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __ASM_SIGINFO_H
-#define __ASM_SIGINFO_H
-
-#include <asm-generic/siginfo.h>
-
-/*
- * SIGFPE si_codes
- */
-#ifdef __KERNEL__
-#define FPE_FIXME	0	/* Broken dup of SI_USER */
-#endif /* __KERNEL__ */
-
-#endif
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index b59ac4bf82b8..8cad59465af3 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -31,6 +31,7 @@ else
 obj-y		+= entry-armv.o
 endif
 
+obj-$(CONFIG_MMU)		+= bugs.o
 obj-$(CONFIG_CPU_IDLE)		+= cpuidle.o
 obj-$(CONFIG_ISA_DMA_API)	+= dma.o
 obj-$(CONFIG_FIQ)		+= fiq.o fiqasm.o
diff --git a/arch/arm/kernel/bugs.c b/arch/arm/kernel/bugs.c
new file mode 100644
index 000000000000..7be511310191
--- /dev/null
+++ b/arch/arm/kernel/bugs.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <asm/bugs.h>
+#include <asm/proc-fns.h>
+
+void check_other_bugs(void)
+{
+#ifdef MULTI_CPU
+	if (processor.check_bugs)
+		processor.check_bugs();
+#endif
+}
+
+void __init check_bugs(void)
+{
+	check_writebuffer_bugs();
+	check_other_bugs();
+}
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 3c4f88701f22..20df608bf343 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -242,9 +242,7 @@ local_restart:
 	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
 	bne	__sys_trace
 
-	cmp	scno, #NR_syscalls		@ check upper syscall limit
-	badr	lr, ret_fast_syscall		@ return address
-	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
+	invoke_syscall tbl, scno, r10, ret_fast_syscall
 
 	add	r1, sp, #S_OFF
 2:	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
@@ -278,14 +276,8 @@ __sys_trace:
 	mov	r1, scno
 	add	r0, sp, #S_OFF
 	bl	syscall_trace_enter
-
-	badr	lr, __sys_trace_return		@ return address
-	mov	scno, r0			@ syscall number (possibly new)
-	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
-	cmp	scno, #NR_syscalls		@ check upper syscall limit
-	ldmccia	r1, {r0 - r6}			@ have to reload r0 - r6
-	stmccia	sp, {r4, r5}			@ and update the stack args
-	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
+	mov	scno, r0
+	invoke_syscall tbl, scno, r10, __sys_trace_return, reload=1
 	cmp	scno, #-1			@ skip the syscall?
 	bne	2b
 	add	sp, sp, #S_OFF			@ restore stack
@@ -363,6 +355,10 @@ sys_syscall:
 		bic	scno, r0, #__NR_OABI_SYSCALL_BASE
 		cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
 		cmpne	scno, #NR_syscalls	@ check range
+#ifdef CONFIG_CPU_SPECTRE
+		movhs	scno, #0
+		csdb
+#endif
 		stmloia	sp, {r5, r6}		@ shuffle args
 		movlo	r0, r1
 		movlo	r1, r2
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 0f07579af472..773424843d6e 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -378,6 +378,31 @@
 #endif
 	.endm
 
+	.macro	invoke_syscall, table, nr, tmp, ret, reload=0
+#ifdef CONFIG_CPU_SPECTRE
+	mov	\tmp, \nr
+	cmp	\tmp, #NR_syscalls		@ check upper syscall limit
+	movcs	\tmp, #0
+	csdb
+	badr	lr, \ret			@ return address
+	.if	\reload
+	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
+	ldmccia	r1, {r0 - r6}			@ reload r0-r6
+	stmccia	sp, {r4, r5}			@ update stack arguments
+	.endif
+	ldrcc	pc, [\table, \tmp, lsl #2]	@ call sys_* routine
+#else
+	cmp	\nr, #NR_syscalls		@ check upper syscall limit
+	badr	lr, \ret			@ return address
+	.if	\reload
+	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
+	ldmccia	r1, {r0 - r6}			@ reload r0-r6
+	stmccia	sp, {r4, r5}			@ update stack arguments
+	.endif
+	ldrcc	pc, [\table, \nr, lsl #2]	@ call sys_* routine
+#endif
+	.endm
+
 /*
  * These are the registers used in the syscall handler, and allow us to
  * have in theory up to 7 arguments to a function - r0 to r6.
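
Both syscall entry paths above go through this macro. Under CONFIG_CPU_SPECTRE the syscall number is copied into a scratch register, forced to zero when out of range (movcs), and followed by csdb, so a mispredicted bounds check cannot feed an attacker-controlled number into the table load. A rough standalone C model of the hardened dispatch (the table contents and return values are stand-ins, not kernel code):

	#include <stdio.h>

	#define NR_SYSCALLS	400
	#define ENOSYS		38

	typedef long (*syscall_fn_t)(long);

	static long sys_ni_syscall(long arg) { (void)arg; return -ENOSYS; }

	static syscall_fn_t sys_call_table[NR_SYSCALLS] = {
		[0 ... NR_SYSCALLS - 1] = sys_ni_syscall,	/* placeholder */
	};

	static long invoke_syscall_model(unsigned long nr, long arg)
	{
		if (nr >= NR_SYSCALLS)		/* cmp \tmp, #NR_syscalls */
			nr = 0;			/* movcs \tmp, #0 */
		/*
		 * csdb would sit here: even if the bounds check above is
		 * mispredicted, the index used by the load below stays in
		 * range.  Architecturally, out-of-range numbers never reach
		 * the table; the assembly falls through to its ENOSYS path.
		 */
		return sys_call_table[nr](arg);
	}

	int main(void)
	{
		printf("%ld\n", invoke_syscall_model(1000, 0));	/* clamped */
		return 0;
	}
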
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 6b38d7a634c1..dd2eb5f76b9f 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -83,7 +83,7 @@ void machine_crash_nonpanic_core(void *unused)
 {
 	struct pt_regs regs;
 
-	crash_setup_regs(&regs, NULL);
+	crash_setup_regs(&regs, get_irq_regs());
 	printk(KERN_DEBUG "CPU %u will stop doing anything useful since another CPU has crashed\n",
 	       smp_processor_id());
 	crash_save_cpu(&regs, smp_processor_id());
@@ -95,6 +95,27 @@ void machine_crash_nonpanic_core(void *unused)
 		cpu_relax();
 }
 
+void crash_smp_send_stop(void)
+{
+	static int cpus_stopped;
+	unsigned long msecs;
+
+	if (cpus_stopped)
+		return;
+
+	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
+	smp_call_function(machine_crash_nonpanic_core, NULL, false);
+	msecs = 1000; /* Wait at most a second for the other cpus to stop */
+	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
+		mdelay(1);
+		msecs--;
+	}
+	if (atomic_read(&waiting_for_crash_ipi) > 0)
+		pr_warn("Non-crashing CPUs did not react to IPI\n");
+
+	cpus_stopped = 1;
+}
+
 static void machine_kexec_mask_interrupts(void)
 {
 	unsigned int i;
@@ -120,19 +141,8 @@ static void machine_kexec_mask_interrupts(void)
 
 void machine_crash_shutdown(struct pt_regs *regs)
 {
-	unsigned long msecs;
-
 	local_irq_disable();
-
-	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
-	smp_call_function(machine_crash_nonpanic_core, NULL, false);
-	msecs = 1000; /* Wait at most a second for the other cpus to stop */
-	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
-		mdelay(1);
-		msecs--;
-	}
-	if (atomic_read(&waiting_for_crash_ipi) > 0)
-		pr_warn("Non-crashing CPUs did not react to IPI\n");
+	crash_smp_send_stop();
 
 	crash_save_cpu(regs, smp_processor_id());
 	machine_kexec_mask_interrupts();
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index b9e08f50df41..0978282d5fc2 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -31,6 +31,7 @@
 #include <linux/irq_work.h>
 
 #include <linux/atomic.h>
+#include <asm/bugs.h>
 #include <asm/smp.h>
 #include <asm/cacheflush.h>
 #include <asm/cpu.h>
@@ -404,6 +405,9 @@ asmlinkage void secondary_start_kernel(void)
 	 * before we continue - which happens after __cpu_up returns.
 	 */
 	set_cpu_online(cpu, true);
+
+	check_other_bugs();
+
 	complete(&cpu_running);
 
 	local_irq_enable();
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index a40ebb7c0896..d08099269e35 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -3,6 +3,7 @@
 #include <linux/slab.h>
 #include <linux/mm_types.h>
 
+#include <asm/bugs.h>
 #include <asm/cacheflush.h>
 #include <asm/idmap.h>
 #include <asm/pgalloc.h>
@@ -36,6 +37,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 		cpu_switch_mm(mm->pgd, mm);
 		local_flush_bp_all();
 		local_flush_tlb_all();
+		check_other_bugs();
 	}
 
 	return ret;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 5e3633c24e63..2fe87109ae46 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -19,6 +19,7 @@
 #include <linux/uaccess.h>
 #include <linux/hardirq.h>
 #include <linux/kdebug.h>
+#include <linux/kprobes.h>
 #include <linux/module.h>
 #include <linux/kexec.h>
 #include <linux/bug.h>
@@ -417,7 +418,8 @@ void unregister_undef_hook(struct undef_hook *hook)
 	raw_spin_unlock_irqrestore(&undef_lock, flags);
 }
 
-static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
+static nokprobe_inline
+int call_undef_hook(struct pt_regs *regs, unsigned int instr)
 {
 	struct undef_hook *hook;
 	unsigned long flags;
@@ -490,6 +492,7 @@ die_sig:
 
 	arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6);
 }
+NOKPROBE_SYMBOL(do_undefinstr)
 
 /*
  * Handle FIQ similarly to NMI on x86 systems.
diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S
index 95a2faefc070..aa3f9a9837ac 100644
--- a/arch/arm/kvm/hyp/hyp-entry.S
+++ b/arch/arm/kvm/hyp/hyp-entry.S
@@ -16,6 +16,7 @@
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  */
 
+#include <linux/arm-smccc.h>
 #include <linux/linkage.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
@@ -71,6 +72,90 @@ __kvm_hyp_vector:
 	W(b)	hyp_irq
 	W(b)	hyp_fiq
 
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+	.align 5
+__kvm_hyp_vector_ic_inv:
+	.global __kvm_hyp_vector_ic_inv
+
+	/*
+	 * We encode the exception entry in the bottom 3 bits of
+	 * SP, and we have to guarantee to be 8 bytes aligned.
+	 */
+	W(add)	sp, sp, #1	/* Reset		7 */
+	W(add)	sp, sp, #1	/* Undef		6 */
+	W(add)	sp, sp, #1	/* Syscall		5 */
+	W(add)	sp, sp, #1	/* Prefetch abort	4 */
+	W(add)	sp, sp, #1	/* Data abort		3 */
+	W(add)	sp, sp, #1	/* HVC			2 */
+	W(add)	sp, sp, #1	/* IRQ			1 */
+	W(nop)			/* FIQ			0 */
+
+	mcr	p15, 0, r0, c7, c5, 0	/* ICIALLU */
+	isb
+
+	b	decode_vectors
+
+	.align 5
+__kvm_hyp_vector_bp_inv:
+	.global __kvm_hyp_vector_bp_inv
+
+	/*
+	 * We encode the exception entry in the bottom 3 bits of
+	 * SP, and we have to guarantee to be 8 bytes aligned.
+	 */
+	W(add)	sp, sp, #1	/* Reset		7 */
+	W(add)	sp, sp, #1	/* Undef		6 */
+	W(add)	sp, sp, #1	/* Syscall		5 */
+	W(add)	sp, sp, #1	/* Prefetch abort	4 */
+	W(add)	sp, sp, #1	/* Data abort		3 */
+	W(add)	sp, sp, #1	/* HVC			2 */
+	W(add)	sp, sp, #1	/* IRQ			1 */
+	W(nop)			/* FIQ			0 */
+
+	mcr	p15, 0, r0, c7, c5, 6	/* BPIALL */
+	isb
+
+decode_vectors:
+
+#ifdef CONFIG_THUMB2_KERNEL
+	/*
+	 * Yet another silly hack: Use VPIDR as a temp register.
+	 * Thumb2 is really a pain, as SP cannot be used with most
+	 * of the bitwise instructions. The vect_br macro ensures
+	 * things get cleaned up.
+	 */
+	mcr	p15, 4, r0, c0, c0, 0	/* VPIDR */
+	mov	r0, sp
+	and	r0, r0, #7
+	sub	sp, sp, r0
+	push	{r1, r2}
+	mov	r1, r0
+	mrc	p15, 4, r0, c0, c0, 0	/* VPIDR */
+	mrc	p15, 0, r2, c0, c0, 0	/* MIDR  */
+	mcr	p15, 4, r2, c0, c0, 0	/* VPIDR */
+#endif
+
+.macro vect_br val, targ
+ARM(	eor	sp, sp, #\val	)
+ARM(	tst	sp, #7		)
+ARM(	eorne	sp, sp, #\val	)
+
+THUMB(	cmp	r1, #\val	)
+THUMB(	popeq	{r1, r2}	)
+
+	beq	\targ
+.endm
+
+	vect_br	0, hyp_fiq
+	vect_br	1, hyp_irq
+	vect_br	2, hyp_hvc
+	vect_br	3, hyp_dabt
+	vect_br	4, hyp_pabt
+	vect_br	5, hyp_svc
+	vect_br	6, hyp_undef
+	vect_br	7, hyp_reset
+#endif
+
 .macro invalid_vector label, cause
 	.align
 \label:	mov	r0, #\cause
@@ -118,7 +203,7 @@ hyp_hvc:
 	lsr	r2, r2, #16
 	and	r2, r2, #0xff
 	cmp	r2, #0
-	bne	guest_trap		@ Guest called HVC
+	bne	guest_hvc_trap		@ Guest called HVC
 
 	/*
 	 * Getting here means host called HVC, we shift parameters and branch
@@ -149,7 +234,14 @@ hyp_hvc:
 	bx	ip
 
 1:
-	push	{lr}
+	/*
+	 * Pushing r2 here is just a way of keeping the stack aligned to
+	 * 8 bytes on any path that can trigger a HYP exception. Here,
+	 * we may well be about to jump into the guest, and the guest
+	 * exit would otherwise be badly decoded by our fancy
+	 * "decode-exception-without-a-branch" code...
	 */
+	push	{r2, lr}
 
 	mov	lr, r0
 	mov	r0, r1
@@ -159,7 +251,21 @@ hyp_hvc:
 THUMB(	orr	lr, #1)
 	blx	lr			@ Call the HYP function
 
-	pop	{lr}
+	pop	{r2, lr}
+	eret
+
+guest_hvc_trap:
+	movw	r2, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
+	movt	r2, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
+	ldr	r0, [sp]		@ Guest's r0
+	teq	r0, r2
+	bne	guest_trap
+	add	sp, sp, #12
+	@ Returns:
+	@ r0 = 0
+	@ r1 = HSR value (perfectly predictable)
+	@ r2 = ARM_SMCCC_ARCH_WORKAROUND_1
+	mov	r0, #0
 	eret
 
 guest_trap:
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index df73914e81c8..746e7801dcdf 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -38,6 +38,7 @@ ENTRY(__get_user_1)
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_1)
+_ASM_NOKPROBE(__get_user_1)
 
 ENTRY(__get_user_2)
 	check_uaccess r0, 2, r1, r2, __get_user_bad
@@ -58,6 +59,7 @@ rb .req r0
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_2)
+_ASM_NOKPROBE(__get_user_2)
 
 ENTRY(__get_user_4)
 	check_uaccess r0, 4, r1, r2, __get_user_bad
@@ -65,6 +67,7 @@ ENTRY(__get_user_4)
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_4)
+_ASM_NOKPROBE(__get_user_4)
 
 ENTRY(__get_user_8)
 	check_uaccess r0, 8, r1, r2, __get_user_bad8
@@ -78,6 +81,7 @@ ENTRY(__get_user_8)
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_8)
+_ASM_NOKPROBE(__get_user_8)
 
 #ifdef __ARMEB__
 ENTRY(__get_user_32t_8)
@@ -91,6 +95,7 @@ ENTRY(__get_user_32t_8)
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_32t_8)
+_ASM_NOKPROBE(__get_user_32t_8)
 
 ENTRY(__get_user_64t_1)
 	check_uaccess r0, 1, r1, r2, __get_user_bad8
@@ -98,6 +103,7 @@ ENTRY(__get_user_64t_1)
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_64t_1)
+_ASM_NOKPROBE(__get_user_64t_1)
 
 ENTRY(__get_user_64t_2)
 	check_uaccess r0, 2, r1, r2, __get_user_bad8
@@ -114,6 +120,7 @@ rb .req r0
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_64t_2)
+_ASM_NOKPROBE(__get_user_64t_2)
 
 ENTRY(__get_user_64t_4)
 	check_uaccess r0, 4, r1, r2, __get_user_bad8
@@ -121,6 +128,7 @@ ENTRY(__get_user_64t_4)
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_64t_4)
+_ASM_NOKPROBE(__get_user_64t_4)
 #endif
 
 __get_user_bad8:
@@ -131,6 +139,8 @@ __get_user_bad:
 	ret	lr
 ENDPROC(__get_user_bad)
 ENDPROC(__get_user_bad8)
+_ASM_NOKPROBE(__get_user_bad)
+_ASM_NOKPROBE(__get_user_bad8)
 
 .pushsection __ex_table, "a"
 	.long	1b, __get_user_bad
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 7f14acf67caf..9357ff52c221 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -415,6 +415,7 @@ config CPU_V7
 	select CPU_CP15_MPU if !MMU
 	select CPU_HAS_ASID if MMU
 	select CPU_PABRT_V7
+	select CPU_SPECTRE if MMU
 	select CPU_THUMB_CAPABLE
 	select CPU_TLB_V7 if MMU
 
@@ -826,6 +827,28 @@ config CPU_BPREDICT_DISABLE
 	help
 	  Say Y here to disable branch prediction. If unsure, say N.
 
+config CPU_SPECTRE
+	bool
+
+config HARDEN_BRANCH_PREDICTOR
+	bool "Harden the branch predictor against aliasing attacks" if EXPERT
+	depends on CPU_SPECTRE
+	default y
+	help
+	   Speculation attacks against some high-performance processors rely
+	   on being able to manipulate the branch predictor for a victim
+	   context by executing aliasing branches in the attacker context.
+	   Such attacks can be partially mitigated against by clearing
+	   internal branch predictor state and limiting the prediction
+	   logic in some situations.
+
+	   This config option will take CPU-specific actions to harden
+	   the branch predictor against aliasing attacks and may rely on
+	   specific instruction sequences or control bits being set by
+	   the system firmware.
+
+	   If unsure, say Y.
+
 config TLS_REG_EMUL
 	bool
 	select NEED_KUSER_HELPERS
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index d19b209e04e0..7cb1699fbfc4 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -97,7 +97,7 @@ obj-$(CONFIG_CPU_MOHAWK) += proc-mohawk.o
 obj-$(CONFIG_CPU_FEROCEON)	+= proc-feroceon.o
 obj-$(CONFIG_CPU_V6)		+= proc-v6.o
 obj-$(CONFIG_CPU_V6K)		+= proc-v6.o
-obj-$(CONFIG_CPU_V7)		+= proc-v7.o
+obj-$(CONFIG_CPU_V7)		+= proc-v7.o proc-v7-bugs.o
 obj-$(CONFIG_CPU_V7M)		+= proc-v7m.o
 
 AFLAGS_proc-v6.o	:=-Wa,-march=armv6
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index b75eada23d0a..3b1ba003c4f9 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -163,6 +163,9 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
 {
 	struct siginfo si;
 
+	if (addr > TASK_SIZE)
+		harden_branch_predictor();
+
 #ifdef CONFIG_DEBUG_USER
 	if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
 	    ((user_debug & UDBG_BUS)  && (sig == SIGBUS))) {
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index f10e31d0730a..81d0efb055c6 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -273,13 +273,14 @@
 	mcr	p15, 0, ip, c7, c10, 4		@ data write barrier
 	.endm
 
-.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0
+.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0
 	.type	\name\()_processor_functions, #object
 	.align 2
 ENTRY(\name\()_processor_functions)
 	.word	\dabort
 	.word	\pabort
 	.word	cpu_\name\()_proc_init
+	.word	\bugs
 	.word	cpu_\name\()_proc_fin
 	.word	cpu_\name\()_reset
 	.word	cpu_\name\()_do_idle
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index c6141a5435c3..f8d45ad2a515 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -41,11 +41,6 @@
  * even on Cortex-A8 revisions not affected by 430973.
  * If IBE is not set, the flush BTAC/BTB won't do anything.
  */
-ENTRY(cpu_ca8_switch_mm)
-#ifdef CONFIG_MMU
-	mov	r2, #0
-	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
-#endif
 ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_MMU
 	mmid	r1, r1				@ get mm->context.id
@@ -66,7 +61,6 @@ ENTRY(cpu_v7_switch_mm)
 #endif
 	bx	lr
 ENDPROC(cpu_v7_switch_mm)
-ENDPROC(cpu_ca8_switch_mm)
 
 /*
  * cpu_v7_set_pte_ext(ptep, pte)
diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
new file mode 100644
index 000000000000..5544b82a2e7a
--- /dev/null
+++ b/arch/arm/mm/proc-v7-bugs.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/arm-smccc.h>
+#include <linux/kernel.h>
+#include <linux/psci.h>
+#include <linux/smp.h>
+
+#include <asm/cp15.h>
+#include <asm/cputype.h>
+#include <asm/proc-fns.h>
+#include <asm/system_misc.h>
+
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
+
+extern void cpu_v7_iciallu_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+extern void cpu_v7_bpiall_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+extern void cpu_v7_smc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+extern void cpu_v7_hvc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
+
+static void harden_branch_predictor_bpiall(void)
+{
+	write_sysreg(0, BPIALL);
+}
+
+static void harden_branch_predictor_iciallu(void)
+{
+	write_sysreg(0, ICIALLU);
+}
+
+static void __maybe_unused call_smc_arch_workaround_1(void)
+{
+	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+}
+
+static void __maybe_unused call_hvc_arch_workaround_1(void)
+{
+	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+}
+
+static void cpu_v7_spectre_init(void)
+{
+	const char *spectre_v2_method = NULL;
+	int cpu = smp_processor_id();
+
+	if (per_cpu(harden_branch_predictor_fn, cpu))
+		return;
+
+	switch (read_cpuid_part()) {
+	case ARM_CPU_PART_CORTEX_A8:
+	case ARM_CPU_PART_CORTEX_A9:
+	case ARM_CPU_PART_CORTEX_A12:
+	case ARM_CPU_PART_CORTEX_A17:
+	case ARM_CPU_PART_CORTEX_A73:
+	case ARM_CPU_PART_CORTEX_A75:
+		if (processor.switch_mm != cpu_v7_bpiall_switch_mm)
+			goto bl_error;
+		per_cpu(harden_branch_predictor_fn, cpu) =
+			harden_branch_predictor_bpiall;
+		spectre_v2_method = "BPIALL";
+		break;
+
+	case ARM_CPU_PART_CORTEX_A15:
+	case ARM_CPU_PART_BRAHMA_B15:
+		if (processor.switch_mm != cpu_v7_iciallu_switch_mm)
+			goto bl_error;
+		per_cpu(harden_branch_predictor_fn, cpu) =
+			harden_branch_predictor_iciallu;
+		spectre_v2_method = "ICIALLU";
+		break;
+
+#ifdef CONFIG_ARM_PSCI
+	default:
+		/* Other ARM CPUs require no workaround */
+		if (read_cpuid_implementor() == ARM_CPU_IMP_ARM)
+			break;
+		/* fallthrough */
+		/* Cortex A57/A72 require firmware workaround */
+	case ARM_CPU_PART_CORTEX_A57:
+	case ARM_CPU_PART_CORTEX_A72: {
+		struct arm_smccc_res res;
+
+		if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
+			break;
+
+		switch (psci_ops.conduit) {
+		case PSCI_CONDUIT_HVC:
+			arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+					  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+			if ((int)res.a0 != 0)
+				break;
+			if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu)
+				goto bl_error;
+			per_cpu(harden_branch_predictor_fn, cpu) =
+				call_hvc_arch_workaround_1;
+			processor.switch_mm = cpu_v7_hvc_switch_mm;
+			spectre_v2_method = "hypervisor";
+			break;
+
+		case PSCI_CONDUIT_SMC:
+			arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+					  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+			if ((int)res.a0 != 0)
+				break;
+			if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu)
+				goto bl_error;
+			per_cpu(harden_branch_predictor_fn, cpu) =
+				call_smc_arch_workaround_1;
+			processor.switch_mm = cpu_v7_smc_switch_mm;
+			spectre_v2_method = "firmware";
+			break;
+
+		default:
+			break;
+		}
+	}
+#endif
+	}
+
+	if (spectre_v2_method)
+		pr_info("CPU%u: Spectre v2: using %s workaround\n",
+			smp_processor_id(), spectre_v2_method);
+	return;
+
+bl_error:
+	pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n",
+	       cpu);
+}
+#else
+static void cpu_v7_spectre_init(void)
+{
+}
+#endif
+
+static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned,
+						  u32 mask, const char *msg)
+{
+	u32 aux_cr;
+
+	asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (aux_cr));
+
+	if ((aux_cr & mask) != mask) {
+		if (!*warned)
+			pr_err("CPU%u: %s", smp_processor_id(), msg);
+		*warned = true;
+		return false;
+	}
+	return true;
+}
+
+static DEFINE_PER_CPU(bool, spectre_warned);
+
+static bool check_spectre_auxcr(bool *warned, u32 bit)
+{
+	return IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR) &&
+		cpu_v7_check_auxcr_set(warned, bit,
+				       "Spectre v2: firmware did not set auxiliary control register IBE bit, system vulnerable\n");
+}
+
+void cpu_v7_ca8_ibe(void)
+{
+	if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6)))
+		cpu_v7_spectre_init();
+}
+
+void cpu_v7_ca15_ibe(void)
+{
+	if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
+		cpu_v7_spectre_init();
+}
+
+void cpu_v7_bugs_init(void)
+{
+	cpu_v7_spectre_init();
+}
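
Pulling the pieces together: cpu_v7_spectre_init() runs via the new check_bugs hook, selects a per-CPU callback once, and __do_user_fault() later invokes it through harden_branch_predictor(). A userspace model of that dispatch, with hypothetical stand-ins for the per-CPU plumbing and CPUID values (a sketch, not kernel code):

	#include <stdio.h>

	typedef void (*harden_branch_predictor_fn_t)(void);

	/* stand-in for DEFINE_PER_CPU(harden_branch_predictor_fn, ...) */
	static harden_branch_predictor_fn_t harden_fn[2];

	static void harden_bpiall(void)  { puts("BPIALL"); }  /* write_sysreg(0, BPIALL) */
	static void harden_iciallu(void) { puts("ICIALLU"); } /* write_sysreg(0, ICIALLU) */

	/* models cpu_v7_spectre_init(): pick a method once per CPU by part id */
	static void spectre_init(int cpu, unsigned int part)
	{
		if (harden_fn[cpu])
			return;			/* already selected */
		switch (part) {
		case 0x4100c0f0:		/* ARM_CPU_PART_CORTEX_A15 */
			harden_fn[cpu] = harden_iciallu;
			break;
		case 0x4100c080:		/* ARM_CPU_PART_CORTEX_A8 */
			harden_fn[cpu] = harden_bpiall;
			break;
		}
	}

	/* models harden_branch_predictor() from <asm/system_misc.h> */
	static void harden(int cpu)
	{
		if (harden_fn[cpu])
			harden_fn[cpu]();
	}

	int main(void)
	{
		spectre_init(0, 0x4100c0f0);
		harden(0);			/* prints ICIALLU */
		return 0;
	}
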
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index b528a15f460d..6fe52819e014 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -9,6 +9,7 @@
  *
  * This is the "shell" of the ARMv7 processor support.
  */
+#include <linux/arm-smccc.h>
 #include <linux/init.h>
 #include <linux/linkage.h>
 #include <asm/assembler.h>
@@ -93,6 +94,37 @@ ENTRY(cpu_v7_dcache_clean_area)
 	ret	lr
 ENDPROC(cpu_v7_dcache_clean_area)
 
+#ifdef CONFIG_ARM_PSCI
+	.arch_extension sec
+ENTRY(cpu_v7_smc_switch_mm)
+	stmfd	sp!, {r0 - r3}
+	movw	r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
+	movt	r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
+	smc	#0
+	ldmfd	sp!, {r0 - r3}
+	b	cpu_v7_switch_mm
+ENDPROC(cpu_v7_smc_switch_mm)
+	.arch_extension virt
+ENTRY(cpu_v7_hvc_switch_mm)
+	stmfd	sp!, {r0 - r3}
+	movw	r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
+	movt	r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
+	hvc	#0
+	ldmfd	sp!, {r0 - r3}
+	b	cpu_v7_switch_mm
+ENDPROC(cpu_v7_hvc_switch_mm)
+#endif
+ENTRY(cpu_v7_iciallu_switch_mm)
+	mov	r3, #0
+	mcr	p15, 0, r3, c7, c5, 0		@ ICIALLU
+	b	cpu_v7_switch_mm
+ENDPROC(cpu_v7_iciallu_switch_mm)
+ENTRY(cpu_v7_bpiall_switch_mm)
+	mov	r3, #0
+	mcr	p15, 0, r3, c7, c5, 6		@ flush BTAC/BTB
+	b	cpu_v7_switch_mm
+ENDPROC(cpu_v7_bpiall_switch_mm)
+
 	string	cpu_v7_name, "ARMv7 Processor"
 	.align
 
@@ -158,31 +190,6 @@ ENTRY(cpu_v7_do_resume)
 ENDPROC(cpu_v7_do_resume)
 #endif
 
-/*
- * Cortex-A8
- */
-	globl_equ	cpu_ca8_proc_init,	cpu_v7_proc_init
-	globl_equ	cpu_ca8_proc_fin,	cpu_v7_proc_fin
-	globl_equ	cpu_ca8_reset,		cpu_v7_reset
-	globl_equ	cpu_ca8_do_idle,	cpu_v7_do_idle
-	globl_equ	cpu_ca8_dcache_clean_area, cpu_v7_dcache_clean_area
-	globl_equ	cpu_ca8_set_pte_ext,	cpu_v7_set_pte_ext
-	globl_equ	cpu_ca8_suspend_size,	cpu_v7_suspend_size
-#ifdef CONFIG_ARM_CPU_SUSPEND
-	globl_equ	cpu_ca8_do_suspend,	cpu_v7_do_suspend
-	globl_equ	cpu_ca8_do_resume,	cpu_v7_do_resume
-#endif
-
-/*
- * Cortex-A9 processor functions
- */
-	globl_equ	cpu_ca9mp_proc_init,	cpu_v7_proc_init
-	globl_equ	cpu_ca9mp_proc_fin,	cpu_v7_proc_fin
-	globl_equ	cpu_ca9mp_reset,	cpu_v7_reset
-	globl_equ	cpu_ca9mp_do_idle,	cpu_v7_do_idle
-	globl_equ	cpu_ca9mp_dcache_clean_area, cpu_v7_dcache_clean_area
-	globl_equ	cpu_ca9mp_switch_mm,	cpu_v7_switch_mm
-	globl_equ	cpu_ca9mp_set_pte_ext,	cpu_v7_set_pte_ext
 .globl	cpu_ca9mp_suspend_size
 .equ	cpu_ca9mp_suspend_size, cpu_v7_suspend_size + 4 * 2
 #ifdef CONFIG_ARM_CPU_SUSPEND
@@ -547,12 +554,79 @@ __v7_setup_stack:
 
 	__INITDATA
 
+	.weak cpu_v7_bugs_init
+
 	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
-	define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
+	define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
+
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+	@ generic v7 bpiall on context switch
+	globl_equ	cpu_v7_bpiall_proc_init,	cpu_v7_proc_init
+	globl_equ	cpu_v7_bpiall_proc_fin,		cpu_v7_proc_fin
+	globl_equ	cpu_v7_bpiall_reset,		cpu_v7_reset
+	globl_equ	cpu_v7_bpiall_do_idle,		cpu_v7_do_idle
+	globl_equ	cpu_v7_bpiall_dcache_clean_area, cpu_v7_dcache_clean_area
+	globl_equ	cpu_v7_bpiall_set_pte_ext,	cpu_v7_set_pte_ext
+	globl_equ	cpu_v7_bpiall_suspend_size,	cpu_v7_suspend_size
+#ifdef CONFIG_ARM_CPU_SUSPEND
+	globl_equ	cpu_v7_bpiall_do_suspend,	cpu_v7_do_suspend
+	globl_equ	cpu_v7_bpiall_do_resume,	cpu_v7_do_resume
+#endif
+	define_processor_functions v7_bpiall, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
+
+#define HARDENED_BPIALL_PROCESSOR_FUNCTIONS v7_bpiall_processor_functions
+#else
+#define HARDENED_BPIALL_PROCESSOR_FUNCTIONS v7_processor_functions
+#endif
+
 #ifndef CONFIG_ARM_LPAE
-	define_processor_functions ca8, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
-	define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
+	@ Cortex-A8 - always needs bpiall switch_mm implementation
+	globl_equ	cpu_ca8_proc_init,	cpu_v7_proc_init
+	globl_equ	cpu_ca8_proc_fin,	cpu_v7_proc_fin
+	globl_equ	cpu_ca8_reset,		cpu_v7_reset
+	globl_equ	cpu_ca8_do_idle,	cpu_v7_do_idle
+	globl_equ	cpu_ca8_dcache_clean_area, cpu_v7_dcache_clean_area
+	globl_equ	cpu_ca8_set_pte_ext,	cpu_v7_set_pte_ext
+	globl_equ	cpu_ca8_switch_mm,	cpu_v7_bpiall_switch_mm
+	globl_equ	cpu_ca8_suspend_size,	cpu_v7_suspend_size
+#ifdef CONFIG_ARM_CPU_SUSPEND
+	globl_equ	cpu_ca8_do_suspend,	cpu_v7_do_suspend
+	globl_equ	cpu_ca8_do_resume,	cpu_v7_do_resume
+#endif
+	define_processor_functions ca8, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_ca8_ibe
+
+	@ Cortex-A9 - needs more registers preserved across suspend/resume
+	@ and bpiall switch_mm for hardening
+	globl_equ	cpu_ca9mp_proc_init,	cpu_v7_proc_init
+	globl_equ	cpu_ca9mp_proc_fin,	cpu_v7_proc_fin
+	globl_equ	cpu_ca9mp_reset,	cpu_v7_reset
+	globl_equ	cpu_ca9mp_do_idle,	cpu_v7_do_idle
+	globl_equ	cpu_ca9mp_dcache_clean_area, cpu_v7_dcache_clean_area
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+	globl_equ	cpu_ca9mp_switch_mm,	cpu_v7_bpiall_switch_mm
+#else
+	globl_equ	cpu_ca9mp_switch_mm,	cpu_v7_switch_mm
+#endif
+	globl_equ	cpu_ca9mp_set_pte_ext,	cpu_v7_set_pte_ext
+	define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
 #endif
+
+	@ Cortex-A15 - needs iciallu switch_mm for hardening
+	globl_equ	cpu_ca15_proc_init,	cpu_v7_proc_init
+	globl_equ	cpu_ca15_proc_fin,	cpu_v7_proc_fin
+	globl_equ	cpu_ca15_reset,		cpu_v7_reset
+	globl_equ	cpu_ca15_do_idle,	cpu_v7_do_idle
+	globl_equ	cpu_ca15_dcache_clean_area, cpu_v7_dcache_clean_area
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+	globl_equ	cpu_ca15_switch_mm,	cpu_v7_iciallu_switch_mm
+#else
+	globl_equ	cpu_ca15_switch_mm,	cpu_v7_switch_mm
+#endif
+	globl_equ	cpu_ca15_set_pte_ext,	cpu_v7_set_pte_ext
+	globl_equ	cpu_ca15_suspend_size,	cpu_v7_suspend_size
+	globl_equ	cpu_ca15_do_suspend,	cpu_v7_do_suspend
+	globl_equ	cpu_ca15_do_resume,	cpu_v7_do_resume
+	define_processor_functions ca15, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_ca15_ibe
 #ifdef CONFIG_CPU_PJ4B
 	define_processor_functions	pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
 #endif
@@ -669,7 +743,7 @@ __v7_ca7mp_proc_info:
 __v7_ca12mp_proc_info:
 	.long	0x410fc0d0
 	.long	0xff0ffff0
-	__v7_proc __v7_ca12mp_proc_info, __v7_ca12mp_setup
+	__v7_proc __v7_ca12mp_proc_info, __v7_ca12mp_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
 	.size	__v7_ca12mp_proc_info, . - __v7_ca12mp_proc_info
 
 	/*
@@ -679,7 +753,7 @@ __v7_ca12mp_proc_info:
 __v7_ca15mp_proc_info:
 	.long	0x410fc0f0
 	.long	0xff0ffff0
-	__v7_proc __v7_ca15mp_proc_info, __v7_ca15mp_setup
+	__v7_proc __v7_ca15mp_proc_info, __v7_ca15mp_setup, proc_fns = ca15_processor_functions
 	.size	__v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
 
 	/*
@@ -689,7 +763,7 @@ __v7_ca15mp_proc_info:
 __v7_b15mp_proc_info:
 	.long	0x420f00f0
 	.long	0xff0ffff0
-	__v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup, cache_fns = b15_cache_fns
+	__v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup, proc_fns = ca15_processor_functions, cache_fns = b15_cache_fns
 	.size	__v7_b15mp_proc_info, . - __v7_b15mp_proc_info
 
 	/*
@@ -699,9 +773,25 @@ __v7_b15mp_proc_info:
 __v7_ca17mp_proc_info:
 	.long	0x410fc0e0
 	.long	0xff0ffff0
-	__v7_proc __v7_ca17mp_proc_info, __v7_ca17mp_setup
+	__v7_proc __v7_ca17mp_proc_info, __v7_ca17mp_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
 	.size	__v7_ca17mp_proc_info, . - __v7_ca17mp_proc_info
 
+	/* ARM Ltd. Cortex A73 processor */
+	.type	__v7_ca73_proc_info, #object
+__v7_ca73_proc_info:
+	.long	0x410fd090
+	.long	0xff0ffff0
+	__v7_proc __v7_ca73_proc_info, __v7_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
+	.size	__v7_ca73_proc_info, . - __v7_ca73_proc_info
+
+	/* ARM Ltd. Cortex A75 processor */
+	.type	__v7_ca75_proc_info, #object
+__v7_ca75_proc_info:
+	.long	0x410fd0a0
+	.long	0xff0ffff0
+	__v7_proc __v7_ca75_proc_info, __v7_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
+	.size	__v7_ca75_proc_info, . - __v7_ca75_proc_info
+
 	/*
 	 * Qualcomm Inc. Krait processors.
 	 */
diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
index bcdecc25461b..b2aa9b32bff2 100644
--- a/arch/arm/probes/kprobes/opt-arm.c
+++ b/arch/arm/probes/kprobes/opt-arm.c
@@ -165,13 +165,14 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
 {
 	unsigned long flags;
 	struct kprobe *p = &op->kp;
-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+	struct kprobe_ctlblk *kcb;
 
 	/* Save skipped registers */
 	regs->ARM_pc = (unsigned long)op->kp.addr;
 	regs->ARM_ORIG_r0 = ~0UL;
 
 	local_irq_save(flags);
+	kcb = get_kprobe_ctlblk();
 
 	if (kprobe_running()) {
 		kprobes_inc_nmissed_count(&op->kp);
@@ -191,6 +192,7 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
 
 	local_irq_restore(flags);
 }
+NOKPROBE_SYMBOL(optimized_callback)
 
 int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *orig)
 {
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 4c375e11ae95..af4ee2cef2f9 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -257,7 +257,7 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_
 
 	if (exceptions == VFP_EXCEPTION_ERROR) {
 		vfp_panic("unhandled bounce", inst);
-		vfp_raise_sigfpe(FPE_FIXME, regs);
+		vfp_raise_sigfpe(FPE_FLTINV, regs);
 		return;
 	}
 