 arch/arm/Kconfig                    |   4
 arch/arm/Makefile                   |   2
 arch/arm/boot/compressed/Makefile   |   2
 arch/arm/include/asm/assembler.h    |   8
 arch/arm/include/asm/barrier.h      |  32
 arch/arm/include/asm/bugs.h         |   6
 arch/arm/include/asm/cp15.h         |   3
 arch/arm/include/asm/cputype.h      |   8
 arch/arm/include/asm/kgdb.h         |   2
 arch/arm/include/asm/kvm_asm.h      |   2
 arch/arm/include/asm/kvm_host.h     |  14
 arch/arm/include/asm/kvm_mmu.h      |  23
 arch/arm/include/asm/mpu.h          | 112
 arch/arm/include/asm/proc-fns.h     |   4
 arch/arm/include/asm/system_misc.h  |  15
 arch/arm/include/asm/uaccess.h      |   2
 arch/arm/include/asm/v7m.h          |  14
 arch/arm/kernel/Makefile            |   1
 arch/arm/kernel/asm-offsets.c       |   8
 arch/arm/kernel/bugs.c              |  18
 arch/arm/kernel/entry-common.S      |  18
 arch/arm/kernel/entry-header.S      |  25
 arch/arm/kernel/head-nommu.S        | 289
 arch/arm/kernel/smp.c               |   7
 arch/arm/kernel/suspend.c           |   2
 arch/arm/kernel/vmlinux-xip.lds.S   |   4
 arch/arm/kernel/vmlinux.lds.S       |   7
 arch/arm/kernel/vmlinux.lds.h       |  16
 arch/arm/kvm/hyp/hyp-entry.S        | 112
 arch/arm/mm/Kconfig                 |  23
 arch/arm/mm/Makefile                |   4
 arch/arm/mm/dma-mapping.c           |   2
 arch/arm/mm/fault.c                 |   3
 arch/arm/mm/nommu.c                 |  32
 arch/arm/mm/pmsa-v7.c               |  59
 arch/arm/mm/pmsa-v8.c               | 307
 arch/arm/mm/proc-macros.S           |   3
 arch/arm/mm/proc-v7-2level.S        |   6
 arch/arm/mm/proc-v7-bugs.c          | 174
 arch/arm/mm/proc-v7.S               | 154
 drivers/amba/bus.c                  |   1
 41 files changed, 1318 insertions(+), 210 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c43f5bb55ac8..8f460bdd4be1 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1698,6 +1698,7 @@ config ARCH_WANT_GENERAL_HUGETLB
 config ARM_MODULE_PLTS
 	bool "Use PLTs to allow module memory to spill over into vmalloc area"
 	depends on MODULES
+	default y
 	help
 	  Allocate PLTs when loading modules so that jumps and calls whose
 	  targets are too far away for their relative offsets to be encoded
@@ -1708,7 +1709,8 @@ config ARM_MODULE_PLTS
 	  rounding up to page size, the actual memory footprint is usually
 	  the same.
 
-	  Say y if you are getting out of memory errors while loading modules
+	  Disabling this is usually safe for small single-platform
+	  configurations. If unsure, say y.
 
 source "mm/Kconfig"
 
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index f32a5468d79f..1dc4045e1af6 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -106,7 +106,7 @@ tune-$(CONFIG_CPU_V6K) =$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)
 tune-y := $(tune-y)
 
 ifeq ($(CONFIG_AEABI),y)
-CFLAGS_ABI	:=-mabi=aapcs-linux -mno-thumb-interwork -mfpu=vfp
+CFLAGS_ABI	:=-mabi=aapcs-linux -mfpu=vfp
 else
 CFLAGS_ABI	:=$(call cc-option,-mapcs-32,-mabi=apcs-gnu) $(call cc-option,-mno-thumb-interwork,)
 endif
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 6a4e7341ecd3..a3c5fbcad4ab 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -113,7 +113,7 @@ CFLAGS_fdt_ro.o := $(nossp_flags)
 CFLAGS_fdt_rw.o := $(nossp_flags)
 CFLAGS_fdt_wip.o := $(nossp_flags)
 
-ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
+ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin -I$(obj)
 asflags-y := -DZIMAGE
 
 # Supply kernel BSS size to the decompressor via a linker symbol.
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 9342904cccca..0cd4dccbae78 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -447,6 +447,14 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
 	.size \name , . - \name
 	.endm
 
+	.macro	csdb
+#ifdef CONFIG_THUMB2_KERNEL
+	.inst.w	0xf3af8014
+#else
+	.inst	0xe320f014
+#endif
+	.endm
+
 	.macro	check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
 #ifndef CONFIG_CPU_USE_DOMAINS
 	adds	\tmp, \addr, #\size - 1
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 40f5c410fd8c..69772e742a0a 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -17,6 +17,12 @@
 #define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory")
 #define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory")
 #define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory")
+#ifdef CONFIG_THUMB2_KERNEL
+#define CSDB	".inst.w 0xf3af8014"
+#else
+#define CSDB	".inst	0xe320f014"
+#endif
+#define csdb() __asm__ __volatile__(CSDB : : : "memory")
 #elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
 #define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
 				    : : "r" (0) : "memory")
@@ -37,6 +43,13 @@
 #define dmb(x) __asm__ __volatile__ ("" : : : "memory")
 #endif
 
+#ifndef CSDB
+#define CSDB
+#endif
+#ifndef csdb
+#define csdb()
+#endif
+
 #ifdef CONFIG_ARM_HEAVY_MB
 extern void (*soc_mb)(void);
 extern void arm_heavy_mb(void);
@@ -63,6 +76,25 @@ extern void arm_heavy_mb(void);
 #define __smp_rmb()	__smp_mb()
 #define __smp_wmb()	dmb(ishst)
 
+#ifdef CONFIG_CPU_SPECTRE
+static inline unsigned long array_index_mask_nospec(unsigned long idx,
+						    unsigned long sz)
+{
+	unsigned long mask;
+
+	asm volatile(
+		"cmp	%1, %2\n"
+	"	sbc	%0, %1, %1\n"
+	CSDB
+	: "=r" (mask)
+	: "r" (idx), "Ir" (sz)
+	: "cc");
+
+	return mask;
+}
+#define array_index_mask_nospec array_index_mask_nospec
+#endif
+
 #include <asm-generic/barrier.h>
 
 #endif /* !__ASSEMBLY__ */
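The array_index_mask_nospec() helper added above returns all-ones when idx < sz and zero otherwise, using a compare plus carry-flag subtract followed by the new CSDB barrier so the result holds even under speculation. A minimal usage sketch — the table and its size are illustrative, not part of this series; in-tree callers normally go through the generic array_index_nospec() wrapper from <linux/nospec.h>:

	static int table[16];

	static int lookup(unsigned long idx)
	{
		unsigned long mask;

		if (idx >= 16)
			return -1;

		/* mask is ~0UL when idx is in range, 0 otherwise */
		mask = array_index_mask_nospec(idx, 16);

		/* a misspeculated access collapses to table[0] instead of leaking */
		return table[idx & mask];
	}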
diff --git a/arch/arm/include/asm/bugs.h b/arch/arm/include/asm/bugs.h
index a97f1ea708d1..73a99c72a930 100644
--- a/arch/arm/include/asm/bugs.h
+++ b/arch/arm/include/asm/bugs.h
@@ -10,12 +10,14 @@
 #ifndef __ASM_BUGS_H
 #define __ASM_BUGS_H
 
-#ifdef CONFIG_MMU
 extern void check_writebuffer_bugs(void);
 
-#define check_bugs() check_writebuffer_bugs()
+#ifdef CONFIG_MMU
+extern void check_bugs(void);
+extern void check_other_bugs(void);
 #else
 #define check_bugs() do { } while (0)
+#define check_other_bugs() do { } while (0)
 #endif
 
 #endif
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
index 4c9fa72b59f5..07e27f212dc7 100644
--- a/arch/arm/include/asm/cp15.h
+++ b/arch/arm/include/asm/cp15.h
@@ -65,6 +65,9 @@
 #define __write_sysreg(v, r, w, c, t)	asm volatile(w " " c : : "r" ((t)(v)))
 #define write_sysreg(v, ...)		__write_sysreg(v, __VA_ARGS__)
 
+#define BPIALL				__ACCESS_CP15(c7, 0, c5, 6)
+#define ICIALLU				__ACCESS_CP15(c7, 0, c5, 0)
+
 extern unsigned long cr_alignment;	/* defined in entry-armv.S */
 
 static inline unsigned long get_cr(void)
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index cb546425da8a..26021980504d 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -77,8 +77,16 @@
 #define ARM_CPU_PART_CORTEX_A12		0x4100c0d0
 #define ARM_CPU_PART_CORTEX_A17		0x4100c0e0
 #define ARM_CPU_PART_CORTEX_A15		0x4100c0f0
+#define ARM_CPU_PART_CORTEX_A53		0x4100d030
+#define ARM_CPU_PART_CORTEX_A57		0x4100d070
+#define ARM_CPU_PART_CORTEX_A72		0x4100d080
+#define ARM_CPU_PART_CORTEX_A73		0x4100d090
+#define ARM_CPU_PART_CORTEX_A75		0x4100d0a0
 #define ARM_CPU_PART_MASK		0xff00fff0
 
+/* Broadcom cores */
+#define ARM_CPU_PART_BRAHMA_B15		0x420000f0
+
 /* DEC implemented cores */
 #define ARM_CPU_PART_SA1100		0x4400a110
 
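The new part-number constants above are matched by masking the raw MIDR with ARM_CPU_PART_MASK, which keeps the implementer field ([31:24]) and primary part number ([15:4]). A hedged sketch of the kind of check the Spectre code in this series performs — the helper name is illustrative, not from this diff:

	static bool cpu_needs_bp_hardening(void)	/* hypothetical helper */
	{
		switch (read_cpuid_id() & ARM_CPU_PART_MASK) {
		case ARM_CPU_PART_BRAHMA_B15:
		case ARM_CPU_PART_CORTEX_A12:
		case ARM_CPU_PART_CORTEX_A15:
		case ARM_CPU_PART_CORTEX_A17:
			return true;
		default:
			return false;
		}
	}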
diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h
index 3b73fdcf3627..8de1100d1067 100644
--- a/arch/arm/include/asm/kgdb.h
+++ b/arch/arm/include/asm/kgdb.h
@@ -77,7 +77,7 @@ extern int kgdb_fault_expected;
 
 #define KGDB_MAX_NO_CPUS	1
 #define BUFMAX			400
-#define NUMREGBYTES		(DBG_MAX_REG_NUM << 2)
+#define NUMREGBYTES		(GDB_MAX_REGS << 2)
 #define NUMCRITREGBYTES	(32 << 2)
 
 #define _R0		0
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 5a953ecb0d78..231e87ad45d5 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -61,8 +61,6 @@ struct kvm_vcpu;
 extern char __kvm_hyp_init[];
 extern char __kvm_hyp_init_end[];
 
-extern char __kvm_hyp_vector[];
-
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index c7c28c885a19..343fc9e6f78d 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -21,6 +21,7 @@
 
 #include <linux/types.h>
 #include <linux/kvm_types.h>
+#include <asm/cputype.h>
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
@@ -311,8 +312,17 @@ static inline void kvm_arm_vhe_guest_exit(void) {}
 
 static inline bool kvm_arm_harden_branch_predictor(void)
 {
-	/* No way to detect it yet, pretend it is not there. */
-	return false;
+	switch(read_cpuid_part()) {
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+	case ARM_CPU_PART_BRAHMA_B15:
+	case ARM_CPU_PART_CORTEX_A12:
+	case ARM_CPU_PART_CORTEX_A15:
+	case ARM_CPU_PART_CORTEX_A17:
+		return true;
+#endif
+	default:
+		return false;
+	}
 }
 
 static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index f675162663f0..c94d291fd1a8 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -327,7 +327,28 @@ static inline int kvm_read_guest_lock(struct kvm *kvm,
 
 static inline void *kvm_get_hyp_vector(void)
 {
-	return kvm_ksym_ref(__kvm_hyp_vector);
+	switch(read_cpuid_part()) {
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+	case ARM_CPU_PART_CORTEX_A12:
+	case ARM_CPU_PART_CORTEX_A17:
+	{
+		extern char __kvm_hyp_vector_bp_inv[];
+		return kvm_ksym_ref(__kvm_hyp_vector_bp_inv);
+	}
+
+	case ARM_CPU_PART_BRAHMA_B15:
+	case ARM_CPU_PART_CORTEX_A15:
+	{
+		extern char __kvm_hyp_vector_ic_inv[];
+		return kvm_ksym_ref(__kvm_hyp_vector_ic_inv);
+	}
+#endif
+	default:
+	{
+		extern char __kvm_hyp_vector[];
+		return kvm_ksym_ref(__kvm_hyp_vector);
+	}
+	}
 }
 
 static inline int kvm_map_vectors(void)
diff --git a/arch/arm/include/asm/mpu.h b/arch/arm/include/asm/mpu.h
index 6d1491c8ee22..5e088c83d3d8 100644
--- a/arch/arm/include/asm/mpu.h
+++ b/arch/arm/include/asm/mpu.h
@@ -12,60 +12,101 @@
 /* ID_MMFR0 data relevant to MPU */
 #define MMFR0_PMSA		(0xF << 4)
 #define MMFR0_PMSAv7		(3 << 4)
+#define MMFR0_PMSAv8		(4 << 4)
 
 /* MPU D/I Size Register fields */
-#define MPU_RSR_SZ		1
-#define MPU_RSR_EN		0
-#define MPU_RSR_SD		8
+#define PMSAv7_RSR_SZ		1
+#define PMSAv7_RSR_EN		0
+#define PMSAv7_RSR_SD		8
 
 /* Number of subregions (SD) */
-#define MPU_NR_SUBREGS		8
-#define MPU_MIN_SUBREG_SIZE	256
+#define PMSAv7_NR_SUBREGS	8
+#define PMSAv7_MIN_SUBREG_SIZE	256
 
 /* The D/I RSR value for an enabled region spanning the whole of memory */
-#define MPU_RSR_ALL_MEM		63
+#define PMSAv7_RSR_ALL_MEM	63
 
 /* Individual bits in the DR/IR ACR */
-#define MPU_ACR_XN		(1 << 12)
-#define MPU_ACR_SHARED		(1 << 2)
+#define PMSAv7_ACR_XN		(1 << 12)
+#define PMSAv7_ACR_SHARED	(1 << 2)
 
 /* C, B and TEX[2:0] bits only have semantic meanings when grouped */
-#define MPU_RGN_CACHEABLE	0xB
-#define MPU_RGN_SHARED_CACHEABLE (MPU_RGN_CACHEABLE | MPU_ACR_SHARED)
-#define MPU_RGN_STRONGLY_ORDERED 0
+#define PMSAv7_RGN_CACHEABLE		0xB
+#define PMSAv7_RGN_SHARED_CACHEABLE	(PMSAv7_RGN_CACHEABLE | PMSAv7_ACR_SHARED)
+#define PMSAv7_RGN_STRONGLY_ORDERED	0
 
 /* Main region should only be shared for SMP */
 #ifdef CONFIG_SMP
-#define MPU_RGN_NORMAL		(MPU_RGN_CACHEABLE | MPU_ACR_SHARED)
+#define PMSAv7_RGN_NORMAL	(PMSAv7_RGN_CACHEABLE | PMSAv7_ACR_SHARED)
 #else
-#define MPU_RGN_NORMAL		MPU_RGN_CACHEABLE
+#define PMSAv7_RGN_NORMAL	PMSAv7_RGN_CACHEABLE
 #endif
 
 /* Access permission bits of ACR (only define those that we use)*/
-#define MPU_AP_PL1RO_PL0NA	(0x5 << 8)
-#define MPU_AP_PL1RW_PL0RW	(0x3 << 8)
-#define MPU_AP_PL1RW_PL0R0	(0x2 << 8)
-#define MPU_AP_PL1RW_PL0NA	(0x1 << 8)
+#define PMSAv7_AP_PL1RO_PL0NA	(0x5 << 8)
+#define PMSAv7_AP_PL1RW_PL0RW	(0x3 << 8)
+#define PMSAv7_AP_PL1RW_PL0R0	(0x2 << 8)
+#define PMSAv7_AP_PL1RW_PL0NA	(0x1 << 8)
+
+#define PMSAv8_BAR_XN		1
+
+#define PMSAv8_LAR_EN		1
+#define PMSAv8_LAR_IDX(n)	(((n) & 0x7) << 1)
+
+
+#define PMSAv8_AP_PL1RW_PL0NA	(0 << 1)
+#define PMSAv8_AP_PL1RW_PL0RW	(1 << 1)
+#define PMSAv8_AP_PL1RO_PL0RO	(3 << 1)
+
+#ifdef CONFIG_SMP
+#define PMSAv8_RGN_SHARED	(3 << 3) // inner sharable
+#else
+#define PMSAv8_RGN_SHARED	(0 << 3)
+#endif
+
+#define PMSAv8_RGN_DEVICE_nGnRnE	0
+#define PMSAv8_RGN_NORMAL	1
+
+#define PMSAv8_MAIR(attr, mt)	((attr) << ((mt) * 8))
+
+#ifdef CONFIG_CPU_V7M
+#define PMSAv8_MINALIGN		32
+#else
+#define PMSAv8_MINALIGN		64
+#endif
 
 /* For minimal static MPU region configurations */
-#define MPU_PROBE_REGION	0
-#define MPU_BG_REGION		1
-#define MPU_RAM_REGION		2
-#define MPU_ROM_REGION		3
+#define PMSAv7_PROBE_REGION	0
+#define PMSAv7_BG_REGION	1
+#define PMSAv7_RAM_REGION	2
+#define PMSAv7_ROM_REGION	3
+
+/* Fixed for PMSAv8 only */
+#define PMSAv8_XIP_REGION	0
+#define PMSAv8_KERNEL_REGION	1
 
 /* Maximum number of regions Linux is interested in */
 #define MPU_MAX_REGIONS		16
 
-#define MPU_DATA_SIDE	0
-#define MPU_INSTR_SIDE	1
+#define PMSAv7_DATA_SIDE	0
+#define PMSAv7_INSTR_SIDE	1
 
 #ifndef __ASSEMBLY__
 
 struct mpu_rgn {
 	/* Assume same attributes for d/i-side  */
-	u32 drbar;
-	u32 drsr;
-	u32 dracr;
+	union {
+		u32 drbar;   /* PMSAv7 */
+		u32 prbar;   /* PMSAv8 */
+	};
+	union {
+		u32 drsr;   /* PMSAv7 */
+		u32 prlar;  /* PMSAv8 */
+	};
+	union {
+		u32 dracr;  /* PMSAv7 */
+		u32 unused; /* not used in PMSAv8 */
+	};
 };
 
 struct mpu_rgn_info {
@@ -75,16 +116,17 @@ struct mpu_rgn_info {
 extern struct mpu_rgn_info mpu_rgn_info;
 
 #ifdef CONFIG_ARM_MPU
+extern void __init pmsav7_adjust_lowmem_bounds(void);
+extern void __init pmsav8_adjust_lowmem_bounds(void);
 
-extern void __init adjust_lowmem_bounds_mpu(void);
-extern void __init mpu_setup(void);
-
+extern void __init pmsav7_setup(void);
+extern void __init pmsav8_setup(void);
 #else
-
-static inline void adjust_lowmem_bounds_mpu(void) {}
-static inline void mpu_setup(void) {}
-
-#endif /* !CONFIG_ARM_MPU */
+static inline void pmsav7_adjust_lowmem_bounds(void) {};
+static inline void pmsav8_adjust_lowmem_bounds(void) {};
+static inline void pmsav7_setup(void) {};
+static inline void pmsav8_setup(void) {};
+#endif
 
 #endif /* __ASSEMBLY__ */
 
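PMSAv8 packs one 8-bit attribute per memory type into MAIR0/MAIR1, and the new PMSAv8_MAIR() macro shifts the attribute byte into the slot selected by the attribute index. A worked sketch of the value that head-nommu.S programs into MAIR0 later in this series:

	/* AttrIndx 0 = Device-nGnRnE (0x00), AttrIndx 1 = Normal (0xff) */
	unsigned int mair0 = PMSAv8_MAIR(0x00, PMSAv8_RGN_DEVICE_nGnRnE) |
			     PMSAv8_MAIR(0xff, PMSAv8_RGN_NORMAL);
	/* = (0x00 << 0) | (0xff << 8) = 0x0000ff00 */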
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index f2e1af45bd6f..e25f4392e1b2 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -37,6 +37,10 @@ extern struct processor {
 	 */
 	void (*_proc_init)(void);
 	/*
+	 * Check for processor bugs
+	 */
+	void (*check_bugs)(void);
+	/*
 	 * Disable any processor specifics
 	 */
 	void (*_proc_fin)(void);
diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h
index 78f6db114faf..8e76db83c498 100644
--- a/arch/arm/include/asm/system_misc.h
+++ b/arch/arm/include/asm/system_misc.h
@@ -8,6 +8,7 @@
 #include <linux/linkage.h>
 #include <linux/irqflags.h>
 #include <linux/reboot.h>
+#include <linux/percpu.h>
 
 extern void cpu_init(void);
 
@@ -15,6 +16,20 @@ void soft_restart(unsigned long);
 extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
 extern void (*arm_pm_idle)(void);
 
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+typedef void (*harden_branch_predictor_fn_t)(void);
+DECLARE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
+static inline void harden_branch_predictor(void)
+{
+	harden_branch_predictor_fn_t fn = per_cpu(harden_branch_predictor_fn,
+						  smp_processor_id());
+	if (fn)
+		fn();
+}
+#else
+#define harden_branch_predictor() do { } while (0)
+#endif
+
 #define UDBG_UNDEFINED	(1 << 0)
 #define UDBG_SYSCALL	(1 << 1)
 #define UDBG_BADABORT	(1 << 2)
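harden_branch_predictor() dispatches through a per-CPU function pointer so each core can install whichever invalidation method its part number needs (BPIALL, ICIALLU or a firmware call); the actual installation lives in proc-v7-bugs.c, which is part of this series but not shown in this excerpt. A hedged sketch of the wiring, assuming the DEFINE_PER_CPU sits alongside the probing code:

	DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);

	/* illustrative helper, not from this diff */
	static void install_bp_hardening(harden_branch_predictor_fn_t fn)
	{
		per_cpu(harden_branch_predictor_fn, smp_processor_id()) = fn;
	}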
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 0bf2347495f1..3d614e90c19f 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -152,7 +152,7 @@ extern int __get_user_64t_4(void *);
 #define __get_user_check(x, p)						\
 	({								\
 		unsigned long __limit = current_thread_info()->addr_limit - 1; \
-		register const typeof(*(p)) __user *__p asm("r0") = (p);\
+		register typeof(*(p)) __user *__p asm("r0") = (p);	\
 		register typeof(x) __r2 asm("r2");			\
 		register unsigned long __l asm("r1") = __limit;		\
 		register int __e asm("r0");				\
diff --git a/arch/arm/include/asm/v7m.h b/arch/arm/include/asm/v7m.h
index 634e77107425..187ccf6496ad 100644
--- a/arch/arm/include/asm/v7m.h
+++ b/arch/arm/include/asm/v7m.h
@@ -64,9 +64,17 @@
 #define MPU_CTRL_ENABLE		1
 #define MPU_CTRL_PRIVDEFENA	(1 << 2)
 
-#define MPU_RNR			0x98
-#define MPU_RBAR		0x9c
-#define MPU_RASR		0xa0
+#define PMSAv7_RNR		0x98
+#define PMSAv7_RBAR		0x9c
+#define PMSAv7_RASR		0xa0
+
+#define PMSAv8_RNR		0x98
+#define PMSAv8_RBAR		0x9c
+#define PMSAv8_RLAR		0xa0
+#define PMSAv8_RBAR_A(n)	(PMSAv8_RBAR + 8*(n))
+#define PMSAv8_RLAR_A(n)	(PMSAv8_RLAR + 8*(n))
+#define PMSAv8_MAIR0		0xc0
+#define PMSAv8_MAIR1		0xc4
 
 /* Cache opeartions */
 #define	V7M_SCB_ICIALLU		0x250	/* I-cache invalidate all to PoU */
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index b59ac4bf82b8..8cad59465af3 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -31,6 +31,7 @@ else
 obj-y		+= entry-armv.o
 endif
 
+obj-$(CONFIG_MMU)		+= bugs.o
 obj-$(CONFIG_CPU_IDLE)		+= cpuidle.o
 obj-$(CONFIG_ISA_DMA_API)	+= dma.o
 obj-$(CONFIG_FIQ)		+= fiq.o fiqasm.o
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index f369ece99958..27c5381518d8 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -194,9 +194,11 @@ int main(void)
   DEFINE(MPU_RNG_INFO_USED,	offsetof(struct mpu_rgn_info, used));
 
   DEFINE(MPU_RNG_SIZE,		sizeof(struct mpu_rgn));
   DEFINE(MPU_RGN_DRBAR,	offsetof(struct mpu_rgn, drbar));
   DEFINE(MPU_RGN_DRSR,	offsetof(struct mpu_rgn, drsr));
   DEFINE(MPU_RGN_DRACR,	offsetof(struct mpu_rgn, dracr));
+  DEFINE(MPU_RGN_PRBAR,	offsetof(struct mpu_rgn, prbar));
+  DEFINE(MPU_RGN_PRLAR,	offsetof(struct mpu_rgn, prlar));
 #endif
   return 0;
 }
diff --git a/arch/arm/kernel/bugs.c b/arch/arm/kernel/bugs.c
new file mode 100644
index 000000000000..7be511310191
--- /dev/null
+++ b/arch/arm/kernel/bugs.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <asm/bugs.h>
+#include <asm/proc-fns.h>
+
+void check_other_bugs(void)
+{
+#ifdef MULTI_CPU
+	if (processor.check_bugs)
+		processor.check_bugs();
+#endif
+}
+
+void __init check_bugs(void)
+{
+	check_writebuffer_bugs();
+	check_other_bugs();
+}
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 3c4f88701f22..20df608bf343 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -242,9 +242,7 @@ local_restart:
 	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
 	bne	__sys_trace
 
-	cmp	scno, #NR_syscalls		@ check upper syscall limit
-	badr	lr, ret_fast_syscall		@ return address
-	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
+	invoke_syscall tbl, scno, r10, ret_fast_syscall
 
 	add	r1, sp, #S_OFF
 2:	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
@@ -278,14 +276,8 @@ __sys_trace:
 	mov	r1, scno
 	add	r0, sp, #S_OFF
 	bl	syscall_trace_enter
-
-	badr	lr, __sys_trace_return		@ return address
-	mov	scno, r0			@ syscall number (possibly new)
-	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
-	cmp	scno, #NR_syscalls		@ check upper syscall limit
-	ldmccia	r1, {r0 - r6}			@ have to reload r0 - r6
-	stmccia	sp, {r4, r5}			@ and update the stack args
-	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
+	mov	scno, r0
+	invoke_syscall tbl, scno, r10, __sys_trace_return, reload=1
 	cmp	scno, #-1			@ skip the syscall?
 	bne	2b
 	add	sp, sp, #S_OFF			@ restore stack
@@ -363,6 +355,10 @@ sys_syscall:
 		bic	scno, r0, #__NR_OABI_SYSCALL_BASE
 		cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
 		cmpne	scno, #NR_syscalls	@ check range
+#ifdef CONFIG_CPU_SPECTRE
+		movhs	scno, #0
+		csdb
+#endif
 		stmloia	sp, {r5, r6}		@ shuffle args
 		movlo	r0, r1
 		movlo	r1, r2
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 0f07579af472..773424843d6e 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -378,6 +378,31 @@
 #endif
 	.endm
 
+	.macro	invoke_syscall, table, nr, tmp, ret, reload=0
+#ifdef CONFIG_CPU_SPECTRE
+	mov	\tmp, \nr
+	cmp	\tmp, #NR_syscalls		@ check upper syscall limit
+	movcs	\tmp, #0
+	csdb
+	badr	lr, \ret			@ return address
+	.if	\reload
+	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
+	ldmccia	r1, {r0 - r6}			@ reload r0-r6
+	stmccia	sp, {r4, r5}			@ update stack arguments
+	.endif
+	ldrcc	pc, [\table, \tmp, lsl #2]	@ call sys_* routine
+#else
+	cmp	\nr, #NR_syscalls		@ check upper syscall limit
+	badr	lr, \ret			@ return address
+	.if	\reload
+	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
+	ldmccia	r1, {r0 - r6}			@ reload r0-r6
+	stmccia	sp, {r4, r5}			@ update stack arguments
+	.endif
+	ldrcc	pc, [\table, \nr, lsl #2]	@ call sys_* routine
+#endif
+	.endm
+
 /*
  * These are the registers used in the syscall handler, and allow us to
  * have in theory up to 7 arguments to a function - r0 to r6.
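Under CONFIG_CPU_SPECTRE the invoke_syscall macro clamps an out-of-range syscall number to 0 and issues csdb before the conditional table load, so a mispredicted bounds check cannot index past sys_call_table. A rough C analogue of the control flow — not the actual dispatcher; the names below are illustrative:

	typedef long (*syscall_fn_t)(void);	/* illustrative signature */

	long invoke_syscall_sketch(syscall_fn_t *table, unsigned long nr)
	{
		unsigned long idx = nr;

		if (idx >= NR_syscalls)
			idx = 0;	/* movcs \tmp, #0: bound the speculated index */
		csdb();			/* speculation cannot continue with an unbounded index */

		if (nr < NR_syscalls)	/* ldrcc: dispatch only when architecturally in range */
			return table[idx]();
		return -1;		/* fall through to the arch-specific slow path */
	}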
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 2e38f85b757a..dd546d65a383 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -68,14 +68,6 @@ ENTRY(stext)
 	beq	__error_p			@ yes, error 'p'
 
 #ifdef CONFIG_ARM_MPU
-	/* Calculate the size of a region covering just the kernel */
-	ldr	r5, =PLAT_PHYS_OFFSET		@ Region start: PHYS_OFFSET
-	ldr	r6, =(_end)			@ Cover whole kernel
-	sub	r6, r6, r5			@ Minimum size of region to map
-	clz	r6, r6				@ Region size must be 2^N...
-	rsb	r6, r6, #31			@ ...so round up region size
-	lsl	r6, r6, #MPU_RSR_SZ		@ Put size in right field
-	orr	r6, r6, #(1 << MPU_RSR_EN)	@ Set region enabled bit
 	bl	__setup_mpu
 #endif
 
@@ -83,8 +75,8 @@ ENTRY(stext)
 	ldr	r12, [r10, #PROCINFO_INITFUNC]
 	add	r12, r12, r10
 	ret	r12
-1:	bl	__after_proc_init
-	b	__mmap_switched
+1:	ldr	lr, =__mmap_switched
+	b	__after_proc_init
 ENDPROC(stext)
 
 #ifdef CONFIG_SMP
@@ -110,8 +102,6 @@ ENTRY(secondary_startup)
 	ldr	r7, __secondary_data
 
 #ifdef CONFIG_ARM_MPU
-	/* Use MPU region info supplied by __cpu_up */
-	ldr	r6, [r7]			@ get secondary_data.mpu_rgn_info
 	bl	__secondary_setup_mpu		@ Initialize the MPU
 #endif
 
@@ -133,12 +123,45 @@ __secondary_data:
 /*
  * Set the Control Register and Read the process ID.
  */
+	.text
 __after_proc_init:
+#ifdef CONFIG_ARM_MPU
+M_CLASS(movw	r12, #:lower16:BASEADDR_V7M_SCB)
+M_CLASS(movt	r12, #:upper16:BASEADDR_V7M_SCB)
+M_CLASS(ldr	r3, [r12, 0x50])
+AR_CLASS(mrc	p15, 0, r3, c0, c1, 4)		@ Read ID_MMFR0
+	and	r3, r3, #(MMFR0_PMSA)		@ PMSA field
+	teq	r3, #(MMFR0_PMSAv7)		@ PMSA v7
+	beq	1f
+	teq	r3, #(MMFR0_PMSAv8)		@ PMSA v8
+	/*
+	 * Memory region attributes for PMSAv8:
+	 *
+	 *   n = AttrIndx[2:0]
+	 *                      n       MAIR
+	 *   DEVICE_nGnRnE      000     00000000
+	 *   NORMAL             001     11111111
+	 */
+	ldreq	r3, =PMSAv8_MAIR(0x00, PMSAv8_RGN_DEVICE_nGnRnE) | \
+		     PMSAv8_MAIR(0xff, PMSAv8_RGN_NORMAL)
+AR_CLASS(mcreq	p15, 0, r3, c10, c2, 0)		@ MAIR 0
+M_CLASS(streq	r3, [r12, #PMSAv8_MAIR0])
+	moveq	r3, #0
+AR_CLASS(mcreq	p15, 0, r3, c10, c2, 1)		@ MAIR 1
+M_CLASS(streq	r3, [r12, #PMSAv8_MAIR1])
+
+1:
+#endif
 #ifdef CONFIG_CPU_CP15
 	/*
 	 * CP15 system control register value returned in r0 from
 	 * the CPU init function.
 	 */
+
+#ifdef CONFIG_ARM_MPU
+	biceq	r0, r0, #CR_BR			@ Disable the 'default mem-map'
+	orreq	r0, r0, #CR_M			@ Set SCTRL.M (MPU on)
+#endif
 #if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
 	orr	r0, r0, #CR_A
 #else
@@ -154,7 +177,15 @@ __after_proc_init:
 	bic	r0, r0, #CR_I
 #endif
 	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
+	isb
 #elif defined (CONFIG_CPU_V7M)
+#ifdef CONFIG_ARM_MPU
+	ldreq	r3, [r12, MPU_CTRL]
+	biceq	r3, #MPU_CTRL_PRIVDEFENA
+	orreq	r3, #MPU_CTRL_ENABLE
+	streq	r3, [r12, MPU_CTRL]
+	isb
+#endif
 	/* For V7M systems we want to modify the CCR similarly to the SCTLR */
 #ifdef CONFIG_CPU_DCACHE_DISABLE
 	bic	r0, r0, #V7M_SCB_CCR_DC
@@ -165,9 +196,7 @@ __after_proc_init:
 #ifdef CONFIG_CPU_ICACHE_DISABLE
 	bic	r0, r0, #V7M_SCB_CCR_IC
 #endif
-	movw	r3, #:lower16:(BASEADDR_V7M_SCB + V7M_SCB_CCR)
-	movt	r3, #:upper16:(BASEADDR_V7M_SCB + V7M_SCB_CCR)
-	str	r0, [r3]
+	str	r0, [r12, V7M_SCB_CCR]
 #endif /* CONFIG_CPU_CP15 elif CONFIG_CPU_V7M */
 	ret	lr
 ENDPROC(__after_proc_init)
@@ -184,7 +213,7 @@ ENDPROC(__after_proc_init)
 .endm
 
 /* Setup a single MPU region, either D or I side (D-side for unified) */
-.macro setup_region bar, acr, sr, side = MPU_DATA_SIDE, unused
+.macro setup_region bar, acr, sr, side = PMSAv7_DATA_SIDE, unused
 	mcr	p15, 0, \bar, c6, c1, (0 + \side)	@ I/DRBAR
 	mcr	p15, 0, \acr, c6, c1, (4 + \side)	@ I/DRACR
 	mcr	p15, 0, \sr, c6, c1, (2 + \side)	@ I/DRSR
@@ -192,14 +221,14 @@ ENDPROC(__after_proc_init)
 #else
 .macro set_region_nr tmp, rgnr, base
 	mov	\tmp, \rgnr
-	str     \tmp, [\base, #MPU_RNR]
+	str     \tmp, [\base, #PMSAv7_RNR]
 .endm
 
 .macro setup_region bar, acr, sr, unused, base
 	lsl     \acr, \acr, #16
 	orr     \acr, \acr, \sr
-	str     \bar, [\base, #MPU_RBAR]
-	str     \acr, [\base, #MPU_RASR]
+	str     \bar, [\base, #PMSAv7_RBAR]
+	str     \acr, [\base, #PMSAv7_RASR]
 .endm
 
 #endif
@@ -210,8 +239,9 @@ ENDPROC(__after_proc_init)
  * Region 2: Normal, Shared, cacheable for RAM. From PHYS_OFFSET, size from r6
  * Region 3: Normal, shared, inaccessible from PL0 to protect the vectors page
  *
- * r6: Value to be written to DRSR (and IRSR if required) for MPU_RAM_REGION
+ * r6: Value to be written to DRSR (and IRSR if required) for PMSAv7_RAM_REGION
 */
+	__HEAD
 
 ENTRY(__setup_mpu)
 
@@ -223,7 +253,22 @@ AR_CLASS(mrc	p15, 0, r0, c0, c1, 4)		@ Read ID_MMFR0
 M_CLASS(ldr	r0, [r12, 0x50])
 	and	r0, r0, #(MMFR0_PMSA)		@ PMSA field
 	teq	r0, #(MMFR0_PMSAv7)		@ PMSA v7
-	bxne	lr
+	beq	__setup_pmsa_v7
+	teq	r0, #(MMFR0_PMSAv8)		@ PMSA v8
+	beq	__setup_pmsa_v8
+
+	ret	lr
+ENDPROC(__setup_mpu)
+
+ENTRY(__setup_pmsa_v7)
+	/* Calculate the size of a region covering just the kernel */
+	ldr	r5, =PLAT_PHYS_OFFSET		@ Region start: PHYS_OFFSET
+	ldr	r6, =(_end)			@ Cover whole kernel
+	sub	r6, r6, r5			@ Minimum size of region to map
+	clz	r6, r6				@ Region size must be 2^N...
+	rsb	r6, r6, #31			@ ...so round up region size
+	lsl	r6, r6, #PMSAv7_RSR_SZ		@ Put size in right field
+	orr	r6, r6, #(1 << PMSAv7_RSR_EN)	@ Set region enabled bit
 
 	/* Determine whether the D/I-side memory map is unified. We set the
 	 * flags here and continue to use them for the rest of this function */
@@ -234,77 +279,189 @@ M_CLASS(ldr r0, [r12, #MPU_TYPE])
 	tst	r0, #MPUIR_nU		@ MPUIR_nU = 0 for unified
 
 	/* Setup second region first to free up r6 */
-	set_region_nr r0, #MPU_RAM_REGION, r12
+	set_region_nr r0, #PMSAv7_RAM_REGION, r12
 	isb
 	/* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */
 	ldr	r0, =PLAT_PHYS_OFFSET		@ RAM starts at PHYS_OFFSET
-	ldr	r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL)
+	ldr	r5,=(PMSAv7_AP_PL1RW_PL0RW | PMSAv7_RGN_NORMAL)
 
-	setup_region r0, r5, r6, MPU_DATA_SIDE, r12	@ PHYS_OFFSET, shared, enabled
+	setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12	@ PHYS_OFFSET, shared, enabled
 	beq	1f					@ Memory-map not unified
-	setup_region r0, r5, r6, MPU_INSTR_SIDE, r12	@ PHYS_OFFSET, shared, enabled
+	setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12	@ PHYS_OFFSET, shared, enabled
 1:	isb
 
 	/* First/background region */
-	set_region_nr r0, #MPU_BG_REGION, r12
+	set_region_nr r0, #PMSAv7_BG_REGION, r12
 	isb
 	/* Execute Never,  strongly ordered, inaccessible to PL0, rw PL1 */
 	mov	r0, #0				@ BG region starts at 0x0
-	ldr	r5,=(MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0NA)
-	mov	r6, #MPU_RSR_ALL_MEM		@ 4GB region, enabled
+	ldr	r5,=(PMSAv7_ACR_XN | PMSAv7_RGN_STRONGLY_ORDERED | PMSAv7_AP_PL1RW_PL0NA)
+	mov	r6, #PMSAv7_RSR_ALL_MEM		@ 4GB region, enabled
 
-	setup_region r0, r5, r6, MPU_DATA_SIDE, r12	@ 0x0, BG region, enabled
+	setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12	@ 0x0, BG region, enabled
 	beq	2f					@ Memory-map not unified
-	setup_region r0, r5, r6, MPU_INSTR_SIDE r12	@ 0x0, BG region, enabled
+	setup_region r0, r5, r6, PMSAv7_INSTR_SIDE r12	@ 0x0, BG region, enabled
 2:	isb
 
 #ifdef CONFIG_XIP_KERNEL
-	set_region_nr r0, #MPU_ROM_REGION, r12
+	set_region_nr r0, #PMSAv7_ROM_REGION, r12
 	isb
 
-	ldr	r5,=(MPU_AP_PL1RO_PL0NA | MPU_RGN_NORMAL)
+	ldr	r5,=(PMSAv7_AP_PL1RO_PL0NA | PMSAv7_RGN_NORMAL)
 
 	ldr	r0, =CONFIG_XIP_PHYS_ADDR		@ ROM start
 	ldr	r6, =(_exiprom)				@ ROM end
 	sub	r6, r6, r0				@ Minimum size of region to map
 	clz	r6, r6					@ Region size must be 2^N...
 	rsb	r6, r6, #31				@ ...so round up region size
-	lsl	r6, r6, #MPU_RSR_SZ			@ Put size in right field
-	orr	r6, r6, #(1 << MPU_RSR_EN)		@ Set region enabled bit
+	lsl	r6, r6, #PMSAv7_RSR_SZ			@ Put size in right field
+	orr	r6, r6, #(1 << PMSAv7_RSR_EN)		@ Set region enabled bit
 
-	setup_region r0, r5, r6, MPU_DATA_SIDE, r12	@ XIP_PHYS_ADDR, shared, enabled
+	setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12	@ XIP_PHYS_ADDR, shared, enabled
 	beq	3f					@ Memory-map not unified
-	setup_region r0, r5, r6, MPU_INSTR_SIDE, r12	@ XIP_PHYS_ADDR, shared, enabled
+	setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12	@ XIP_PHYS_ADDR, shared, enabled
 3:	isb
 #endif
+	ret	lr
+ENDPROC(__setup_pmsa_v7)
+
+ENTRY(__setup_pmsa_v8)
+	mov	r0, #0
+AR_CLASS(mcr	p15, 0, r0, c6, c2, 1)		@ PRSEL
+M_CLASS(str	r0, [r12, #PMSAv8_RNR])
+	isb
+
+#ifdef CONFIG_XIP_KERNEL
+	ldr	r5, =CONFIG_XIP_PHYS_ADDR	@ ROM start
+	ldr	r6, =(_exiprom)			@ ROM end
+	sub	r6, r6, #1
+	bic	r6, r6, #(PMSAv8_MINALIGN - 1)
+
+	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED)
+	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN)
+
+AR_CLASS(mcr	p15, 0, r5, c6, c8, 0)		@ PRBAR0
+AR_CLASS(mcr	p15, 0, r6, c6, c8, 1)		@ PRLAR0
+M_CLASS(str	r5, [r12, #PMSAv8_RBAR_A(0)])
+M_CLASS(str	r6, [r12, #PMSAv8_RLAR_A(0)])
+#endif
+
+	ldr	r5, =KERNEL_START
+	ldr	r6, =KERNEL_END
+	sub	r6, r6, #1
+	bic	r6, r6, #(PMSAv8_MINALIGN - 1)
+
+	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED)
+	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN)
+
+AR_CLASS(mcr	p15, 0, r5, c6, c8, 4)		@ PRBAR1
+AR_CLASS(mcr	p15, 0, r6, c6, c8, 5)		@ PRLAR1
+M_CLASS(str	r5, [r12, #PMSAv8_RBAR_A(1)])
+M_CLASS(str	r6, [r12, #PMSAv8_RLAR_A(1)])
+
+	/* Setup Background: 0x0 - min(KERNEL_START, XIP_PHYS_ADDR) */
+#ifdef CONFIG_XIP_KERNEL
+	ldr	r6, =KERNEL_START
+	ldr	r5, =CONFIG_XIP_PHYS_ADDR
+	cmp	r6, r5
+	movcs	r6, r5
+#else
+	ldr	r6, =KERNEL_START
+#endif
+	cmp	r6, #0
+	beq	1f
+
+	mov	r5, #0
+	sub	r6, r6, #1
+	bic	r6, r6, #(PMSAv8_MINALIGN - 1)
+
+	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
+	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)
+
+AR_CLASS(mcr	p15, 0, r5, c6, c9, 0)		@ PRBAR2
+AR_CLASS(mcr	p15, 0, r6, c6, c9, 1)		@ PRLAR2
+M_CLASS(str	r5, [r12, #PMSAv8_RBAR_A(2)])
+M_CLASS(str	r6, [r12, #PMSAv8_RLAR_A(2)])
+
+1:
+	/* Setup Background: max(KERNEL_END, _exiprom) - 0xffffffff */
+#ifdef CONFIG_XIP_KERNEL
+	ldr	r5, =KERNEL_END
+	ldr	r6, =(_exiprom)
+	cmp	r5, r6
+	movcc	r5, r6
+#else
+	ldr	r5, =KERNEL_END
+#endif
+	mov	r6, #0xffffffff
+	bic	r6, r6, #(PMSAv8_MINALIGN - 1)
+
+	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
+	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)
 
-	/* Enable the MPU */
-AR_CLASS(mrc	p15, 0, r0, c1, c0, 0)		@ Read SCTLR
-AR_CLASS(bic	r0, r0, #CR_BR)			@ Disable the 'default mem-map'
-AR_CLASS(orr	r0, r0, #CR_M)			@ Set SCTRL.M (MPU on)
-AR_CLASS(mcr	p15, 0, r0, c1, c0, 0)		@ Enable MPU
+AR_CLASS(mcr	p15, 0, r5, c6, c9, 4)		@ PRBAR3
+AR_CLASS(mcr	p15, 0, r6, c6, c9, 5)		@ PRLAR3
+M_CLASS(str	r5, [r12, #PMSAv8_RBAR_A(3)])
+M_CLASS(str	r6, [r12, #PMSAv8_RLAR_A(3)])
 
-M_CLASS(ldr	r0, [r12, #MPU_CTRL])
-M_CLASS(bic	r0, #MPU_CTRL_PRIVDEFENA)
-M_CLASS(orr	r0, #MPU_CTRL_ENABLE)
-M_CLASS(str	r0, [r12, #MPU_CTRL])
+#ifdef CONFIG_XIP_KERNEL
+	/* Setup Background: min(_exiprom, KERNEL_END) - max(KERNEL_START, XIP_PHYS_ADDR) */
+	ldr	r5, =(_exiprom)
+	ldr	r6, =KERNEL_END
+	cmp	r5, r6
+	movcs	r5, r6
+
+	ldr	r6, =KERNEL_START
+	ldr	r0, =CONFIG_XIP_PHYS_ADDR
+	cmp	r6, r0
+	movcc	r6, r0
+
+	sub	r6, r6, #1
+	bic	r6, r6, #(PMSAv8_MINALIGN - 1)
+
+	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
+	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)
+
+#ifdef CONFIG_CPU_V7M
+	/* There is no alias for n == 4 */
+	mov	r0, #4
+	str	r0, [r12, #PMSAv8_RNR]		@ PRSEL
 	isb
 
+	str	r5, [r12, #PMSAv8_RBAR_A(0)]
+	str	r6, [r12, #PMSAv8_RLAR_A(0)]
+#else
+	mcr	p15, 0, r5, c6, c10, 1		@ PRBAR4
+	mcr	p15, 0, r6, c6, c10, 2		@ PRLAR4
+#endif
+#endif
 	ret	lr
-ENDPROC(__setup_mpu)
+ENDPROC(__setup_pmsa_v8)
 
 #ifdef CONFIG_SMP
 /*
  * r6: pointer at mpu_rgn_info
  */
 
+	.text
 ENTRY(__secondary_setup_mpu)
+	/* Use MPU region info supplied by __cpu_up */
+	ldr	r6, [r7]			@ get secondary_data.mpu_rgn_info
+
 	/* Probe for v7 PMSA compliance */
 	mrc	p15, 0, r0, c0, c1, 4		@ Read ID_MMFR0
 	and	r0, r0, #(MMFR0_PMSA)		@ PMSA field
 	teq	r0, #(MMFR0_PMSAv7)		@ PMSA v7
-	bne	__error_p
+	beq	__secondary_setup_pmsa_v7
+	teq	r0, #(MMFR0_PMSAv8)		@ PMSA v8
+	beq	__secondary_setup_pmsa_v8
+	b	__error_p
+ENDPROC(__secondary_setup_mpu)
 
+/*
+ * r6: pointer at mpu_rgn_info
+ */
+ENTRY(__secondary_setup_pmsa_v7)
 	/* Determine whether the D/I-side memory map is unified. We set the
 	 * flags here and continue to use them for the rest of this function */
 	mrc	p15, 0, r0, c0, c0, 4		@ MPUIR
@@ -328,25 +485,45 @@ ENTRY(__secondary_setup_mpu)
 	ldr	r6, [r3, #MPU_RGN_DRSR]
 	ldr	r5, [r3, #MPU_RGN_DRACR]
 
-	setup_region r0, r5, r6, MPU_DATA_SIDE
+	setup_region r0, r5, r6, PMSAv7_DATA_SIDE
 	beq	2f
-	setup_region r0, r5, r6, MPU_INSTR_SIDE
+	setup_region r0, r5, r6, PMSAv7_INSTR_SIDE
 2:	isb
 
 	mrc	p15, 0, r0, c0, c0, 4		@ Reevaluate the MPUIR
 	cmp	r4, #0
 	bgt	1b
 
-	/* Enable the MPU */
-	mrc	p15, 0, r0, c1, c0, 0		@ Read SCTLR
-	bic	r0, r0, #CR_BR			@ Disable the 'default mem-map'
-	orr	r0, r0, #CR_M			@ Set SCTRL.M (MPU on)
-	mcr	p15, 0, r0, c1, c0, 0		@ Enable MPU
+	ret	lr
+ENDPROC(__secondary_setup_pmsa_v7)
+
+ENTRY(__secondary_setup_pmsa_v8)
+	ldr	r4, [r6, #MPU_RNG_INFO_USED]
+#ifndef CONFIG_XIP_KERNEL
+	add	r4, r4, #1
+#endif
+	mov	r5, #MPU_RNG_SIZE
+	add	r3, r6, #MPU_RNG_INFO_RNGS
+	mla	r3, r4, r5, r3
+
+1:
+	sub	r3, r3, #MPU_RNG_SIZE
+	sub	r4, r4, #1
+
+	mcr	p15, 0, r4, c6, c2, 1		@ PRSEL
 	isb
 
-	ret	lr
-ENDPROC(__secondary_setup_mpu)
+	ldr	r5, [r3, #MPU_RGN_PRBAR]
+	ldr	r6, [r3, #MPU_RGN_PRLAR]
 
+	mcr	p15, 0, r5, c6, c3, 0		@ PRBAR
+	mcr	p15, 0, r6, c6, c3, 1		@ PRLAR
+
+	cmp	r4, #0
+	bgt	1b
+
+	ret	lr
+ENDPROC(__secondary_setup_pmsa_v8)
 #endif /* CONFIG_SMP */
 #endif /* CONFIG_ARM_MPU */
 #include "head-common.S"
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 2da087926ebe..0978282d5fc2 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -31,6 +31,7 @@
 #include <linux/irq_work.h>
 
 #include <linux/atomic.h>
+#include <asm/bugs.h>
 #include <asm/smp.h>
 #include <asm/cacheflush.h>
 #include <asm/cpu.h>
@@ -236,8 +237,6 @@ int __cpu_disable(void)
 	flush_cache_louis();
 	local_flush_tlb_all();
 
-	clear_tasks_mm_cpumask(cpu);
-
 	return 0;
 }
 
@@ -255,6 +254,7 @@ void __cpu_die(unsigned int cpu)
 	}
 	pr_debug("CPU%u: shutdown\n", cpu);
 
+	clear_tasks_mm_cpumask(cpu);
 	/*
 	 * platform_cpu_kill() is generally expected to do the powering off
 	 * and/or cutting of clocks to the dying CPU.  Optionally, this may
@@ -405,6 +405,9 @@ asmlinkage void secondary_start_kernel(void)
 	 * before we continue - which happens after __cpu_up returns.
 	 */
 	set_cpu_online(cpu, true);
+
+	check_other_bugs();
+
 	complete(&cpu_running);
 
 	local_irq_enable();
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index a40ebb7c0896..d08099269e35 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -3,6 +3,7 @@
 #include <linux/slab.h>
 #include <linux/mm_types.h>
 
+#include <asm/bugs.h>
 #include <asm/cacheflush.h>
 #include <asm/idmap.h>
 #include <asm/pgalloc.h>
@@ -36,6 +37,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 		cpu_switch_mm(mm->pgd, mm);
 		local_flush_bp_all();
 		local_flush_tlb_all();
+		check_other_bugs();
 	}
 
 	return ret;
diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S
index d32f5d35f602..3593d5c1acd2 100644
--- a/arch/arm/kernel/vmlinux-xip.lds.S
+++ b/arch/arm/kernel/vmlinux-xip.lds.S
@@ -13,6 +13,7 @@
 #include <asm/cache.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
+#include <asm/mpu.h>
 #include <asm/page.h>
 
 #include "vmlinux.lds.h"
@@ -148,6 +149,9 @@ SECTIONS
 	__init_end = .;
 
 	BSS_SECTION(0, 0, 8)
+#ifdef CONFIG_ARM_MPU
+	. = ALIGN(PMSAv8_MINALIGN);
+#endif
 	_end = .;
 
 	STABS_DEBUG
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index b77dc675ae55..23150c0f0f4d 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -12,6 +12,7 @@
 #include <asm/cache.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
+#include <asm/mpu.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 
@@ -54,6 +55,9 @@ SECTIONS
 	. = ALIGN(1<<SECTION_SHIFT);
 #endif
 
+#ifdef CONFIG_ARM_MPU
+	. = ALIGN(PMSAv8_MINALIGN);
+#endif
 	.text : {		/* Real text segment	*/
 		_stext = .;	/* Text and read-only data */
 		ARM_TEXT
@@ -143,6 +147,9 @@ SECTIONS
 	_edata = .;
 
 	BSS_SECTION(0, 0, 0)
+#ifdef CONFIG_ARM_MPU
+	. = ALIGN(PMSAv8_MINALIGN);
+#endif
 	_end = .;
 
 	STABS_DEBUG
diff --git a/arch/arm/kernel/vmlinux.lds.h b/arch/arm/kernel/vmlinux.lds.h
index 71281e08e1d4..ae5fdff18406 100644
--- a/arch/arm/kernel/vmlinux.lds.h
+++ b/arch/arm/kernel/vmlinux.lds.h
@@ -27,24 +27,24 @@
 
 #define PROC_INFO							\
 		. = ALIGN(4);						\
-		VMLINUX_SYMBOL(__proc_info_begin) = .;			\
+		__proc_info_begin = .;					\
 		*(.proc.info.init)					\
-		VMLINUX_SYMBOL(__proc_info_end) = .;
+		__proc_info_end = .;
 
 #define HYPERVISOR_TEXT							\
-		VMLINUX_SYMBOL(__hyp_text_start) = .;			\
+		__hyp_text_start = .;					\
 		*(.hyp.text)						\
-		VMLINUX_SYMBOL(__hyp_text_end) = .;
+		__hyp_text_end = .;
 
 #define IDMAP_TEXT							\
 		ALIGN_FUNCTION();					\
-		VMLINUX_SYMBOL(__idmap_text_start) = .;			\
+		__idmap_text_start = .;					\
 		*(.idmap.text)						\
-		VMLINUX_SYMBOL(__idmap_text_end) = .;			\
+		__idmap_text_end = .;					\
 		. = ALIGN(PAGE_SIZE);					\
-		VMLINUX_SYMBOL(__hyp_idmap_text_start) = .;		\
+		__hyp_idmap_text_start = .;				\
 		*(.hyp.idmap.text)					\
-		VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
+		__hyp_idmap_text_end = .;
 
 #define ARM_DISCARD							\
 		*(.ARM.exidx.exit.text)					\
diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S
index 95a2faefc070..aa3f9a9837ac 100644
--- a/arch/arm/kvm/hyp/hyp-entry.S
+++ b/arch/arm/kvm/hyp/hyp-entry.S
@@ -16,6 +16,7 @@
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  */
 
+#include <linux/arm-smccc.h>
 #include <linux/linkage.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
@@ -71,6 +72,90 @@ __kvm_hyp_vector:
 	W(b)	hyp_irq
 	W(b)	hyp_fiq
 
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+	.align 5
+__kvm_hyp_vector_ic_inv:
+	.global __kvm_hyp_vector_ic_inv
+
+	/*
+	 * We encode the exception entry in the bottom 3 bits of
+	 * SP, and we have to guarantee to be 8 bytes aligned.
+	 */
+	W(add)	sp, sp, #1	/* Reset          7 */
+	W(add)	sp, sp, #1	/* Undef          6 */
+	W(add)	sp, sp, #1	/* Syscall        5 */
+	W(add)	sp, sp, #1	/* Prefetch abort 4 */
+	W(add)	sp, sp, #1	/* Data abort     3 */
+	W(add)	sp, sp, #1	/* HVC            2 */
+	W(add)	sp, sp, #1	/* IRQ            1 */
+	W(nop)			/* FIQ            0 */
+
+	mcr	p15, 0, r0, c7, c5, 0	/* ICIALLU */
+	isb
+
+	b	decode_vectors
+
+	.align 5
+__kvm_hyp_vector_bp_inv:
+	.global __kvm_hyp_vector_bp_inv
+
+	/*
+	 * We encode the exception entry in the bottom 3 bits of
+	 * SP, and we have to guarantee to be 8 bytes aligned.
+	 */
+	W(add)	sp, sp, #1	/* Reset          7 */
+	W(add)	sp, sp, #1	/* Undef          6 */
+	W(add)	sp, sp, #1	/* Syscall        5 */
+	W(add)	sp, sp, #1	/* Prefetch abort 4 */
+	W(add)	sp, sp, #1	/* Data abort     3 */
+	W(add)	sp, sp, #1	/* HVC            2 */
+	W(add)	sp, sp, #1	/* IRQ            1 */
+	W(nop)			/* FIQ            0 */
+
+	mcr	p15, 0, r0, c7, c5, 6	/* BPIALL */
+	isb
+
+decode_vectors:
+
+#ifdef CONFIG_THUMB2_KERNEL
+	/*
+	 * Yet another silly hack: Use VPIDR as a temp register.
+	 * Thumb2 is really a pain, as SP cannot be used with most
+	 * of the bitwise instructions. The vect_br macro ensures
+	 * things gets cleaned-up.
+	 */
+	mcr	p15, 4, r0, c0, c0, 0	/* VPIDR */
+	mov	r0, sp
+	and	r0, r0, #7
+	sub	sp, sp, r0
+	push	{r1, r2}
+	mov	r1, r0
+	mrc	p15, 4, r0, c0, c0, 0	/* VPIDR */
+	mrc	p15, 0, r2, c0, c0, 0	/* MIDR  */
+	mcr	p15, 4, r2, c0, c0, 0	/* VPIDR */
+#endif
+
+.macro vect_br val, targ
+ARM(	eor	sp, sp, #\val	)
+ARM(	tst	sp, #7		)
+ARM(	eorne	sp, sp, #\val	)
+
+THUMB(	cmp	r1, #\val	)
+THUMB(	popeq	{r1, r2}	)
+
+	beq	\targ
+.endm
+
+	vect_br	0, hyp_fiq
+	vect_br	1, hyp_irq
+	vect_br	2, hyp_hvc
+	vect_br	3, hyp_dabt
+	vect_br	4, hyp_pabt
+	vect_br	5, hyp_svc
+	vect_br	6, hyp_undef
+	vect_br	7, hyp_reset
+#endif
+
 .macro invalid_vector label, cause
 	.align
 \label:	mov	r0, #\cause
@@ -118,7 +203,7 @@ hyp_hvc:
 	lsr	r2, r2, #16
 	and	r2, r2, #0xff
 	cmp	r2, #0
-	bne	guest_trap		@ Guest called HVC
+	bne	guest_hvc_trap		@ Guest called HVC
 
 	/*
 	 * Getting here means host called HVC, we shift parameters and branch
@@ -149,7 +234,14 @@ hyp_hvc:
 	bx	ip
 
 1:
-	push	{lr}
+	/*
+	 * Pushing r2 here is just a way of keeping the stack aligned to
+	 * 8 bytes on any path that can trigger a HYP exception. Here,
+	 * we may well be about to jump into the guest, and the guest
+	 * exit would otherwise be badly decoded by our fancy
+	 * "decode-exception-without-a-branch" code...
+	 */
+	push	{r2, lr}
 
 	mov	lr, r0
 	mov	r0, r1
@@ -159,7 +251,21 @@ hyp_hvc:
 THUMB(	orr	lr, #1)
 	blx	lr			@ Call the HYP function
 
-	pop	{lr}
+	pop	{r2, lr}
+	eret
+
+guest_hvc_trap:
+	movw	r2, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
+	movt	r2, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
+	ldr	r0, [sp]		@ Guest's r0
+	teq	r0, r2
+	bne	guest_trap
+	add	sp, sp, #12
+	@ Returns:
+	@ r0 = 0
+	@ r1 = HSR value (perfectly predictable)
+	@ r2 = ARM_SMCCC_ARCH_WORKAROUND_1
+	mov	r0, #0
 	eret
 
 guest_trap:
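The new guest_hvc_trap path gives guests a fast exit for the SMCCC 1.1 ARM_SMCCC_ARCH_WORKAROUND_1 call: the hardened vector entry has already invalidated the predictor, so the handler only has to unwind the stack and return 0. From the guest side the call looks roughly like this sketch, using the standard arm_smccc_1_1_hvc() helper declared in <linux/arm-smccc.h>:

	struct arm_smccc_res res;

	/* ask the hypervisor to invalidate the branch predictor */
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, &res);
	/* res.a0 == 0 on the fast path shown above */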
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 5a016bc80e26..96a7b6cf459b 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -415,6 +415,7 @@ config CPU_V7
415 select CPU_CP15_MPU if !MMU 415 select CPU_CP15_MPU if !MMU
416 select CPU_HAS_ASID if MMU 416 select CPU_HAS_ASID if MMU
417 select CPU_PABRT_V7 417 select CPU_PABRT_V7
418 select CPU_SPECTRE if MMU
418 select CPU_THUMB_CAPABLE 419 select CPU_THUMB_CAPABLE
419 select CPU_TLB_V7 if MMU 420 select CPU_TLB_V7 if MMU
420 421
@@ -821,6 +822,28 @@ config CPU_BPREDICT_DISABLE
821 help 822 help
822 Say Y here to disable branch prediction. If unsure, say N. 823 Say Y here to disable branch prediction. If unsure, say N.
823 824
825config CPU_SPECTRE
826 bool
827
828config HARDEN_BRANCH_PREDICTOR
829 bool "Harden the branch predictor against aliasing attacks" if EXPERT
830 depends on CPU_SPECTRE
831 default y
832 help
833 Speculation attacks against some high-performance processors rely
834 on being able to manipulate the branch predictor for a victim
835 context by executing aliasing branches in the attacker context.
836	  Such attacks can be partially mitigated by clearing
837 internal branch predictor state and limiting the prediction
838 logic in some situations.
839
840 This config option will take CPU-specific actions to harden
841 the branch predictor against aliasing attacks and may rely on
842 specific instruction sequences or control bits being set by
843 the system firmware.
844
845 If unsure, say Y.
846
824config TLS_REG_EMUL 847config TLS_REG_EMUL
825 bool 848 bool
826 select NEED_KUSER_HELPERS 849 select NEED_KUSER_HELPERS
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 9dbb84923e12..7cb1699fbfc4 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \
10 10
11ifneq ($(CONFIG_MMU),y) 11ifneq ($(CONFIG_MMU),y)
12obj-y += nommu.o 12obj-y += nommu.o
13obj-$(CONFIG_ARM_MPU) += pmsa-v7.o 13obj-$(CONFIG_ARM_MPU) += pmsa-v7.o pmsa-v8.o
14endif 14endif
15 15
16obj-$(CONFIG_ARM_PTDUMP_CORE) += dump.o 16obj-$(CONFIG_ARM_PTDUMP_CORE) += dump.o
@@ -97,7 +97,7 @@ obj-$(CONFIG_CPU_MOHAWK) += proc-mohawk.o
97obj-$(CONFIG_CPU_FEROCEON) += proc-feroceon.o 97obj-$(CONFIG_CPU_FEROCEON) += proc-feroceon.o
98obj-$(CONFIG_CPU_V6) += proc-v6.o 98obj-$(CONFIG_CPU_V6) += proc-v6.o
99obj-$(CONFIG_CPU_V6K) += proc-v6.o 99obj-$(CONFIG_CPU_V6K) += proc-v6.o
100obj-$(CONFIG_CPU_V7) += proc-v7.o 100obj-$(CONFIG_CPU_V7) += proc-v7.o proc-v7-bugs.o
101obj-$(CONFIG_CPU_V7M) += proc-v7m.o 101obj-$(CONFIG_CPU_V7M) += proc-v7m.o
102 102
103AFLAGS_proc-v6.o :=-Wa,-march=armv6 103AFLAGS_proc-v6.o :=-Wa,-march=armv6
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 4b6613b5e042..af27f1c22d93 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -831,7 +831,7 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
831 unsigned long attrs) 831 unsigned long attrs)
832{ 832{
833 int ret; 833 int ret;
834 unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 834 unsigned long nr_vma_pages = vma_pages(vma);
835 unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; 835 unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
836 unsigned long pfn = dma_to_pfn(dev, dma_addr); 836 unsigned long pfn = dma_to_pfn(dev, dma_addr);
837 unsigned long off = vma->vm_pgoff; 837 unsigned long off = vma->vm_pgoff;
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 32034543f49c..84becc911ee3 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -163,6 +163,9 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
163{ 163{
164 struct siginfo si; 164 struct siginfo si;
165 165
166 if (addr > TASK_SIZE)
167 harden_branch_predictor();
168
166 clear_siginfo(&si); 169 clear_siginfo(&si);
167 170
168#ifdef CONFIG_DEBUG_USER 171#ifdef CONFIG_DEBUG_USER
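
harden_branch_predictor() itself is a thin inline over a per-CPU function pointer, so the fault path stays cheap when no workaround is installed. A sketch of the likely <asm/system_misc.h> wiring, matching the per-CPU variable defined in proc-v7-bugs.c further down (the exact header contents here are an assumption):

    /* Assumed <asm/system_misc.h> hook; needs <linux/percpu.h>, <linux/smp.h>. */
    #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
    typedef void (*harden_branch_predictor_fn_t)(void);
    DECLARE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
    static inline void harden_branch_predictor(void)
    {
            harden_branch_predictor_fn_t fn =
                    per_cpu(harden_branch_predictor_fn, smp_processor_id());
            if (fn)
                    fn();
    }
    #else
    #define harden_branch_predictor() do { } while (0)
    #endif
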
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 7c087961b7ce..5dd6c58d653b 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -99,6 +99,38 @@ void __init arm_mm_memblock_reserve(void)
99 memblock_reserve(0, 1); 99 memblock_reserve(0, 1);
100} 100}
101 101
102static void __init adjust_lowmem_bounds_mpu(void)
103{
104 unsigned long pmsa = read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA;
105
106 switch (pmsa) {
107 case MMFR0_PMSAv7:
108 pmsav7_adjust_lowmem_bounds();
109 break;
110 case MMFR0_PMSAv8:
111 pmsav8_adjust_lowmem_bounds();
112 break;
113 default:
114 break;
115 }
116}
117
118static void __init mpu_setup(void)
119{
120 unsigned long pmsa = read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA;
121
122 switch (pmsa) {
123 case MMFR0_PMSAv7:
124 pmsav7_setup();
125 break;
126 case MMFR0_PMSAv8:
127 pmsav8_setup();
128 break;
129 default:
130 break;
131 }
132}
133
102void __init adjust_lowmem_bounds(void) 134void __init adjust_lowmem_bounds(void)
103{ 135{
104 phys_addr_t end; 136 phys_addr_t end;
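
The dispatch keys off the PMSA field of ID_MMFR0, read via read_cpuid_ext(CPUID_EXT_MMFR0). The field encoding follows the ARM ARM; the exact names and values below are an assumption about the matching <asm/cputype.h> additions:

    /* Assumed <asm/cputype.h> additions: ID_MMFR0.PMSA field decode. */
    #define MMFR0_PMSA      (0xF << 4)   /* bits [7:4] of ID_MMFR0 */
    #define MMFR0_PMSAv7    (3 << 4)     /* PMSAv7 MPU present */
    #define MMFR0_PMSAv8    (4 << 4)     /* PMSAv8 MPU present */
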
diff --git a/arch/arm/mm/pmsa-v7.c b/arch/arm/mm/pmsa-v7.c
index e2853bfff74e..699fa2e88725 100644
--- a/arch/arm/mm/pmsa-v7.c
+++ b/arch/arm/mm/pmsa-v7.c
@@ -102,7 +102,7 @@ static inline u32 irbar_read(void)
102 102
103static inline void rgnr_write(u32 v) 103static inline void rgnr_write(u32 v)
104{ 104{
105 writel_relaxed(v, BASEADDR_V7M_SCB + MPU_RNR); 105 writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv7_RNR);
106} 106}
107 107
108/* Data-side / unified region attributes */ 108/* Data-side / unified region attributes */
@@ -110,28 +110,28 @@ static inline void rgnr_write(u32 v)
110/* Region access control register */ 110/* Region access control register */
111static inline void dracr_write(u32 v) 111static inline void dracr_write(u32 v)
112{ 112{
113 u32 rsr = readl_relaxed(BASEADDR_V7M_SCB + MPU_RASR) & GENMASK(15, 0); 113 u32 rsr = readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RASR) & GENMASK(15, 0);
114 114
115 writel_relaxed((v << 16) | rsr, BASEADDR_V7M_SCB + MPU_RASR); 115 writel_relaxed((v << 16) | rsr, BASEADDR_V7M_SCB + PMSAv7_RASR);
116} 116}
117 117
118/* Region size register */ 118/* Region size register */
119static inline void drsr_write(u32 v) 119static inline void drsr_write(u32 v)
120{ 120{
121 u32 racr = readl_relaxed(BASEADDR_V7M_SCB + MPU_RASR) & GENMASK(31, 16); 121 u32 racr = readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RASR) & GENMASK(31, 16);
122 122
123 writel_relaxed(v | racr, BASEADDR_V7M_SCB + MPU_RASR); 123 writel_relaxed(v | racr, BASEADDR_V7M_SCB + PMSAv7_RASR);
124} 124}
125 125
126/* Region base address register */ 126/* Region base address register */
127static inline void drbar_write(u32 v) 127static inline void drbar_write(u32 v)
128{ 128{
129 writel_relaxed(v, BASEADDR_V7M_SCB + MPU_RBAR); 129 writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv7_RBAR);
130} 130}
131 131
132static inline u32 drbar_read(void) 132static inline u32 drbar_read(void)
133{ 133{
134 return readl_relaxed(BASEADDR_V7M_SCB + MPU_RBAR); 134 return readl_relaxed(BASEADDR_V7M_SCB + PMSAv7_RBAR);
135} 135}
136 136
137/* ARMv7-M only supports a unified MPU, so I-side operations are nop */ 137/* ARMv7-M only supports a unified MPU, so I-side operations are nop */
@@ -143,11 +143,6 @@ static inline unsigned long irbar_read(void) {return 0;}
143 143
144#endif 144#endif
145 145
146static int __init mpu_present(void)
147{
148 return ((read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA) == MMFR0_PMSAv7);
149}
150
151static bool __init try_split_region(phys_addr_t base, phys_addr_t size, struct region *region) 146static bool __init try_split_region(phys_addr_t base, phys_addr_t size, struct region *region)
152{ 147{
153 unsigned long subreg, bslots, sslots; 148 unsigned long subreg, bslots, sslots;
@@ -161,7 +156,7 @@ static bool __init try_split_region(phys_addr_t base, phys_addr_t size, struct r
161 156
162 bdiff = base - abase; 157 bdiff = base - abase;
163 sdiff = p2size - asize; 158 sdiff = p2size - asize;
164 subreg = p2size / MPU_NR_SUBREGS; 159 subreg = p2size / PMSAv7_NR_SUBREGS;
165 160
166 if ((bdiff % subreg) || (sdiff % subreg)) 161 if ((bdiff % subreg) || (sdiff % subreg))
167 return false; 162 return false;
@@ -172,17 +167,17 @@ static bool __init try_split_region(phys_addr_t base, phys_addr_t size, struct r
172 if (bslots || sslots) { 167 if (bslots || sslots) {
173 int i; 168 int i;
174 169
175 if (subreg < MPU_MIN_SUBREG_SIZE) 170 if (subreg < PMSAv7_MIN_SUBREG_SIZE)
176 return false; 171 return false;
177 172
178 if (bslots + sslots > MPU_NR_SUBREGS) 173 if (bslots + sslots > PMSAv7_NR_SUBREGS)
179 return false; 174 return false;
180 175
181 for (i = 0; i < bslots; i++) 176 for (i = 0; i < bslots; i++)
182 _set_bit(i, &region->subreg); 177 _set_bit(i, &region->subreg);
183 178
184 for (i = 1; i <= sslots; i++) 179 for (i = 1; i <= sslots; i++)
185 _set_bit(MPU_NR_SUBREGS - i, &region->subreg); 180 _set_bit(PMSAv7_NR_SUBREGS - i, &region->subreg);
186 } 181 }
187 182
188 region->base = abase; 183 region->base = abase;
@@ -233,7 +228,7 @@ static int __init allocate_region(phys_addr_t base, phys_addr_t size,
233} 228}
234 229
235/* MPU initialisation functions */ 230/* MPU initialisation functions */
236void __init adjust_lowmem_bounds_mpu(void) 231void __init pmsav7_adjust_lowmem_bounds(void)
237{ 232{
238 phys_addr_t specified_mem_size = 0, total_mem_size = 0; 233 phys_addr_t specified_mem_size = 0, total_mem_size = 0;
239 struct memblock_region *reg; 234 struct memblock_region *reg;
@@ -243,10 +238,7 @@ void __init adjust_lowmem_bounds_mpu(void)
243 unsigned int mem_max_regions; 238 unsigned int mem_max_regions;
244 int num, i; 239 int num, i;
245 240
246 if (!mpu_present()) 241 /* Free-up PMSAv7_PROBE_REGION */
247 return;
248
249 /* Free-up MPU_PROBE_REGION */
250 mpu_min_region_order = __mpu_min_region_order(); 242 mpu_min_region_order = __mpu_min_region_order();
251 243
252 /* How many regions are supported */ 244 /* How many regions are supported */
@@ -301,12 +293,12 @@ void __init adjust_lowmem_bounds_mpu(void)
301 num = allocate_region(mem_start, specified_mem_size, mem_max_regions, mem); 293 num = allocate_region(mem_start, specified_mem_size, mem_max_regions, mem);
302 294
303 for (i = 0; i < num; i++) { 295 for (i = 0; i < num; i++) {
304 unsigned long subreg = mem[i].size / MPU_NR_SUBREGS; 296 unsigned long subreg = mem[i].size / PMSAv7_NR_SUBREGS;
305 297
306 total_mem_size += mem[i].size - subreg * hweight_long(mem[i].subreg); 298 total_mem_size += mem[i].size - subreg * hweight_long(mem[i].subreg);
307 299
308 pr_debug("MPU: base %pa size %pa disable subregions: %*pbl\n", 300 pr_debug("MPU: base %pa size %pa disable subregions: %*pbl\n",
309 &mem[i].base, &mem[i].size, MPU_NR_SUBREGS, &mem[i].subreg); 301 &mem[i].base, &mem[i].size, PMSAv7_NR_SUBREGS, &mem[i].subreg);
310 } 302 }
311 303
312 if (total_mem_size != specified_mem_size) { 304 if (total_mem_size != specified_mem_size) {
@@ -349,7 +341,7 @@ static int __init __mpu_min_region_order(void)
349 u32 drbar_result, irbar_result; 341 u32 drbar_result, irbar_result;
350 342
351 /* We've kept a region free for this probing */ 343 /* We've kept a region free for this probing */
352 rgnr_write(MPU_PROBE_REGION); 344 rgnr_write(PMSAv7_PROBE_REGION);
353 isb(); 345 isb();
354 /* 346 /*
355 * As per ARM ARM, write 0xFFFFFFFC to DRBAR to find the minimum 347 * As per ARM ARM, write 0xFFFFFFFC to DRBAR to find the minimum
@@ -388,8 +380,8 @@ static int __init mpu_setup_region(unsigned int number, phys_addr_t start,
388 return -ENOMEM; 380 return -ENOMEM;
389 381
390 /* Writing N to bits 5:1 (RSR_SZ) specifies region size 2^N+1 */ 382 /* Writing N to bits 5:1 (RSR_SZ) specifies region size 2^N+1 */
391 size_data = ((size_order - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN; 383 size_data = ((size_order - 1) << PMSAv7_RSR_SZ) | 1 << PMSAv7_RSR_EN;
392 size_data |= subregions << MPU_RSR_SD; 384 size_data |= subregions << PMSAv7_RSR_SD;
393 385
394 if (need_flush) 386 if (need_flush)
395 flush_cache_all(); 387 flush_cache_all();
@@ -424,18 +416,15 @@ static int __init mpu_setup_region(unsigned int number, phys_addr_t start,
424/* 416/*
425* Set up default MPU regions, doing nothing if there is no MPU 417* Set up default MPU regions, doing nothing if there is no MPU
426*/ 418*/
427void __init mpu_setup(void) 419void __init pmsav7_setup(void)
428{ 420{
429 int i, region = 0, err = 0; 421 int i, region = 0, err = 0;
430 422
431 if (!mpu_present())
432 return;
433
434 /* Setup MPU (order is important) */ 423 /* Setup MPU (order is important) */
435 424
436 /* Background */ 425 /* Background */
437 err |= mpu_setup_region(region++, 0, 32, 426 err |= mpu_setup_region(region++, 0, 32,
438 MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0RW, 427 PMSAv7_ACR_XN | PMSAv7_RGN_STRONGLY_ORDERED | PMSAv7_AP_PL1RW_PL0RW,
439 0, false); 428 0, false);
440 429
441#ifdef CONFIG_XIP_KERNEL 430#ifdef CONFIG_XIP_KERNEL
@@ -448,13 +437,13 @@ void __init mpu_setup(void)
448 * with BG region (which is uncachable), thus we need 437 * with BG region (which is uncachable), thus we need
449 * to clean and invalidate cache. 438 * to clean and invalidate cache.
450 */ 439 */
451 bool need_flush = region == MPU_RAM_REGION; 440 bool need_flush = region == PMSAv7_RAM_REGION;
452 441
453 if (!xip[i].size) 442 if (!xip[i].size)
454 continue; 443 continue;
455 444
456 err |= mpu_setup_region(region++, xip[i].base, ilog2(xip[i].size), 445 err |= mpu_setup_region(region++, xip[i].base, ilog2(xip[i].size),
457 MPU_AP_PL1RO_PL0NA | MPU_RGN_NORMAL, 446 PMSAv7_AP_PL1RO_PL0NA | PMSAv7_RGN_NORMAL,
458 xip[i].subreg, need_flush); 447 xip[i].subreg, need_flush);
459 } 448 }
460#endif 449#endif
@@ -465,14 +454,14 @@ void __init mpu_setup(void)
465 continue; 454 continue;
466 455
467 err |= mpu_setup_region(region++, mem[i].base, ilog2(mem[i].size), 456 err |= mpu_setup_region(region++, mem[i].base, ilog2(mem[i].size),
468 MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL, 457 PMSAv7_AP_PL1RW_PL0RW | PMSAv7_RGN_NORMAL,
469 mem[i].subreg, false); 458 mem[i].subreg, false);
470 } 459 }
471 460
472 /* Vectors */ 461 /* Vectors */
473#ifndef CONFIG_CPU_V7M 462#ifndef CONFIG_CPU_V7M
474 err |= mpu_setup_region(region++, vectors_base, ilog2(2 * PAGE_SIZE), 463 err |= mpu_setup_region(region++, vectors_base, ilog2(2 * PAGE_SIZE),
475 MPU_AP_PL1RW_PL0NA | MPU_RGN_NORMAL, 464 PMSAv7_AP_PL1RW_PL0NA | PMSAv7_RGN_NORMAL,
476 0, false); 465 0, false);
477#endif 466#endif
478 if (err) { 467 if (err) {
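
__mpu_min_region_order() relies on DRBAR dropping base-address bits below the minimum region alignment: write a base with every candidate bit set, read it back, and the lowest surviving bit gives the minimum order. A hedged sketch using the accessors from this file (the real function also probes IRBAR on MPUs with a separate I-side):

    /* Sketch: probe the minimum supported region order via DRBAR. */
    static int __init probe_min_region_order(void)
    {
            u32 result;

            rgnr_write(PMSAv7_PROBE_REGION);  /* region kept free for probing */
            isb();
            drbar_write(0xFFFFFFFC);          /* try to set every base bit */
            result = drbar_read();            /* unimplemented bits read 0 */
            drbar_write(0);                   /* leave the region clean */

            return __ffs(result);             /* lowest set bit = min order */
    }
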
diff --git a/arch/arm/mm/pmsa-v8.c b/arch/arm/mm/pmsa-v8.c
new file mode 100644
index 000000000000..617a83def88a
--- /dev/null
+++ b/arch/arm/mm/pmsa-v8.c
@@ -0,0 +1,307 @@
1/*
2 * Based on linux/arch/arm/pmsa-v7.c
3 *
4 * ARM PMSAv8 supporting functions.
5 */
6
7#include <linux/memblock.h>
8#include <linux/range.h>
9
10#include <asm/cp15.h>
11#include <asm/cputype.h>
12#include <asm/mpu.h>
13
14#include <asm/memory.h>
15#include <asm/sections.h>
16
17#include "mm.h"
18
19#ifndef CONFIG_CPU_V7M
20
21#define PRSEL __ACCESS_CP15(c6, 0, c2, 1)
22#define PRBAR __ACCESS_CP15(c6, 0, c3, 0)
23#define PRLAR __ACCESS_CP15(c6, 0, c3, 1)
24
25static inline u32 prlar_read(void)
26{
27 return read_sysreg(PRLAR);
28}
29
30static inline u32 prbar_read(void)
31{
32 return read_sysreg(PRBAR);
33}
34
35static inline void prsel_write(u32 v)
36{
37 write_sysreg(v, PRSEL);
38}
39
40static inline void prbar_write(u32 v)
41{
42 write_sysreg(v, PRBAR);
43}
44
45static inline void prlar_write(u32 v)
46{
47 write_sysreg(v, PRLAR);
48}
49#else
50
51static inline u32 prlar_read(void)
52{
53 return readl_relaxed(BASEADDR_V7M_SCB + PMSAv8_RLAR);
54}
55
56static inline u32 prbar_read(void)
57{
58 return readl_relaxed(BASEADDR_V7M_SCB + PMSAv8_RBAR);
59}
60
61static inline void prsel_write(u32 v)
62{
63 writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv8_RNR);
64}
65
66static inline void prbar_write(u32 v)
67{
68 writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv8_RBAR);
69}
70
71static inline void prlar_write(u32 v)
72{
73 writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv8_RLAR);
74}
75
76#endif
77
78static struct range __initdata io[MPU_MAX_REGIONS];
79static struct range __initdata mem[MPU_MAX_REGIONS];
80
81static unsigned int __initdata mpu_max_regions;
82
83static __init bool is_region_fixed(int number)
84{
85 switch (number) {
86 case PMSAv8_XIP_REGION:
87 case PMSAv8_KERNEL_REGION:
88 return true;
89 default:
90 return false;
91 }
92}
93
94void __init pmsav8_adjust_lowmem_bounds(void)
95{
96 phys_addr_t mem_end;
97 struct memblock_region *reg;
98 bool first = true;
99
100 for_each_memblock(memory, reg) {
101 if (first) {
102 phys_addr_t phys_offset = PHYS_OFFSET;
103
104 /*
105		 * Initially only use memory contiguous from
106 * PHYS_OFFSET */
107 if (reg->base != phys_offset)
108 panic("First memory bank must be contiguous from PHYS_OFFSET");
109 mem_end = reg->base + reg->size;
110 first = false;
111 } else {
112 /*
113 * memblock auto merges contiguous blocks, remove
114 * all blocks afterwards in one go (we can't remove
115 * blocks separately while iterating)
116 */
117 pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n",
118 &mem_end, &reg->base);
119 memblock_remove(reg->base, 0 - reg->base);
120 break;
121 }
122 }
123}
124
125static int __init __mpu_max_regions(void)
126{
127 static int max_regions;
128 u32 mpuir;
129
130 if (max_regions)
131 return max_regions;
132
133 mpuir = read_cpuid_mputype();
134
135 max_regions = (mpuir & MPUIR_DREGION_SZMASK) >> MPUIR_DREGION;
136
137 return max_regions;
138}
139
140static int __init __pmsav8_setup_region(unsigned int number, u32 bar, u32 lar)
141{
142 if (number > mpu_max_regions
143 || number >= MPU_MAX_REGIONS)
144 return -ENOENT;
145
146 dsb();
147 prsel_write(number);
148 isb();
149 prbar_write(bar);
150 prlar_write(lar);
151
152 mpu_rgn_info.rgns[number].prbar = bar;
153 mpu_rgn_info.rgns[number].prlar = lar;
154
155 mpu_rgn_info.used++;
156
157 return 0;
158}
159
160static int __init pmsav8_setup_ram(unsigned int number, phys_addr_t start, phys_addr_t end)
161{
162 u32 bar, lar;
163
164 if (is_region_fixed(number))
165 return -EINVAL;
166
167 bar = start;
168	lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);
169
170 bar |= PMSAv8_AP_PL1RW_PL0RW | PMSAv8_RGN_SHARED;
171 lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN;
172
173 return __pmsav8_setup_region(number, bar, lar);
174}
175
176static int __init pmsav8_setup_io(unsigned int number, phys_addr_t start, phys_addr_t end)
177{
178 u32 bar, lar;
179
180 if (is_region_fixed(number))
181 return -EINVAL;
182
183 bar = start;
184	lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);
185
186 bar |= PMSAv8_AP_PL1RW_PL0RW | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN;
187 lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN;
188
189 return __pmsav8_setup_region(number, bar, lar);
190}
191
192static int __init pmsav8_setup_fixed(unsigned int number, phys_addr_t start, phys_addr_t end)
193{
194 u32 bar, lar;
195
196 if (!is_region_fixed(number))
197 return -EINVAL;
198
199 bar = start;
200 lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);
201
202 bar |= PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED;
203 lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN;
204
205 prsel_write(number);
206 isb();
207
208 if (prbar_read() != bar || prlar_read() != lar)
209 return -EINVAL;
210
211 /* Reserved region was set up early, we just need a record for secondaries */
212 mpu_rgn_info.rgns[number].prbar = bar;
213 mpu_rgn_info.rgns[number].prlar = lar;
214
215 mpu_rgn_info.used++;
216
217 return 0;
218}
219
220#ifndef CONFIG_CPU_V7M
221static int __init pmsav8_setup_vector(unsigned int number, phys_addr_t start, phys_addr_t end)
222{
223 u32 bar, lar;
224
225 if (number == PMSAv8_KERNEL_REGION)
226 return -EINVAL;
227
228 bar = start;
229 lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);
230
231 bar |= PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED;
232 lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN;
233
234 return __pmsav8_setup_region(number, bar, lar);
235}
236#endif
237
238void __init pmsav8_setup(void)
239{
240 int i, err = 0;
241 int region = PMSAv8_KERNEL_REGION;
242
243 /* How many regions are supported ? */
244 mpu_max_regions = __mpu_max_regions();
245
246 /* RAM: single chunk of memory */
247 add_range(mem, ARRAY_SIZE(mem), 0, memblock.memory.regions[0].base,
248 memblock.memory.regions[0].base + memblock.memory.regions[0].size);
249
250 /* IO: cover full 4G range */
251 add_range(io, ARRAY_SIZE(io), 0, 0, 0xffffffff);
252
253 /* RAM and IO: exclude kernel */
254 subtract_range(mem, ARRAY_SIZE(mem), __pa(KERNEL_START), __pa(KERNEL_END));
255 subtract_range(io, ARRAY_SIZE(io), __pa(KERNEL_START), __pa(KERNEL_END));
256
257#ifdef CONFIG_XIP_KERNEL
258 /* RAM and IO: exclude xip */
259 subtract_range(mem, ARRAY_SIZE(mem), CONFIG_XIP_PHYS_ADDR, __pa(_exiprom));
260 subtract_range(io, ARRAY_SIZE(io), CONFIG_XIP_PHYS_ADDR, __pa(_exiprom));
261#endif
262
263#ifndef CONFIG_CPU_V7M
264 /* RAM and IO: exclude vectors */
265 subtract_range(mem, ARRAY_SIZE(mem), vectors_base, vectors_base + 2 * PAGE_SIZE);
266 subtract_range(io, ARRAY_SIZE(io), vectors_base, vectors_base + 2 * PAGE_SIZE);
267#endif
268 /* IO: exclude RAM */
269 for (i = 0; i < ARRAY_SIZE(mem); i++)
270 subtract_range(io, ARRAY_SIZE(io), mem[i].start, mem[i].end);
271
272 /* Now program MPU */
273
274#ifdef CONFIG_XIP_KERNEL
275 /* ROM */
276 err |= pmsav8_setup_fixed(PMSAv8_XIP_REGION, CONFIG_XIP_PHYS_ADDR, __pa(_exiprom));
277#endif
278 /* Kernel */
279 err |= pmsav8_setup_fixed(region++, __pa(KERNEL_START), __pa(KERNEL_END));
280
281
282 /* IO */
283 for (i = 0; i < ARRAY_SIZE(io); i++) {
284 if (!io[i].end)
285 continue;
286
287 err |= pmsav8_setup_io(region++, io[i].start, io[i].end);
288 }
289
290 /* RAM */
291 for (i = 0; i < ARRAY_SIZE(mem); i++) {
292 if (!mem[i].end)
293 continue;
294
295 err |= pmsav8_setup_ram(region++, mem[i].start, mem[i].end);
296 }
297
298 /* Vectors */
299#ifndef CONFIG_CPU_V7M
300 err |= pmsav8_setup_vector(region++, vectors_base, vectors_base + 2 * PAGE_SIZE);
301#endif
302 if (err)
303		pr_warn("MPU region initialization failure! %d\n", err);
304 else
305 pr_info("Using ARM PMSAv8 Compliant MPU. Used %d of %d regions\n",
306 mpu_rgn_info.used, mpu_max_regions);
307}
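
Where PMSAv7 regions are power-of-two sized with subregion disables, PMSAv8 regions are plain base/limit pairs: PRBAR carries the start address plus access and shareability bits, PRLAR carries the inclusive limit plus a MAIR attribute index and the enable bit. A worked sketch for a 16 MiB normal-memory region at 0x80000000, reusing the macros above (treating PMSAv8_MINALIGN as the architectural 64-byte granule is an assumption):

    /* Sketch fragment: encode RAM covering 0x80000000..0x80ffffff. */
    phys_addr_t start = 0x80000000;
    phys_addr_t end   = 0x81000000;                  /* exclusive */

    u32 bar = start | PMSAv8_AP_PL1RW_PL0RW | PMSAv8_RGN_SHARED;
    u32 lar = ((end - 1) & ~(PMSAv8_MINALIGN - 1))   /* inclusive limit */
            | PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL)      /* MAIR attribute index */
            | PMSAv8_LAR_EN;                         /* enable the region */
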
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index f10e31d0730a..81d0efb055c6 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -273,13 +273,14 @@
273 mcr p15, 0, ip, c7, c10, 4 @ data write barrier 273 mcr p15, 0, ip, c7, c10, 4 @ data write barrier
274 .endm 274 .endm
275 275
276.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0 276.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0
277 .type \name\()_processor_functions, #object 277 .type \name\()_processor_functions, #object
278 .align 2 278 .align 2
279ENTRY(\name\()_processor_functions) 279ENTRY(\name\()_processor_functions)
280 .word \dabort 280 .word \dabort
281 .word \pabort 281 .word \pabort
282 .word cpu_\name\()_proc_init 282 .word cpu_\name\()_proc_init
283 .word \bugs
283 .word cpu_\name\()_proc_fin 284 .word cpu_\name\()_proc_fin
284 .word cpu_\name\()_reset 285 .word cpu_\name\()_reset
285 .word cpu_\name\()_do_idle 286 .word cpu_\name\()_do_idle
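
The new .word \bugs lands between _proc_init and _proc_fin, so the C-side struct processor must gain a matching member at the same offset. A reduced sketch of the relevant slice, with field placement inferred from the .word order above (the surrounding fields are elided):

    /* Reduced sketch: the struct processor slice matching the .word order. */
    struct processor_slice {
            void (*_proc_init)(void);
            void (*check_bugs)(void);   /* new: per-CPU errata/bug hook */
            void (*_proc_fin)(void);
    };
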
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index c6141a5435c3..f8d45ad2a515 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -41,11 +41,6 @@
41 * even on Cortex-A8 revisions not affected by 430973. 41 * even on Cortex-A8 revisions not affected by 430973.
42 * If IBE is not set, the flush BTAC/BTB won't do anything. 42 * If IBE is not set, the flush BTAC/BTB won't do anything.
43 */ 43 */
44ENTRY(cpu_ca8_switch_mm)
45#ifdef CONFIG_MMU
46 mov r2, #0
47 mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
48#endif
49ENTRY(cpu_v7_switch_mm) 44ENTRY(cpu_v7_switch_mm)
50#ifdef CONFIG_MMU 45#ifdef CONFIG_MMU
51 mmid r1, r1 @ get mm->context.id 46 mmid r1, r1 @ get mm->context.id
@@ -66,7 +61,6 @@ ENTRY(cpu_v7_switch_mm)
66#endif 61#endif
67 bx lr 62 bx lr
68ENDPROC(cpu_v7_switch_mm) 63ENDPROC(cpu_v7_switch_mm)
69ENDPROC(cpu_ca8_switch_mm)
70 64
71/* 65/*
72 * cpu_v7_set_pte_ext(ptep, pte) 66 * cpu_v7_set_pte_ext(ptep, pte)
diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
new file mode 100644
index 000000000000..5544b82a2e7a
--- /dev/null
+++ b/arch/arm/mm/proc-v7-bugs.c
@@ -0,0 +1,174 @@
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/arm-smccc.h>
3#include <linux/kernel.h>
4#include <linux/psci.h>
5#include <linux/smp.h>
6
7#include <asm/cp15.h>
8#include <asm/cputype.h>
9#include <asm/proc-fns.h>
10#include <asm/system_misc.h>
11
12#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
13DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);
14
15extern void cpu_v7_iciallu_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
16extern void cpu_v7_bpiall_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
17extern void cpu_v7_smc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
18extern void cpu_v7_hvc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
19
20static void harden_branch_predictor_bpiall(void)
21{
22 write_sysreg(0, BPIALL);
23}
24
25static void harden_branch_predictor_iciallu(void)
26{
27 write_sysreg(0, ICIALLU);
28}
29
30static void __maybe_unused call_smc_arch_workaround_1(void)
31{
32 arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
33}
34
35static void __maybe_unused call_hvc_arch_workaround_1(void)
36{
37 arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
38}
39
40static void cpu_v7_spectre_init(void)
41{
42 const char *spectre_v2_method = NULL;
43 int cpu = smp_processor_id();
44
45 if (per_cpu(harden_branch_predictor_fn, cpu))
46 return;
47
48 switch (read_cpuid_part()) {
49 case ARM_CPU_PART_CORTEX_A8:
50 case ARM_CPU_PART_CORTEX_A9:
51 case ARM_CPU_PART_CORTEX_A12:
52 case ARM_CPU_PART_CORTEX_A17:
53 case ARM_CPU_PART_CORTEX_A73:
54 case ARM_CPU_PART_CORTEX_A75:
55 if (processor.switch_mm != cpu_v7_bpiall_switch_mm)
56 goto bl_error;
57 per_cpu(harden_branch_predictor_fn, cpu) =
58 harden_branch_predictor_bpiall;
59 spectre_v2_method = "BPIALL";
60 break;
61
62 case ARM_CPU_PART_CORTEX_A15:
63 case ARM_CPU_PART_BRAHMA_B15:
64 if (processor.switch_mm != cpu_v7_iciallu_switch_mm)
65 goto bl_error;
66 per_cpu(harden_branch_predictor_fn, cpu) =
67 harden_branch_predictor_iciallu;
68 spectre_v2_method = "ICIALLU";
69 break;
70
71#ifdef CONFIG_ARM_PSCI
72 default:
73 /* Other ARM CPUs require no workaround */
74 if (read_cpuid_implementor() == ARM_CPU_IMP_ARM)
75 break;
76 /* fallthrough */
77 /* Cortex A57/A72 require firmware workaround */
78 case ARM_CPU_PART_CORTEX_A57:
79 case ARM_CPU_PART_CORTEX_A72: {
80 struct arm_smccc_res res;
81
82 if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
83 break;
84
85 switch (psci_ops.conduit) {
86 case PSCI_CONDUIT_HVC:
87 arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
88 ARM_SMCCC_ARCH_WORKAROUND_1, &res);
89 if ((int)res.a0 != 0)
90 break;
91 if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu)
92 goto bl_error;
93 per_cpu(harden_branch_predictor_fn, cpu) =
94 call_hvc_arch_workaround_1;
95 processor.switch_mm = cpu_v7_hvc_switch_mm;
96 spectre_v2_method = "hypervisor";
97 break;
98
99 case PSCI_CONDUIT_SMC:
100 arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
101 ARM_SMCCC_ARCH_WORKAROUND_1, &res);
102 if ((int)res.a0 != 0)
103 break;
104 if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu)
105 goto bl_error;
106 per_cpu(harden_branch_predictor_fn, cpu) =
107 call_smc_arch_workaround_1;
108 processor.switch_mm = cpu_v7_smc_switch_mm;
109 spectre_v2_method = "firmware";
110 break;
111
112 default:
113 break;
114 }
115 }
116#endif
117 }
118
119 if (spectre_v2_method)
120 pr_info("CPU%u: Spectre v2: using %s workaround\n",
121 smp_processor_id(), spectre_v2_method);
122 return;
123
124bl_error:
125 pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n",
126 cpu);
127}
128#else
129static void cpu_v7_spectre_init(void)
130{
131}
132#endif
133
134static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned,
135 u32 mask, const char *msg)
136{
137 u32 aux_cr;
138
139 asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (aux_cr));
140
141 if ((aux_cr & mask) != mask) {
142 if (!*warned)
143 pr_err("CPU%u: %s", smp_processor_id(), msg);
144 *warned = true;
145 return false;
146 }
147 return true;
148}
149
150static DEFINE_PER_CPU(bool, spectre_warned);
151
152static bool check_spectre_auxcr(bool *warned, u32 bit)
153{
154 return IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR) &&
155 cpu_v7_check_auxcr_set(warned, bit,
156 "Spectre v2: firmware did not set auxiliary control register IBE bit, system vulnerable\n");
157}
158
159void cpu_v7_ca8_ibe(void)
160{
161 if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6)))
162 cpu_v7_spectre_init();
163}
164
165void cpu_v7_ca15_ibe(void)
166{
167 if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
168 cpu_v7_spectre_init();
169}
170
171void cpu_v7_bugs_init(void)
172{
173 cpu_v7_spectre_init();
174}
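
cpu_v7_bugs_init() and the ca8/ca15 IBE variants are reached through the generic check_bugs plumbing added elsewhere in this series; a sketch of that glue, assuming the usual MULTI_CPU processor vtable convention (contents inferred, not quoted):

    /* Sketch of the assumed generic entry points (arch/arm/kernel/bugs.c). */
    #include <linux/init.h>
    #include <asm/bugs.h>
    #include <asm/proc-fns.h>

    void check_other_bugs(void)
    {
    #ifdef MULTI_CPU
            if (processor.check_bugs)
                    processor.check_bugs();
    #endif
    }

    void __init check_bugs(void)
    {
            check_other_bugs();   /* boot CPU; secondaries run it at bring-up */
    }
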
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index b528a15f460d..6fe52819e014 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -9,6 +9,7 @@
9 * 9 *
10 * This is the "shell" of the ARMv7 processor support. 10 * This is the "shell" of the ARMv7 processor support.
11 */ 11 */
12#include <linux/arm-smccc.h>
12#include <linux/init.h> 13#include <linux/init.h>
13#include <linux/linkage.h> 14#include <linux/linkage.h>
14#include <asm/assembler.h> 15#include <asm/assembler.h>
@@ -93,6 +94,37 @@ ENTRY(cpu_v7_dcache_clean_area)
93 ret lr 94 ret lr
94ENDPROC(cpu_v7_dcache_clean_area) 95ENDPROC(cpu_v7_dcache_clean_area)
95 96
97#ifdef CONFIG_ARM_PSCI
98 .arch_extension sec
99ENTRY(cpu_v7_smc_switch_mm)
100 stmfd sp!, {r0 - r3}
101 movw r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
102 movt r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
103 smc #0
104 ldmfd sp!, {r0 - r3}
105 b cpu_v7_switch_mm
106ENDPROC(cpu_v7_smc_switch_mm)
107 .arch_extension virt
108ENTRY(cpu_v7_hvc_switch_mm)
109 stmfd sp!, {r0 - r3}
110 movw r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
111 movt r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
112 hvc #0
113 ldmfd sp!, {r0 - r3}
114 b cpu_v7_switch_mm
115ENDPROC(cpu_v7_hvc_switch_mm)
116#endif
117ENTRY(cpu_v7_iciallu_switch_mm)
118 mov r3, #0
119 mcr p15, 0, r3, c7, c5, 0 @ ICIALLU
120 b cpu_v7_switch_mm
121ENDPROC(cpu_v7_iciallu_switch_mm)
122ENTRY(cpu_v7_bpiall_switch_mm)
123 mov r3, #0
124 mcr p15, 0, r3, c7, c5, 6 @ flush BTAC/BTB
125 b cpu_v7_switch_mm
126ENDPROC(cpu_v7_bpiall_switch_mm)
127
96 string cpu_v7_name, "ARMv7 Processor" 128 string cpu_v7_name, "ARMv7 Processor"
97 .align 129 .align
98 130
@@ -158,31 +190,6 @@ ENTRY(cpu_v7_do_resume)
158ENDPROC(cpu_v7_do_resume) 190ENDPROC(cpu_v7_do_resume)
159#endif 191#endif
160 192
161/*
162 * Cortex-A8
163 */
164 globl_equ cpu_ca8_proc_init, cpu_v7_proc_init
165 globl_equ cpu_ca8_proc_fin, cpu_v7_proc_fin
166 globl_equ cpu_ca8_reset, cpu_v7_reset
167 globl_equ cpu_ca8_do_idle, cpu_v7_do_idle
168 globl_equ cpu_ca8_dcache_clean_area, cpu_v7_dcache_clean_area
169 globl_equ cpu_ca8_set_pte_ext, cpu_v7_set_pte_ext
170 globl_equ cpu_ca8_suspend_size, cpu_v7_suspend_size
171#ifdef CONFIG_ARM_CPU_SUSPEND
172 globl_equ cpu_ca8_do_suspend, cpu_v7_do_suspend
173 globl_equ cpu_ca8_do_resume, cpu_v7_do_resume
174#endif
175
176/*
177 * Cortex-A9 processor functions
178 */
179 globl_equ cpu_ca9mp_proc_init, cpu_v7_proc_init
180 globl_equ cpu_ca9mp_proc_fin, cpu_v7_proc_fin
181 globl_equ cpu_ca9mp_reset, cpu_v7_reset
182 globl_equ cpu_ca9mp_do_idle, cpu_v7_do_idle
183 globl_equ cpu_ca9mp_dcache_clean_area, cpu_v7_dcache_clean_area
184 globl_equ cpu_ca9mp_switch_mm, cpu_v7_switch_mm
185 globl_equ cpu_ca9mp_set_pte_ext, cpu_v7_set_pte_ext
186.globl cpu_ca9mp_suspend_size 193.globl cpu_ca9mp_suspend_size
187.equ cpu_ca9mp_suspend_size, cpu_v7_suspend_size + 4 * 2 194.equ cpu_ca9mp_suspend_size, cpu_v7_suspend_size + 4 * 2
188#ifdef CONFIG_ARM_CPU_SUSPEND 195#ifdef CONFIG_ARM_CPU_SUSPEND
@@ -547,12 +554,79 @@ __v7_setup_stack:
547 554
548 __INITDATA 555 __INITDATA
549 556
557 .weak cpu_v7_bugs_init
558
550 @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) 559 @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
551 define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1 560 define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
561
562#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
563 @ generic v7 bpiall on context switch
564 globl_equ cpu_v7_bpiall_proc_init, cpu_v7_proc_init
565 globl_equ cpu_v7_bpiall_proc_fin, cpu_v7_proc_fin
566 globl_equ cpu_v7_bpiall_reset, cpu_v7_reset
567 globl_equ cpu_v7_bpiall_do_idle, cpu_v7_do_idle
568 globl_equ cpu_v7_bpiall_dcache_clean_area, cpu_v7_dcache_clean_area
569 globl_equ cpu_v7_bpiall_set_pte_ext, cpu_v7_set_pte_ext
570 globl_equ cpu_v7_bpiall_suspend_size, cpu_v7_suspend_size
571#ifdef CONFIG_ARM_CPU_SUSPEND
572 globl_equ cpu_v7_bpiall_do_suspend, cpu_v7_do_suspend
573 globl_equ cpu_v7_bpiall_do_resume, cpu_v7_do_resume
574#endif
575 define_processor_functions v7_bpiall, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
576
577#define HARDENED_BPIALL_PROCESSOR_FUNCTIONS v7_bpiall_processor_functions
578#else
579#define HARDENED_BPIALL_PROCESSOR_FUNCTIONS v7_processor_functions
580#endif
581
552#ifndef CONFIG_ARM_LPAE 582#ifndef CONFIG_ARM_LPAE
553 define_processor_functions ca8, dabort=v7_early_abort, pabort=v7_pabort, suspend=1 583 @ Cortex-A8 - always needs bpiall switch_mm implementation
554 define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1 584 globl_equ cpu_ca8_proc_init, cpu_v7_proc_init
585 globl_equ cpu_ca8_proc_fin, cpu_v7_proc_fin
586 globl_equ cpu_ca8_reset, cpu_v7_reset
587 globl_equ cpu_ca8_do_idle, cpu_v7_do_idle
588 globl_equ cpu_ca8_dcache_clean_area, cpu_v7_dcache_clean_area
589 globl_equ cpu_ca8_set_pte_ext, cpu_v7_set_pte_ext
590 globl_equ cpu_ca8_switch_mm, cpu_v7_bpiall_switch_mm
591 globl_equ cpu_ca8_suspend_size, cpu_v7_suspend_size
592#ifdef CONFIG_ARM_CPU_SUSPEND
593 globl_equ cpu_ca8_do_suspend, cpu_v7_do_suspend
594 globl_equ cpu_ca8_do_resume, cpu_v7_do_resume
595#endif
596 define_processor_functions ca8, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_ca8_ibe
597
598 @ Cortex-A9 - needs more registers preserved across suspend/resume
599 @ and bpiall switch_mm for hardening
600 globl_equ cpu_ca9mp_proc_init, cpu_v7_proc_init
601 globl_equ cpu_ca9mp_proc_fin, cpu_v7_proc_fin
602 globl_equ cpu_ca9mp_reset, cpu_v7_reset
603 globl_equ cpu_ca9mp_do_idle, cpu_v7_do_idle
604 globl_equ cpu_ca9mp_dcache_clean_area, cpu_v7_dcache_clean_area
605#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
606 globl_equ cpu_ca9mp_switch_mm, cpu_v7_bpiall_switch_mm
607#else
608 globl_equ cpu_ca9mp_switch_mm, cpu_v7_switch_mm
609#endif
610 globl_equ cpu_ca9mp_set_pte_ext, cpu_v7_set_pte_ext
611 define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init
555#endif 612#endif
613
614 @ Cortex-A15 - needs iciallu switch_mm for hardening
615 globl_equ cpu_ca15_proc_init, cpu_v7_proc_init
616 globl_equ cpu_ca15_proc_fin, cpu_v7_proc_fin
617 globl_equ cpu_ca15_reset, cpu_v7_reset
618 globl_equ cpu_ca15_do_idle, cpu_v7_do_idle
619 globl_equ cpu_ca15_dcache_clean_area, cpu_v7_dcache_clean_area
620#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
621 globl_equ cpu_ca15_switch_mm, cpu_v7_iciallu_switch_mm
622#else
623 globl_equ cpu_ca15_switch_mm, cpu_v7_switch_mm
624#endif
625 globl_equ cpu_ca15_set_pte_ext, cpu_v7_set_pte_ext
626 globl_equ cpu_ca15_suspend_size, cpu_v7_suspend_size
627 globl_equ cpu_ca15_do_suspend, cpu_v7_do_suspend
628 globl_equ cpu_ca15_do_resume, cpu_v7_do_resume
629 define_processor_functions ca15, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_ca15_ibe
556#ifdef CONFIG_CPU_PJ4B 630#ifdef CONFIG_CPU_PJ4B
557 define_processor_functions pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1 631 define_processor_functions pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
558#endif 632#endif
@@ -669,7 +743,7 @@ __v7_ca7mp_proc_info:
669__v7_ca12mp_proc_info: 743__v7_ca12mp_proc_info:
670 .long 0x410fc0d0 744 .long 0x410fc0d0
671 .long 0xff0ffff0 745 .long 0xff0ffff0
672 __v7_proc __v7_ca12mp_proc_info, __v7_ca12mp_setup 746 __v7_proc __v7_ca12mp_proc_info, __v7_ca12mp_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
673 .size __v7_ca12mp_proc_info, . - __v7_ca12mp_proc_info 747 .size __v7_ca12mp_proc_info, . - __v7_ca12mp_proc_info
674 748
675 /* 749 /*
@@ -679,7 +753,7 @@ __v7_ca12mp_proc_info:
679__v7_ca15mp_proc_info: 753__v7_ca15mp_proc_info:
680 .long 0x410fc0f0 754 .long 0x410fc0f0
681 .long 0xff0ffff0 755 .long 0xff0ffff0
682 __v7_proc __v7_ca15mp_proc_info, __v7_ca15mp_setup 756 __v7_proc __v7_ca15mp_proc_info, __v7_ca15mp_setup, proc_fns = ca15_processor_functions
683 .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info 757 .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
684 758
685 /* 759 /*
@@ -689,7 +763,7 @@ __v7_ca15mp_proc_info:
689__v7_b15mp_proc_info: 763__v7_b15mp_proc_info:
690 .long 0x420f00f0 764 .long 0x420f00f0
691 .long 0xff0ffff0 765 .long 0xff0ffff0
692 __v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup, cache_fns = b15_cache_fns 766 __v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup, proc_fns = ca15_processor_functions, cache_fns = b15_cache_fns
693 .size __v7_b15mp_proc_info, . - __v7_b15mp_proc_info 767 .size __v7_b15mp_proc_info, . - __v7_b15mp_proc_info
694 768
695 /* 769 /*
@@ -699,9 +773,25 @@ __v7_b15mp_proc_info:
699__v7_ca17mp_proc_info: 773__v7_ca17mp_proc_info:
700 .long 0x410fc0e0 774 .long 0x410fc0e0
701 .long 0xff0ffff0 775 .long 0xff0ffff0
702 __v7_proc __v7_ca17mp_proc_info, __v7_ca17mp_setup 776 __v7_proc __v7_ca17mp_proc_info, __v7_ca17mp_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
703 .size __v7_ca17mp_proc_info, . - __v7_ca17mp_proc_info 777 .size __v7_ca17mp_proc_info, . - __v7_ca17mp_proc_info
704 778
779 /* ARM Ltd. Cortex A73 processor */
780 .type __v7_ca73_proc_info, #object
781__v7_ca73_proc_info:
782 .long 0x410fd090
783 .long 0xff0ffff0
784 __v7_proc __v7_ca73_proc_info, __v7_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
785 .size __v7_ca73_proc_info, . - __v7_ca73_proc_info
786
787 /* ARM Ltd. Cortex A75 processor */
788 .type __v7_ca75_proc_info, #object
789__v7_ca75_proc_info:
790 .long 0x410fd0a0
791 .long 0xff0ffff0
792 __v7_proc __v7_ca75_proc_info, __v7_setup, proc_fns = HARDENED_BPIALL_PROCESSOR_FUNCTIONS
793 .size __v7_ca75_proc_info, . - __v7_ca75_proc_info
794
705 /* 795 /*
706 * Qualcomm Inc. Krait processors. 796 * Qualcomm Inc. Krait processors.
707 */ 797 */
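
Each __v7_*_proc_info entry is matched against the running CPU's MIDR with a value/mask pair. For the new Cortex-A73 entry, value 0x410fd090 under mask 0xff0ffff0 means implementer 0x41 (ARM), part number 0xd09, with the variant and revision fields ignored. Illustratively:

    /* Illustrative: how a proc_info value/mask pair matches a MIDR. */
    static int proc_info_matches(u32 midr, u32 val, u32 mask)
    {
            return (midr & mask) == val;   /* 0x410fd09x => Cortex-A73 */
    }
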
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 3ece711a6a17..41b706403ef7 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -205,6 +205,7 @@ struct bus_type amba_bustype = {
205 .dma_configure = platform_dma_configure, 205 .dma_configure = platform_dma_configure,
206 .pm = &amba_pm, 206 .pm = &amba_pm,
207}; 207};
208EXPORT_SYMBOL_GPL(amba_bustype);
208 209
209static int __init amba_init(void) 210static int __init amba_init(void)
210{ 211{
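
Exporting amba_bustype lets modular code identify AMBA devices by bus; a minimal, hypothetical use of the now-resolvable symbol:

    #include <linux/amba/bus.h>

    static bool is_amba_device(struct device *dev)
    {
            return dev->bus == &amba_bustype;   /* resolvable from modules now */
    }
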