Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/Kconfig | 16
-rw-r--r--  arch/arm/Kconfig-nommu | 14
-rw-r--r--  arch/arm/Kconfig.debug | 10
-rw-r--r--  arch/arm/Makefile | 1
-rw-r--r--  arch/arm/include/asm/arch_timer.h | 9
-rw-r--r--  arch/arm/include/asm/assembler.h | 17
-rw-r--r--  arch/arm/include/asm/cp15.h | 5
-rw-r--r--  arch/arm/include/asm/cputype.h | 45
-rw-r--r--  arch/arm/include/asm/glue-cache.h | 27
-rw-r--r--  arch/arm/include/asm/glue-df.h | 8
-rw-r--r--  arch/arm/include/asm/glue-proc.h | 9
-rw-r--r--  arch/arm/include/asm/hugetlb-3level.h | 71
-rw-r--r--  arch/arm/include/asm/hugetlb.h | 84
-rw-r--r--  arch/arm/include/asm/irqflags.h | 22
-rw-r--r--  arch/arm/include/asm/mach/arch.h | 5
-rw-r--r--  arch/arm/include/asm/memory.h | 18
-rw-r--r--  arch/arm/include/asm/mpu.h | 76
-rw-r--r--  arch/arm/include/asm/page.h | 2
-rw-r--r--  arch/arm/include/asm/pgtable-3level-hwdef.h | 24
-rw-r--r--  arch/arm/include/asm/pgtable-3level.h | 96
-rw-r--r--  arch/arm/include/asm/pgtable.h | 3
-rw-r--r--  arch/arm/include/asm/proc-fns.h | 30
-rw-r--r--  arch/arm/include/asm/psci.h | 9
-rw-r--r--  arch/arm/include/asm/ptrace.h | 4
-rw-r--r--  arch/arm/include/asm/smp.h | 5
-rw-r--r--  arch/arm/include/asm/smp_plat.h | 22
-rw-r--r--  arch/arm/include/asm/suspend.h | 5
-rw-r--r--  arch/arm/include/asm/system_info.h | 1
-rw-r--r--  arch/arm/include/asm/tlb.h | 6
-rw-r--r--  arch/arm/include/asm/tlbflush.h | 27
-rw-r--r--  arch/arm/include/asm/v7m.h | 44
-rw-r--r--  arch/arm/include/debug/vexpress.S | 10
-rw-r--r--  arch/arm/include/uapi/asm/hwcap.h | 2
-rw-r--r--  arch/arm/include/uapi/asm/ptrace.h | 35
-rw-r--r--  arch/arm/kernel/Makefile | 18
-rw-r--r--  arch/arm/kernel/asm-offsets.c | 6
-rw-r--r--  arch/arm/kernel/entry-common.S | 4
-rw-r--r--  arch/arm/kernel/entry-header.S | 124
-rw-r--r--  arch/arm/kernel/entry-v7m.S | 143
-rw-r--r--  arch/arm/kernel/head-nommu.S | 170
-rw-r--r--  arch/arm/kernel/head.S | 10
-rw-r--r--  arch/arm/kernel/hyp-stub.S | 7
-rw-r--r--  arch/arm/kernel/psci.c | 7
-rw-r--r--  arch/arm/kernel/psci_smp.c | 84
-rw-r--r--  arch/arm/kernel/setup.c | 101
-rw-r--r--  arch/arm/kernel/signal.c | 9
-rw-r--r--  arch/arm/kernel/sleep.S | 97
-rw-r--r--  arch/arm/kernel/smp.c | 21
-rw-r--r--  arch/arm/kernel/suspend.c | 76
-rw-r--r--  arch/arm/kernel/traps.c | 8
-rw-r--r--  arch/arm/kvm/interrupts_head.S | 4
-rw-r--r--  arch/arm/mach-exynos/Kconfig | 2
-rw-r--r--  arch/arm/mach-shmobile/Kconfig | 4
-rw-r--r--  arch/arm/mach-tegra/Kconfig | 2
-rw-r--r--  arch/arm/mach-virt/Kconfig | 2
-rw-r--r--  arch/arm/mach-virt/Makefile | 1
-rw-r--r--  arch/arm/mach-virt/platsmp.c | 50
-rw-r--r--  arch/arm/mach-virt/virt.c | 3
-rw-r--r--  arch/arm/mm/Kconfig | 24
-rw-r--r--  arch/arm/mm/Makefile | 3
-rw-r--r--  arch/arm/mm/cache-nop.S | 50
-rw-r--r--  arch/arm/mm/context.c | 9
-rw-r--r--  arch/arm/mm/dma-mapping.c | 2
-rw-r--r--  arch/arm/mm/fault.c | 2
-rw-r--r--  arch/arm/mm/flush.c | 25
-rw-r--r--  arch/arm/mm/fsr-3level.c | 4
-rw-r--r--  arch/arm/mm/hugetlbpage.c | 101
-rw-r--r--  arch/arm/mm/init.c | 19
-rw-r--r--  arch/arm/mm/mmu.c | 49
-rw-r--r--  arch/arm/mm/nommu.c | 264
-rw-r--r--  arch/arm/mm/proc-v6.S | 6
-rw-r--r--  arch/arm/mm/proc-v7-3level.S | 53
-rw-r--r--  arch/arm/mm/proc-v7.S | 27
-rw-r--r--  arch/arm/mm/proc-v7m.S | 157
74 files changed, 2235 insertions(+), 275 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 5a326f935858..921405df7ce9 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -9,7 +9,7 @@ config ARM
 	select BUILDTIME_EXTABLE_SORT if MMU
 	select CPU_PM if (SUSPEND || CPU_IDLE)
 	select DCACHE_WORD_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && !CPU_BIG_ENDIAN && MMU
-	select GENERIC_ATOMIC64 if (CPU_V6 || !CPU_32v6K || !AEABI)
+	select GENERIC_ATOMIC64 if (CPU_V7M || CPU_V6 || !CPU_32v6K || !AEABI)
 	select GENERIC_CLOCKEVENTS_BROADCAST if SMP
 	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
@@ -1441,7 +1441,7 @@ config SMP
 	depends on CPU_V6K || CPU_V7
 	depends on GENERIC_CLOCKEVENTS
 	depends on HAVE_SMP
-	depends on MMU
+	depends on MMU || ARM_MPU
 	select USE_GENERIC_SMP_HELPERS
 	help
 	  This enables support for systems with more than one CPU. If you have
@@ -1462,7 +1462,7 @@ config SMP
 
 config SMP_ON_UP
 	bool "Allow booting SMP kernel on uniprocessor systems (EXPERIMENTAL)"
-	depends on SMP && !XIP_KERNEL
+	depends on SMP && !XIP_KERNEL && MMU
 	default y
 	help
 	  SMP kernels contain instructions which fail on non-SMP processors.
@@ -1612,7 +1612,7 @@ config SCHED_HRTICK
 
 config THUMB2_KERNEL
 	bool "Compile the kernel in Thumb-2 mode" if !CPU_THUMBONLY
-	depends on CPU_V7 && !CPU_V6 && !CPU_V6K
+	depends on (CPU_V7 || CPU_V7M) && !CPU_V6 && !CPU_V6K
 	default y if CPU_THUMBONLY
 	select AEABI
 	select ARM_ASM_UNIFIED
@@ -1734,6 +1734,14 @@ config HW_PERF_EVENTS
 	  Enable hardware performance counter support for perf events. If
 	  disabled, perf events will use software events only.
 
+config SYS_SUPPORTS_HUGETLBFS
+	def_bool y
+	depends on ARM_LPAE
+
+config HAVE_ARCH_TRANSPARENT_HUGEPAGE
+	def_bool y
+	depends on ARM_LPAE
+
 source "mm/Kconfig"
 
 config FORCE_MAX_ZONEORDER
diff --git a/arch/arm/Kconfig-nommu b/arch/arm/Kconfig-nommu
index 2cef8e13f9f8..aed66d5df7f1 100644
--- a/arch/arm/Kconfig-nommu
+++ b/arch/arm/Kconfig-nommu
@@ -28,7 +28,7 @@ config FLASH_SIZE
 config PROCESSOR_ID
 	hex 'Hard wire the processor ID'
 	default 0x00007700
-	depends on !CPU_CP15
+	depends on !(CPU_CP15 || CPU_V7M)
 	help
 	  If processor has no CP15 register, this processor ID is
 	  used instead of the auto-probing which utilizes the register.
@@ -50,3 +50,15 @@ config REMAP_VECTORS_TO_RAM
 	  Otherwise, say 'y' here. In this case, the kernel will require
 	  external support to redirect the hardware exception vectors to
 	  the writable versions located at DRAM_BASE.
+
+config ARM_MPU
+	bool 'Use the ARM v7 PMSA Compliant MPU'
+	depends on CPU_V7
+	default y
+	help
+	  Some ARM systems without an MMU have instead a Memory Protection
+	  Unit (MPU) that defines the type and permissions for regions of
+	  memory.
+
+	  If your CPU has an MPU then you should choose 'y' here unless you
+	  know that you do not want to use the MPU.
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 1d41908d5cda..f2623b25ff9a 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -476,6 +476,13 @@ choice
 		  of the tiles using the RS1 memory map, including all new A-class
 		  core tiles, FPGA-based SMMs and software models.
 
+	config DEBUG_VEXPRESS_UART0_CRX
+		bool "Use PL011 UART0 at 0xb0090000 (Cortex-R compliant tiles)"
+		depends on ARCH_VEXPRESS && !MMU
+		help
+		  This option selects UART0 at 0xb0090000. This is appropriate for
+		  Cortex-R series tiles and SMMs, such as Cortex-R5 and Cortex-R7
+
 	config DEBUG_VT8500_UART0
 		bool "Use UART0 on VIA/Wondermedia SoCs"
 		depends on ARCH_VT8500
@@ -645,7 +652,8 @@ config DEBUG_LL_INCLUDE
 	default "debug/tegra.S" if DEBUG_TEGRA_UART
 	default "debug/ux500.S" if DEBUG_UX500_UART
 	default "debug/vexpress.S" if DEBUG_VEXPRESS_UART0_DETECT || \
-		DEBUG_VEXPRESS_UART0_CA9 || DEBUG_VEXPRESS_UART0_RS1
+		DEBUG_VEXPRESS_UART0_CA9 || DEBUG_VEXPRESS_UART0_RS1 || \
+		DEBUG_VEXPRESS_UART0_CRX
 	default "debug/vt8500.S" if DEBUG_VT8500_UART0
 	default "debug/zynq.S" if DEBUG_ZYNQ_UART0 || DEBUG_ZYNQ_UART1
 	default "mach/debug-macro.S"
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index de4e1cb2f14f..d6608b10e062 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -59,6 +59,7 @@ comma = ,
 # Note that GCC does not numerically define an architecture version
 # macro, but instead defines a whole series of macros which makes
 # testing for a specific architecture or later rather impossible.
+arch-$(CONFIG_CPU_32v7M)	=-D__LINUX_ARM_ARCH__=7 -march=armv7-m -Wa,-march=armv7-m
 arch-$(CONFIG_CPU_32v7)		=-D__LINUX_ARM_ARCH__=7 $(call cc-option,-march=armv7-a,-march=armv5t -Wa$(comma)-march=armv7-a)
 arch-$(CONFIG_CPU_32v6)		=-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6,-march=armv5t -Wa$(comma)-march=armv6)
 # Only override the compiler option if ARMv6. The ARMv6K extensions are
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index 7c1bfc0aea0c..accefe099182 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -80,15 +80,6 @@ static inline u32 arch_timer_get_cntfrq(void)
 	return val;
 }
 
-static inline u64 arch_counter_get_cntpct(void)
-{
-	u64 cval;
-
-	isb();
-	asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
-	return cval;
-}
-
 static inline u64 arch_counter_get_cntvct(void)
 {
 	u64 cval;
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 05ee9eebad6b..a5fef710af32 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -136,7 +136,11 @@
  * assumes FIQs are enabled, and that the processor is in SVC mode.
  */
 	.macro	save_and_disable_irqs, oldcpsr
+#ifdef CONFIG_CPU_V7M
+	mrs	\oldcpsr, primask
+#else
 	mrs	\oldcpsr, cpsr
+#endif
 	disable_irq
 	.endm
 
@@ -150,7 +154,11 @@
  * guarantee that this will preserve the flags.
  */
 	.macro	restore_irqs_notrace, oldcpsr
+#ifdef CONFIG_CPU_V7M
+	msr	primask, \oldcpsr
+#else
 	msr	cpsr_c, \oldcpsr
+#endif
 	.endm
 
 	.macro restore_irqs, oldcpsr
@@ -229,7 +237,14 @@
 #endif
 	.endm
 
-#ifdef CONFIG_THUMB2_KERNEL
+#if defined(CONFIG_CPU_V7M)
+	/*
+	 * setmode is used to assert to be in svc mode during boot. For v7-M
+	 * this is done in __v7m_setup, so setmode can be empty here.
+	 */
+	.macro	setmode, mode, reg
+	.endm
+#elif defined(CONFIG_THUMB2_KERNEL)
 	.macro	setmode, mode, reg
 	mov	\reg, #\mode
 	msr	cpsr_c, \reg
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
index 1f3262e99d81..a524a23d8627 100644
--- a/arch/arm/include/asm/cp15.h
+++ b/arch/arm/include/asm/cp15.h
@@ -23,6 +23,11 @@
 #define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
 #define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
 #define CR_DT	(1 << 16)
+#ifdef CONFIG_MMU
+#define CR_HA	(1 << 17)	/* Hardware management of Access Flag   */
+#else
+#define CR_BR	(1 << 17)	/* MPU Background region enable (PMSA)  */
+#endif
 #define CR_IT	(1 << 18)
 #define CR_ST	(1 << 19)
 #define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index dba62cb1ad08..8c25dc4e9851 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -8,8 +8,25 @@
 #define CPUID_CACHETYPE	1
 #define CPUID_TCM	2
 #define CPUID_TLBTYPE	3
+#define CPUID_MPUIR	4
 #define CPUID_MPIDR	5
 
+#ifdef CONFIG_CPU_V7M
+#define CPUID_EXT_PFR0	0x40
+#define CPUID_EXT_PFR1	0x44
+#define CPUID_EXT_DFR0	0x48
+#define CPUID_EXT_AFR0	0x4c
+#define CPUID_EXT_MMFR0	0x50
+#define CPUID_EXT_MMFR1	0x54
+#define CPUID_EXT_MMFR2	0x58
+#define CPUID_EXT_MMFR3	0x5c
+#define CPUID_EXT_ISAR0	0x60
+#define CPUID_EXT_ISAR1	0x64
+#define CPUID_EXT_ISAR2	0x68
+#define CPUID_EXT_ISAR3	0x6c
+#define CPUID_EXT_ISAR4	0x70
+#define CPUID_EXT_ISAR5	0x74
+#else
 #define CPUID_EXT_PFR0	"c1, 0"
 #define CPUID_EXT_PFR1	"c1, 1"
 #define CPUID_EXT_DFR0	"c1, 2"
@@ -24,6 +41,7 @@
 #define CPUID_EXT_ISAR3	"c2, 3"
 #define CPUID_EXT_ISAR4	"c2, 4"
 #define CPUID_EXT_ISAR5	"c2, 5"
+#endif
 
 #define MPIDR_SMP_BITMASK (0x3 << 30)
 #define MPIDR_SMP_VALUE (0x2 << 30)
@@ -81,7 +99,23 @@ extern unsigned int processor_id;
 		__val;							\
 	})
 
-#else /* ifdef CONFIG_CPU_CP15 */
+#elif defined(CONFIG_CPU_V7M)
+
+#include <asm/io.h>
+#include <asm/v7m.h>
+
+#define read_cpuid(reg)							\
+	({								\
+		WARN_ON_ONCE(1);					\
+		0;							\
+	})
+
+static inline unsigned int __attribute_const__ read_cpuid_ext(unsigned offset)
+{
+	return readl(BASEADDR_V7M_SCB + offset);
+}
+
+#else /* ifdef CONFIG_CPU_CP15 / elif defined (CONFIG_CPU_V7M) */
 
 /*
  * read_cpuid and read_cpuid_ext should only ever be called on machines that
@@ -108,7 +142,14 @@ static inline unsigned int __attribute_const__ read_cpuid_id(void)
 	return read_cpuid(CPUID_ID);
 }
 
-#else /* ifdef CONFIG_CPU_CP15 */
+#elif defined(CONFIG_CPU_V7M)
+
+static inline unsigned int __attribute_const__ read_cpuid_id(void)
+{
+	return readl(BASEADDR_V7M_SCB + V7M_SCB_CPUID);
+}
+
+#else /* ifdef CONFIG_CPU_CP15 / elif defined(CONFIG_CPU_V7M) */
 
 static inline unsigned int __attribute_const__ read_cpuid_id(void)
 {
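To illustrate the v7-M path above: the ID registers live in the memory-mapped System Control Block rather than behind CP15 mrc instructions, so read_cpuid_ext() reduces to an ordinary MMIO load. A minimal bare-metal sketch (only the SCB base and register offsets are architectural; the helper names here are invented):

#include <stdint.h>

#define SCB_BASE	0xe000ed00UL		/* BASEADDR_V7M_SCB */
#define SCB_CPUID	(SCB_BASE + 0x00)	/* V7M_SCB_CPUID */
#define SCB_MMFR0	(SCB_BASE + 0x50)	/* CPUID_EXT_MMFR0 */

/* what readl() amounts to for this system register space */
static inline uint32_t scb_read(uintptr_t addr)
{
	return *(volatile uint32_t *)addr;
}

uint32_t read_main_id(void) { return scb_read(SCB_CPUID); }
uint32_t read_mmfr0(void)   { return scb_read(SCB_MMFR0); }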
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index ea289e1435e7..c81adc08b3fb 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -117,10 +117,37 @@
 # endif
 #endif
 
+#if defined(CONFIG_CPU_V7M)
+# ifdef _CACHE
+#  define MULTI_CACHE 1
+# else
+#  define _CACHE nop
+# endif
+#endif
+
 #if !defined(_CACHE) && !defined(MULTI_CACHE)
 #error Unknown cache maintenance model
 #endif
 
+#ifndef __ASSEMBLER__
+extern inline void nop_flush_icache_all(void) { }
+extern inline void nop_flush_kern_cache_all(void) { }
+extern inline void nop_flush_kern_cache_louis(void) { }
+extern inline void nop_flush_user_cache_all(void) { }
+extern inline void nop_flush_user_cache_range(unsigned long a,
+		unsigned long b, unsigned int c) { }
+
+extern inline void nop_coherent_kern_range(unsigned long a, unsigned long b) { }
+extern inline int nop_coherent_user_range(unsigned long a,
+		unsigned long b) { return 0; }
+extern inline void nop_flush_kern_dcache_area(void *a, size_t s) { }
+
+extern inline void nop_dma_flush_range(const void *a, const void *b) { }
+
+extern inline void nop_dma_map_area(const void *s, size_t l, int f) { }
+extern inline void nop_dma_unmap_area(const void *s, size_t l, int f) { }
+#endif
+
 #ifndef MULTI_CACHE
 #define __cpuc_flush_icache_all	__glue(_CACHE,_flush_icache_all)
 #define __cpuc_flush_kern_all	__glue(_CACHE,_flush_kern_cache_all)
diff --git a/arch/arm/include/asm/glue-df.h b/arch/arm/include/asm/glue-df.h
index b6e9f2c108b5..6b70f1b46a6e 100644
--- a/arch/arm/include/asm/glue-df.h
+++ b/arch/arm/include/asm/glue-df.h
@@ -95,6 +95,14 @@
 # endif
 #endif
 
+#ifdef CONFIG_CPU_ABRT_NOMMU
+# ifdef CPU_DABORT_HANDLER
+#  define MULTI_DABORT 1
+# else
+#  define CPU_DABORT_HANDLER nommu_early_abort
+# endif
+#endif
+
 #ifndef CPU_DABORT_HANDLER
 #error Unknown data abort handler type
 #endif
diff --git a/arch/arm/include/asm/glue-proc.h b/arch/arm/include/asm/glue-proc.h
index 8017e94acc5e..74a8b84f3cb1 100644
--- a/arch/arm/include/asm/glue-proc.h
+++ b/arch/arm/include/asm/glue-proc.h
@@ -230,6 +230,15 @@
 # endif
 #endif
 
+#ifdef CONFIG_CPU_V7M
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_v7m
+# endif
+#endif
+
 #ifdef CONFIG_CPU_PJ4B
 # ifdef CPU_NAME
 #  undef  MULTI_CPU
diff --git a/arch/arm/include/asm/hugetlb-3level.h b/arch/arm/include/asm/hugetlb-3level.h
new file mode 100644
index 000000000000..d4014fbe5ea3
--- /dev/null
+++ b/arch/arm/include/asm/hugetlb-3level.h
@@ -0,0 +1,71 @@
+/*
+ * arch/arm/include/asm/hugetlb-3level.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * Based on arch/x86/include/asm/hugetlb.h.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_ARM_HUGETLB_3LEVEL_H
+#define _ASM_ARM_HUGETLB_3LEVEL_H
+
+
+/*
+ * If our huge pte is non-zero then mark the valid bit.
+ * This allows pte_present(huge_ptep_get(ptep)) to return true for non-zero
+ * ptes.
+ * (The valid bit is automatically cleared by set_pte_at for PROT_NONE ptes).
+ */
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+	pte_t retval = *ptep;
+	if (pte_val(retval))
+		pte_val(retval) |= L_PTE_VALID;
+	return retval;
+}
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+				   pte_t *ptep, pte_t pte)
+{
+	set_pte_at(mm, addr, ptep, pte);
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+					 unsigned long addr, pte_t *ptep)
+{
+	ptep_clear_flush(vma, addr, ptep);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+					   unsigned long addr, pte_t *ptep)
+{
+	ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+					    unsigned long addr, pte_t *ptep)
+{
+	return ptep_get_and_clear(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+					     unsigned long addr, pte_t *ptep,
+					     pte_t pte, int dirty)
+{
+	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+#endif /* _ASM_ARM_HUGETLB_3LEVEL_H */
diff --git a/arch/arm/include/asm/hugetlb.h b/arch/arm/include/asm/hugetlb.h
new file mode 100644
index 000000000000..1f1b1cd112f3
--- /dev/null
+++ b/arch/arm/include/asm/hugetlb.h
@@ -0,0 +1,84 @@
+/*
+ * arch/arm/include/asm/hugetlb.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * Based on arch/x86/include/asm/hugetlb.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_ARM_HUGETLB_H
+#define _ASM_ARM_HUGETLB_H
+
+#include <asm/page.h>
+#include <asm-generic/hugetlb.h>
+
+#include <asm/hugetlb-3level.h>
+
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+					  unsigned long addr, unsigned long end,
+					  unsigned long floor,
+					  unsigned long ceiling)
+{
+	free_pgd_range(tlb, addr, end, floor, ceiling);
+}
+
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+					 unsigned long addr, unsigned long len)
+{
+	return 0;
+}
+
+static inline int prepare_hugepage_range(struct file *file,
+					 unsigned long addr, unsigned long len)
+{
+	struct hstate *h = hstate_file(file);
+	if (len & ~huge_page_mask(h))
+		return -EINVAL;
+	if (addr & ~huge_page_mask(h))
+		return -EINVAL;
+	return 0;
+}
+
+static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+	return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+	return pte_wrprotect(pte);
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+	return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+	clear_bit(PG_dcache_clean, &page->flags);
+}
+
+#endif /* _ASM_ARM_HUGETLB_H */
diff --git a/arch/arm/include/asm/irqflags.h b/arch/arm/include/asm/irqflags.h
index 1e6cca55c750..3b763d6652a0 100644
--- a/arch/arm/include/asm/irqflags.h
+++ b/arch/arm/include/asm/irqflags.h
@@ -8,6 +8,16 @@
 /*
  * CPU interrupt mask handling.
  */
+#ifdef CONFIG_CPU_V7M
+#define IRQMASK_REG_NAME_R "primask"
+#define IRQMASK_REG_NAME_W "primask"
+#define IRQMASK_I_BIT	1
+#else
+#define IRQMASK_REG_NAME_R "cpsr"
+#define IRQMASK_REG_NAME_W "cpsr_c"
+#define IRQMASK_I_BIT	PSR_I_BIT
+#endif
+
 #if __LINUX_ARM_ARCH__ >= 6
 
 static inline unsigned long arch_local_irq_save(void)
@@ -15,7 +25,7 @@ static inline unsigned long arch_local_irq_save(void)
 	unsigned long flags;
 
 	asm volatile(
-		"	mrs	%0, cpsr	@ arch_local_irq_save\n"
+		"	mrs	%0, " IRQMASK_REG_NAME_R "	@ arch_local_irq_save\n"
 		"	cpsid	i"
 		: "=r" (flags) : : "memory", "cc");
 	return flags;
@@ -129,7 +139,7 @@ static inline unsigned long arch_local_save_flags(void)
 {
 	unsigned long flags;
 	asm volatile(
-		"	mrs	%0, cpsr	@ local_save_flags"
+		"	mrs	%0, " IRQMASK_REG_NAME_R "	@ local_save_flags"
 		: "=r" (flags) : : "memory", "cc");
 	return flags;
 }
@@ -140,7 +150,7 @@ static inline unsigned long arch_local_save_flags(void)
 static inline void arch_local_irq_restore(unsigned long flags)
 {
 	asm volatile(
-		"	msr	cpsr_c, %0	@ local_irq_restore"
+		"	msr	" IRQMASK_REG_NAME_W ", %0	@ local_irq_restore"
 		:
 		: "r" (flags)
 		: "memory", "cc");
@@ -148,8 +158,8 @@ static inline void arch_local_irq_restore(unsigned long flags)
 
 static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
-	return flags & PSR_I_BIT;
+	return flags & IRQMASK_I_BIT;
 }
 
-#endif
-#endif
+#endif /* ifdef __KERNEL__ */
+#endif /* ifndef __ASM_ARM_IRQFLAGS_H */
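The IRQMASK_* indirection above leaves the inline-asm templates untouched; only the register name and the "disabled" bit change. As a sketch, the two expansions of arch_local_irq_save() after this patch are:

/* v7-M: IRQ state lives in PRIMASK, bit 0 (IRQMASK_I_BIT == 1) */
static inline unsigned long arch_local_irq_save_v7m(void)
{
	unsigned long flags;
	asm volatile(
		"	mrs	%0, primask	@ arch_local_irq_save\n"
		"	cpsid	i"
		: "=r" (flags) : : "memory", "cc");
	return flags;
}

/* classic ARM: IRQ state lives in CPSR.I (PSR_I_BIT == 0x80) */
static inline unsigned long arch_local_irq_save_classic(void)
{
	unsigned long flags;
	asm volatile(
		"	mrs	%0, cpsr	@ arch_local_irq_save\n"
		"	cpsid	i"
		: "=r" (flags) : : "memory", "cc");
	return flags;
}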
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 308ad7d6f98b..75bf07910b81 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -8,6 +8,8 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/types.h>
+
 #ifndef __ASSEMBLY__
 
 struct tag;
@@ -16,8 +18,10 @@ struct pt_regs;
 struct smp_operations;
 #ifdef CONFIG_SMP
 #define smp_ops(ops) (&(ops))
+#define smp_init_ops(ops) (&(ops))
 #else
 #define smp_ops(ops) (struct smp_operations *)NULL
+#define smp_init_ops(ops) (bool (*)(void))NULL
 #endif
 
 struct machine_desc {
@@ -41,6 +45,7 @@ struct machine_desc {
 	unsigned char		reserve_lp2 :1;	/* never has lp2	*/
 	char			restart_mode;	/* default restart mode	*/
 	struct smp_operations	*smp;		/* SMP operations	*/
+	bool			(*smp_init)(void);
 	void			(*fixup)(struct tag *, char **,
 					 struct meminfo *);
 	void			(*reserve)(void);/* reserve mem blocks	*/
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 57870ab313c5..584786f740f9 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -18,6 +18,8 @@
 #include <linux/types.h>
 #include <linux/sizes.h>
 
+#include <asm/cache.h>
+
 #ifdef CONFIG_NEED_MACH_MEMORY_H
 #include <mach/memory.h>
 #endif
@@ -141,6 +143,20 @@
 #define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
 #define phys_to_page(phys)	(pfn_to_page(__phys_to_pfn(phys)))
 
+/*
+ * Minimum guaranted alignment in pgd_alloc(). The page table pointers passed
+ * around in head.S and proc-*.S are shifted by this amount, in order to
+ * leave spare high bits for systems with physical address extension. This
+ * does not fully accomodate the 40-bit addressing capability of ARM LPAE, but
+ * gives us about 38-bits or so.
+ */
+#ifdef CONFIG_ARM_LPAE
+#define ARCH_PGD_SHIFT		L1_CACHE_SHIFT
+#else
+#define ARCH_PGD_SHIFT		0
+#endif
+#define ARCH_PGD_MASK		((1 << ARCH_PGD_SHIFT) - 1)
+
 #ifndef __ASSEMBLY__
 
 /*
@@ -207,7 +223,7 @@ static inline unsigned long __phys_to_virt(unsigned long x)
 * direct-mapped view. We assume this is the first page
 * of RAM in the mem_map as well.
 */
-#define PHYS_PFN_OFFSET	(PHYS_OFFSET >> PAGE_SHIFT)
+#define PHYS_PFN_OFFSET	((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
 
 /*
 * These are *only* valid on the kernel direct mapped RAM memory.
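The ARCH_PGD_SHIFT comment can be made concrete: with L1_CACHE_SHIFT typically 6 on ARMv7, a 64-byte-aligned pgd physical address still fits a 32-bit register after shifting right by 6, which recovers addresses up to 2^(32+6) = 2^38. A small demonstration (the sample address is invented):

#include <stdint.h>
#include <stdio.h>

#define ARCH_PGD_SHIFT	6	/* L1_CACHE_SHIFT, typically 6 on ARMv7 */

int main(void)
{
	uint64_t pgd_phys = 0x23f0000000ULL;	/* 38-bit, 64-byte aligned */
	uint32_t reg = pgd_phys >> ARCH_PGD_SHIFT;	/* fits in 32 bits */

	/* the full physical address is recovered by shifting back up */
	printf("packed: %#x, restored: %#llx\n", reg,
	       (unsigned long long)reg << ARCH_PGD_SHIFT);
	return 0;
}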
diff --git a/arch/arm/include/asm/mpu.h b/arch/arm/include/asm/mpu.h
new file mode 100644
index 000000000000..c3247cc2fe08
--- /dev/null
+++ b/arch/arm/include/asm/mpu.h
@@ -0,0 +1,76 @@
+#ifndef __ARM_MPU_H
+#define __ARM_MPU_H
+
+#ifdef CONFIG_ARM_MPU
+
+/* MPUIR layout */
+#define MPUIR_nU		1
+#define MPUIR_DREGION		8
+#define MPUIR_IREGION		16
+#define MPUIR_DREGION_SZMASK	(0xFF << MPUIR_DREGION)
+#define MPUIR_IREGION_SZMASK	(0xFF << MPUIR_IREGION)
+
+/* ID_MMFR0 data relevant to MPU */
+#define MMFR0_PMSA		(0xF << 4)
+#define MMFR0_PMSAv7		(3 << 4)
+
+/* MPU D/I Size Register fields */
+#define MPU_RSR_SZ		1
+#define MPU_RSR_EN		0
+
+/* The D/I RSR value for an enabled region spanning the whole of memory */
+#define MPU_RSR_ALL_MEM		63
+
+/* Individual bits in the DR/IR ACR */
+#define MPU_ACR_XN		(1 << 12)
+#define MPU_ACR_SHARED		(1 << 2)
+
+/* C, B and TEX[2:0] bits only have semantic meanings when grouped */
+#define MPU_RGN_CACHEABLE	0xB
+#define MPU_RGN_SHARED_CACHEABLE (MPU_RGN_CACHEABLE | MPU_ACR_SHARED)
+#define MPU_RGN_STRONGLY_ORDERED 0
+
+/* Main region should only be shared for SMP */
+#ifdef CONFIG_SMP
+#define MPU_RGN_NORMAL	(MPU_RGN_CACHEABLE | MPU_ACR_SHARED)
+#else
+#define MPU_RGN_NORMAL	MPU_RGN_CACHEABLE
+#endif
+
+/* Access permission bits of ACR (only define those that we use)*/
+#define MPU_AP_PL1RW_PL0RW	(0x3 << 8)
+#define MPU_AP_PL1RW_PL0R0	(0x2 << 8)
+#define MPU_AP_PL1RW_PL0NA	(0x1 << 8)
+
+/* For minimal static MPU region configurations */
+#define MPU_PROBE_REGION	0
+#define MPU_BG_REGION		1
+#define MPU_RAM_REGION		2
+#define MPU_VECTORS_REGION	3
+
+/* Maximum number of regions Linux is interested in */
+#define MPU_MAX_REGIONS		16
+
+#define MPU_DATA_SIDE		0
+#define MPU_INSTR_SIDE		1
+
+#ifndef __ASSEMBLY__
+
+struct mpu_rgn {
+	/* Assume same attributes for d/i-side */
+	u32 drbar;
+	u32 drsr;
+	u32 dracr;
+};
+
+struct mpu_rgn_info {
+	u32 mpuir;
+	struct mpu_rgn rgns[MPU_MAX_REGIONS];
+};
+extern struct mpu_rgn_info mpu_rgn_info;
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* CONFIG_ARM_MPU */
+
+#endif
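The RSR encoding behind MPU_RSR_SZ and MPU_RSR_ALL_MEM: a PMSAv7 region covers 2^(SZ+1) bytes, with SZ in bits [5:1] and the enable bit at bit 0, so MPU_RSR_ALL_MEM (63 == (31 << 1) | 1) is an enabled 4GB region. A hypothetical helper (not part of the patch) that builds an RSR value from a power-of-two size:

#include <stdint.h>

#define MPU_RSR_EN_BIT	(1U << 0)	/* region enable, bit 0 */
#define MPU_RSR_SZ_SHIFT	1	/* SZ field starts at bit 1 */

/*
 * 4KB region -> SZ = 11, 4GB region -> SZ = 31.  PMSAv7's architectural
 * minimum region size is 32 bytes, i.e. SZ >= 4.
 */
static uint32_t mpu_rsr_for_size(uint64_t bytes)
{
	unsigned int sz = 4;

	while ((1ULL << (sz + 1)) < bytes)
		sz++;
	return (sz << MPU_RSR_SZ_SHIFT) | MPU_RSR_EN_BIT;
}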
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 812a4944e783..6363f3d1d505 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -13,7 +13,7 @@
 /* PAGE_SHIFT determines the page size */
 #define PAGE_SHIFT		12
 #define PAGE_SIZE		(_AC(1,UL) << PAGE_SHIFT)
-#define PAGE_MASK		(~(PAGE_SIZE-1))
+#define PAGE_MASK		(~((1 << PAGE_SHIFT) - 1))
 
 #ifndef __ASSEMBLY__
 
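The PAGE_MASK rewrite matters for LPAE: ~(PAGE_SIZE-1) is a 32-bit unsigned long on ARM, which zero-extends when applied to a 64-bit phys_addr_t and wipes out address bits above bit 31, while ~((1 << PAGE_SHIFT) - 1) is a signed int that sign-extends to all-ones in the upper bits. A runnable demonstration (the sample address is invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t phys = 0x880000567ULL;			/* >4GB LPAE address */
	uint32_t old_mask = ~(uint32_t)(4096u - 1);	/* models 32-bit ~(PAGE_SIZE-1) */
	int32_t new_mask = ~((1 << 12) - 1);		/* ~((1 << PAGE_SHIFT) - 1) */

	/* zero-extends: prints 0x80000000, bit 35 of the address is lost */
	printf("old: %#llx\n", (unsigned long long)(phys & old_mask));
	/* sign-extends to ...fffff000: prints 0x880000000 */
	printf("new: %#llx\n", (unsigned long long)(phys & new_mask));
	return 0;
}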
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
index 18f5cef82ad5..626989fec4d3 100644
--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
@@ -30,6 +30,7 @@
 #define PMD_TYPE_FAULT		(_AT(pmdval_t, 0) << 0)
 #define PMD_TYPE_TABLE		(_AT(pmdval_t, 3) << 0)
 #define PMD_TYPE_SECT		(_AT(pmdval_t, 1) << 0)
+#define PMD_TABLE_BIT		(_AT(pmdval_t, 1) << 1)
 #define PMD_BIT4		(_AT(pmdval_t, 0))
 #define PMD_DOMAIN(x)		(_AT(pmdval_t, 0))
 #define PMD_APTABLE_SHIFT	(61)
@@ -41,6 +42,8 @@
  */
 #define PMD_SECT_BUFFERABLE	(_AT(pmdval_t, 1) << 2)
 #define PMD_SECT_CACHEABLE	(_AT(pmdval_t, 1) << 3)
+#define PMD_SECT_USER		(_AT(pmdval_t, 1) << 6)		/* AP[1] */
+#define PMD_SECT_RDONLY		(_AT(pmdval_t, 1) << 7)		/* AP[2] */
 #define PMD_SECT_S		(_AT(pmdval_t, 3) << 8)
 #define PMD_SECT_AF		(_AT(pmdval_t, 1) << 10)
 #define PMD_SECT_nG		(_AT(pmdval_t, 1) << 11)
@@ -66,6 +69,7 @@
 #define PTE_TYPE_MASK		(_AT(pteval_t, 3) << 0)
 #define PTE_TYPE_FAULT		(_AT(pteval_t, 0) << 0)
 #define PTE_TYPE_PAGE		(_AT(pteval_t, 3) << 0)
+#define PTE_TABLE_BIT		(_AT(pteval_t, 1) << 1)
 #define PTE_BUFFERABLE		(_AT(pteval_t, 1) << 2)		/* AttrIndx[0] */
 #define PTE_CACHEABLE		(_AT(pteval_t, 1) << 3)		/* AttrIndx[1] */
 #define PTE_EXT_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
@@ -79,4 +83,24 @@
 #define PHYS_MASK_SHIFT		(40)
 #define PHYS_MASK		((1ULL << PHYS_MASK_SHIFT) - 1)
 
+/*
+ * TTBR0/TTBR1 split (PAGE_OFFSET):
+ *   0x40000000: T0SZ = 2, T1SZ = 0 (not used)
+ *   0x80000000: T0SZ = 0, T1SZ = 1
+ *   0xc0000000: T0SZ = 0, T1SZ = 2
+ *
+ * Only use this feature if PHYS_OFFSET <= PAGE_OFFSET, otherwise
+ * booting secondary CPUs would end up using TTBR1 for the identity
+ * mapping set up in TTBR0.
+ */
+#if defined CONFIG_VMSPLIT_2G
+#define TTBR1_OFFSET	16			/* skip two L1 entries */
+#elif defined CONFIG_VMSPLIT_3G
+#define TTBR1_OFFSET	(4096 * (1 + 3))	/* only L2, skip pgd + 3*pmd */
+#else
+#define TTBR1_OFFSET	0
+#endif
+
+#define TTBR1_SIZE	(((PAGE_OFFSET >> 30) - 1) << 16)
+
 #endif
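The TTBR1_SIZE formula derives T1SZ directly from PAGE_OFFSET: (PAGE_OFFSET >> 30) - 1 yields 0, 1 or 2 for the three supported splits, shifted to bit 16 where TTBCR.T1SZ lives. Worked through as a small program (illustrative arithmetic only):

#include <stdio.h>

int main(void)
{
	unsigned long splits[] = { 0x40000000UL, 0x80000000UL, 0xc0000000UL };

	for (int i = 0; i < 3; i++) {
		unsigned long t1sz = (splits[i] >> 30) - 1;
		/* TTBR1_SIZE places T1SZ in TTBCR bits [18:16] */
		printf("PAGE_OFFSET %#lx -> T1SZ %lu -> TTBR1_SIZE %#lx\n",
		       splits[i], t1sz, t1sz << 16);
	}
	return 0;
}

This prints T1SZ = 0 for the 1GB split (matching the "not used" note above), 1 for 2GB and 2 for 3GB.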
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 86b8fe398b95..5689c18c85f5 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -33,7 +33,7 @@
 #define PTRS_PER_PMD		512
 #define PTRS_PER_PGD		4
 
-#define PTE_HWTABLE_PTRS	(PTRS_PER_PTE)
+#define PTE_HWTABLE_PTRS	(0)
 #define PTE_HWTABLE_OFF		(0)
 #define PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(u64))
 
@@ -48,20 +48,28 @@
 #define PMD_SHIFT		21
 
 #define PMD_SIZE		(1UL << PMD_SHIFT)
-#define PMD_MASK		(~(PMD_SIZE-1))
+#define PMD_MASK		(~((1 << PMD_SHIFT) - 1))
 #define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
-#define PGDIR_MASK		(~(PGDIR_SIZE-1))
+#define PGDIR_MASK		(~((1 << PGDIR_SHIFT) - 1))
 
 /*
  * section address mask and size definitions.
  */
 #define SECTION_SHIFT		21
 #define SECTION_SIZE		(1UL << SECTION_SHIFT)
-#define SECTION_MASK		(~(SECTION_SIZE-1))
+#define SECTION_MASK		(~((1 << SECTION_SHIFT) - 1))
 
 #define USER_PTRS_PER_PGD	(PAGE_OFFSET / PGDIR_SIZE)
 
 /*
+ * Hugetlb definitions.
+ */
+#define HPAGE_SHIFT		PMD_SHIFT
+#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
+#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
+
+/*
  * "Linux" PTE definitions for LPAE.
  *
  * These bits overlap with the hardware bits but the naming is preserved for
@@ -79,6 +87,11 @@
 #define L_PTE_SPECIAL		(_AT(pteval_t, 1) << 56)	/* unused */
 #define L_PTE_NONE		(_AT(pteval_t, 1) << 57)	/* PROT_NONE */
 
+#define PMD_SECT_VALID		(_AT(pmdval_t, 1) << 0)
+#define PMD_SECT_DIRTY		(_AT(pmdval_t, 1) << 55)
+#define PMD_SECT_SPLITTING	(_AT(pmdval_t, 1) << 56)
+#define PMD_SECT_NONE		(_AT(pmdval_t, 1) << 57)
+
 /*
  * To be used in assembly code with the upper page attributes.
  */
@@ -166,8 +179,83 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 		clean_pmd_entry(pmdp);	\
 	} while (0)
 
+/*
+ * For 3 levels of paging the PTE_EXT_NG bit will be set for user address ptes
+ * that are written to a page table but not for ptes created with mk_pte.
+ *
+ * In hugetlb_no_page, a new huge pte (new_pte) is generated and passed to
+ * hugetlb_cow, where it is compared with an entry in a page table.
+ * This comparison test fails erroneously leading ultimately to a memory leak.
+ *
+ * To correct this behaviour, we mask off PTE_EXT_NG for any pte that is
+ * present before running the comparison.
+ */
+#define __HAVE_ARCH_PTE_SAME
+#define pte_same(pte_a,pte_b)	((pte_present(pte_a) ? pte_val(pte_a) & ~PTE_EXT_NG	\
+					: pte_val(pte_a))				\
+				== (pte_present(pte_b) ? pte_val(pte_b) & ~PTE_EXT_NG	\
+					: pte_val(pte_b)))
+
 #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,__pte(pte_val(pte)|(ext)))
 
+#define pte_huge(pte)		(pte_val(pte) && !(pte_val(pte) & PTE_TABLE_BIT))
+#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))
+
+#define pmd_young(pmd)		(pmd_val(pmd) & PMD_SECT_AF)
+
+#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write(pmd)		(!(pmd_val(pmd) & PMD_SECT_RDONLY))
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
+#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
+#endif
+
+#define PMD_BIT_FUNC(fn,op) \
+static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
+
+PMD_BIT_FUNC(wrprotect,	|= PMD_SECT_RDONLY);
+PMD_BIT_FUNC(mkold,	&= ~PMD_SECT_AF);
+PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING);
+PMD_BIT_FUNC(mkwrite,	&= ~PMD_SECT_RDONLY);
+PMD_BIT_FUNC(mkdirty,	|= PMD_SECT_DIRTY);
+PMD_BIT_FUNC(mkyoung,	|= PMD_SECT_AF);
+
+#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
+
+#define pmd_pfn(pmd)		(((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
+#define pfn_pmd(pfn,prot)	(__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)
+
+/* represent a notpresent pmd by zero, this is used by pmdp_invalidate */
+#define pmd_mknotpresent(pmd)	(__pmd(0))
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+	const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | PMD_SECT_RDONLY |
+				PMD_SECT_VALID | PMD_SECT_NONE;
+	pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask);
+	return pmd;
+}
+
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+			      pmd_t *pmdp, pmd_t pmd)
+{
+	BUG_ON(addr >= TASK_SIZE);
+
+	/* create a faulting entry if PROT_NONE protected */
+	if (pmd_val(pmd) & PMD_SECT_NONE)
+		pmd_val(pmd) &= ~PMD_SECT_VALID;
+
+	*pmdp = __pmd(pmd_val(pmd) | PMD_SECT_nG);
+	flush_pmd_entry(pmdp);
+}
+
+static inline int has_transparent_hugepage(void)
+{
+	return 1;
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_PGTABLE_3LEVEL_H */
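The pte_same() override above guards against comparing a table-resident pte (which has the nG bit set by set_pte_at) with one freshly built by mk_pte (which does not). A compact illustration of the failure mode it fixes, using PTE_EXT_NG at bit 11 as on LPAE and invented demo values:

#include <stdint.h>
#include <stdio.h>

#define PTE_EXT_NG	(1ULL << 11)	/* LPAE nG bit */
#define L_PTE_VALID	(1ULL << 0)

int main(void)
{
	/* pte as written to the page table (set_pte_at added nG)... */
	uint64_t in_table = 0x880000000ULL | L_PTE_VALID | PTE_EXT_NG;
	/* ...vs the logically identical pte from mk_pte (no nG yet) */
	uint64_t from_mk_pte = 0x880000000ULL | L_PTE_VALID;

	printf("naive compare: %d\n", in_table == from_mk_pte);	/* 0: leak */
	printf("nG-masked:     %d\n",
	       (in_table & ~PTE_EXT_NG) == (from_mk_pte & ~PTE_EXT_NG)); /* 1 */
	return 0;
}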
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 9bcd262a9008..eaedce7b7e3a 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -24,6 +24,9 @@
 #include <asm/memory.h>
 #include <asm/pgtable-hwdef.h>
 
+
+#include <asm/tlbflush.h>
+
 #ifdef CONFIG_ARM_LPAE
 #include <asm/pgtable-3level.h>
 #else
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index f3628fb3d2b3..5324c1112f3a 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -60,7 +60,7 @@ extern struct processor {
 	/*
 	 * Set the page table
 	 */
-	void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm);
+	void (*switch_mm)(phys_addr_t pgd_phys, struct mm_struct *mm);
 	/*
 	 * Set a possibly extended PTE. Non-extended PTEs should
 	 * ignore 'ext'.
@@ -82,7 +82,7 @@
 extern void cpu_proc_fin(void);
 extern int cpu_do_idle(void);
 extern void cpu_dcache_clean_area(void *, int);
-extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
+extern void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
 #ifdef CONFIG_ARM_LPAE
 extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte);
 #else
@@ -116,13 +116,25 @@ extern void cpu_resume(void);
 #define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)
 
 #ifdef CONFIG_ARM_LPAE
+
+#define cpu_get_ttbr(nr)					\
+	({							\
+		u64 ttbr;					\
+		__asm__("mrrc	p15, " #nr ", %Q0, %R0, c2"	\
+			: "=r" (ttbr));				\
+		ttbr;						\
+	})
+
+#define cpu_set_ttbr(nr, val)					\
+	do {							\
+		u64 ttbr = val;					\
+		__asm__("mcrr	p15, " #nr ", %Q0, %R0, c2"	\
+			: : "r" (ttbr));			\
+	} while (0)
+
 #define cpu_get_pgd()	\
 	({						\
-		unsigned long pg, pg2;			\
-		__asm__("mrrc	p15, 0, %0, %1, c2"	\
-			: "=r" (pg), "=r" (pg2)		\
-			:				\
-			: "cc");			\
+		u64 pg = cpu_get_ttbr(0);		\
 		pg &= ~(PTRS_PER_PGD*sizeof(pgd_t)-1);	\
 		(pgd_t *)phys_to_virt(pg);		\
 	})
@@ -137,6 +149,10 @@ extern void cpu_resume(void);
 	})
 #endif
 
+#else	/*!CONFIG_MMU */
+
+#define cpu_switch_mm(pgd,mm)	{ }
+
 #endif
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
index ce0dbe7c1625..c4ae171850f8 100644
--- a/arch/arm/include/asm/psci.h
+++ b/arch/arm/include/asm/psci.h
@@ -32,5 +32,14 @@ struct psci_operations {
 };
 
 extern struct psci_operations psci_ops;
+extern struct smp_operations psci_smp_ops;
+
+#ifdef CONFIG_ARM_PSCI
+void psci_init(void);
+bool psci_smp_available(void);
+#else
+static inline void psci_init(void) { }
+static inline bool psci_smp_available(void) { return false; }
+#endif
 
 #endif /* __ASM_ARM_PSCI_H */
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 3d52ee1bfb31..04c99f36ff7f 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -45,6 +45,7 @@ struct pt_regs {
  */
 static inline int valid_user_regs(struct pt_regs *regs)
 {
+#ifndef CONFIG_CPU_V7M
 	unsigned long mode = regs->ARM_cpsr & MODE_MASK;
 
 	/*
@@ -67,6 +68,9 @@ static inline int valid_user_regs(struct pt_regs *regs)
 	regs->ARM_cpsr |= USR_MODE;
 
 	return 0;
+#else /* ifndef CONFIG_CPU_V7M */
+	return 1;
+#endif
 }
 
 static inline long regs_return_value(struct pt_regs *regs)
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index d3a22bebe6ce..a8cae71caceb 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -65,7 +65,10 @@ asmlinkage void secondary_start_kernel(void);
  * Initial data for bringing up a secondary CPU.
  */
 struct secondary_data {
-	unsigned long pgdir;
+	union {
+		unsigned long mpu_rgn_szr;
+		unsigned long pgdir;
+	};
 	unsigned long swapper_pg_dir;
 	void *stack;
 };
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index e78983202737..6462a721ebd4 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -26,6 +26,9 @@ static inline bool is_smp(void)
 }
 
 /* all SMP configurations have the extended CPUID registers */
+#ifndef CONFIG_MMU
+#define tlb_ops_need_broadcast()	0
+#else
 static inline int tlb_ops_need_broadcast(void)
 {
 	if (!is_smp())
@@ -33,6 +36,7 @@ static inline int tlb_ops_need_broadcast(void)
 
 	return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
 }
+#endif
 
 #if !defined(CONFIG_SMP) || __LINUX_ARM_ARCH__ >= 7
 #define cache_ops_need_broadcast()	0
@@ -66,4 +70,22 @@ static inline int get_logical_index(u32 mpidr)
 	return -EINVAL;
 }
 
+/*
+ * NOTE ! Assembly code relies on the following
+ * structure memory layout in order to carry out load
+ * multiple from its base address. For more
+ * information check arch/arm/kernel/sleep.S
+ */
+struct mpidr_hash {
+	u32	mask; /* used by sleep.S */
+	u32	shift_aff[3]; /* used by sleep.S */
+	u32	bits;
+};
+
+extern struct mpidr_hash mpidr_hash;
+
+static inline u32 mpidr_hash_size(void)
+{
+	return 1 << mpidr_hash.bits;
+}
 #endif
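mpidr_hash packs the MPIDR affinity fields into a dense index so sleep.S can reach a per-CPU save slot with a handful of shifts. A rough sketch of the lookup side under the layout above (the masks and shift values are derived at boot from the set of online MPIDRs; everything below is illustrative):

#include <stdint.h>

struct mpidr_hash {
	uint32_t mask;		/* used by sleep.S */
	uint32_t shift_aff[3];	/* used by sleep.S */
	uint32_t bits;
};

/*
 * Each affinity level is kept at its original bit position, then
 * shifted down just far enough to land immediately above the bits
 * retained from the levels below it, yielding a compact index.
 */
static uint32_t mpidr_hash_index(const struct mpidr_hash *h, uint32_t mpidr)
{
	uint32_t aff0 = mpidr & h->mask & 0x000000ff;
	uint32_t aff1 = mpidr & h->mask & 0x0000ff00;
	uint32_t aff2 = mpidr & h->mask & 0x00ff0000;

	return (aff0 >> h->shift_aff[0]) |
	       (aff1 >> h->shift_aff[1]) |
	       (aff2 >> h->shift_aff[2]);
}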
diff --git a/arch/arm/include/asm/suspend.h b/arch/arm/include/asm/suspend.h
index 1c0a551ae375..cd20029bcd94 100644
--- a/arch/arm/include/asm/suspend.h
+++ b/arch/arm/include/asm/suspend.h
@@ -1,6 +1,11 @@
 #ifndef __ASM_ARM_SUSPEND_H
 #define __ASM_ARM_SUSPEND_H
 
+struct sleep_save_sp {
+	u32 *save_ptr_stash;
+	u32 save_ptr_stash_phys;
+};
+
 extern void cpu_resume(void);
 extern int cpu_suspend(unsigned long, int (*)(unsigned long));
 
diff --git a/arch/arm/include/asm/system_info.h b/arch/arm/include/asm/system_info.h
index dfd386d0c022..720ea0320a6d 100644
--- a/arch/arm/include/asm/system_info.h
+++ b/arch/arm/include/asm/system_info.h
@@ -11,6 +11,7 @@
 #define CPU_ARCH_ARMv5TEJ	7
 #define CPU_ARCH_ARMv6		8
 #define CPU_ARCH_ARMv7		9
+#define CPU_ARCH_ARMv7M		10
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index bdf2b8458ec1..46e7cfb3e721 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -204,6 +204,12 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 #endif
 }
 
+static inline void
+tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
+{
+	tlb_add_flush(tlb, addr);
+}
+
 #define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
 #define pmd_free_tlb(tlb, pmdp, addr)	__pmd_free_tlb(tlb, pmdp, addr)
 #define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index a3625d141c1d..fdbb9e369745 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -535,8 +535,33 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 }
 #endif
 
+#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
+
 #endif
 
-#endif /* CONFIG_MMU */
+#elif defined(CONFIG_SMP)	/* !CONFIG_MMU */
+
+#ifndef __ASSEMBLY__
+
+#include <linux/mm_types.h>
+
+static inline void local_flush_tlb_all(void)	{ }
+static inline void local_flush_tlb_mm(struct mm_struct *mm)	{ }
+static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)	{ }
+static inline void local_flush_tlb_kernel_page(unsigned long kaddr)	{ }
+static inline void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)	{ }
+static inline void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)	{ }
+static inline void local_flush_bp_all(void)	{ }
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
+extern void flush_tlb_kernel_page(unsigned long kaddr);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_bp_all(void);
+#endif	/* __ASSEMBLY__ */
+
+#endif
 
 #endif
diff --git a/arch/arm/include/asm/v7m.h b/arch/arm/include/asm/v7m.h
new file mode 100644
index 000000000000..fa88d09fa3d9
--- /dev/null
+++ b/arch/arm/include/asm/v7m.h
@@ -0,0 +1,44 @@
+/*
+ * Common defines for v7m cpus
+ */
+#define V7M_SCS_ICTR			IOMEM(0xe000e004)
+#define V7M_SCS_ICTR_INTLINESNUM_MASK	0x0000000f
+
+#define BASEADDR_V7M_SCB		IOMEM(0xe000ed00)
+
+#define V7M_SCB_CPUID			0x00
+
+#define V7M_SCB_ICSR			0x04
+#define V7M_SCB_ICSR_PENDSVSET		(1 << 28)
+#define V7M_SCB_ICSR_PENDSVCLR		(1 << 27)
+#define V7M_SCB_ICSR_RETTOBASE		(1 << 11)
+
+#define V7M_SCB_VTOR			0x08
+
+#define V7M_SCB_SCR			0x10
+#define V7M_SCB_SCR_SLEEPDEEP		(1 << 2)
+
+#define V7M_SCB_CCR			0x14
+#define V7M_SCB_CCR_STKALIGN		(1 << 9)
+
+#define V7M_SCB_SHPR2			0x1c
+#define V7M_SCB_SHPR3			0x20
+
+#define V7M_SCB_SHCSR			0x24
+#define V7M_SCB_SHCSR_USGFAULTENA	(1 << 18)
+#define V7M_SCB_SHCSR_BUSFAULTENA	(1 << 17)
+#define V7M_SCB_SHCSR_MEMFAULTENA	(1 << 16)
+
+#define V7M_xPSR_FRAMEPTRALIGN		0x00000200
+#define V7M_xPSR_EXCEPTIONNO		0x000001ff
+
+/*
+ * When branching to an address that has bits [31:28] == 0xf an exception return
+ * occurs. Bits [27:5] are reserved (SBOP). If the processor implements the FP
+ * extension Bit [4] defines if the exception frame has space allocated for FP
+ * state information, SBOP otherwise. Bit [3] defines the mode that is returned
+ * to (0 -> handler mode; 1 -> thread mode). Bit [2] defines which sp is used
+ * (0 -> msp; 1 -> psp). Bits [1:0] are fixed to 0b01.
+ */
+#define EXC_RET_STACK_MASK		0x00000004
+#define EXC_RET_THREADMODE_PROCESSSTACK	0xfffffffd
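A small decoder for the EXC_RET semantics spelled out in the comment above (illustrative only; the two sample values follow directly from the bit definitions):

#include <stdint.h>
#include <stdio.h>

#define EXC_RET_STACK_MASK	0x00000004	/* bit 2: 0 = msp, 1 = psp */

static void decode_exc_ret(uint32_t lr)
{
	if ((lr >> 28) != 0xf) {
		printf("%#x: not an exception return\n", lr);
		return;
	}
	printf("%#x: return to %s mode on %s\n", lr,
	       (lr & 0x8) ? "thread" : "handler",
	       (lr & EXC_RET_STACK_MASK) ? "psp" : "msp");
}

int main(void)
{
	decode_exc_ret(0xfffffffd);	/* EXC_RET_THREADMODE_PROCESSSTACK */
	decode_exc_ret(0xfffffff1);	/* handler mode, msp */
	return 0;
}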
diff --git a/arch/arm/include/debug/vexpress.S b/arch/arm/include/debug/vexpress.S
index dc8e882a6257..acafb229e2b6 100644
--- a/arch/arm/include/debug/vexpress.S
+++ b/arch/arm/include/debug/vexpress.S
@@ -16,6 +16,8 @@
 #define DEBUG_LL_PHYS_BASE_RS1		0x1c000000
 #define DEBUG_LL_UART_OFFSET_RS1	0x00090000
 
+#define DEBUG_LL_UART_PHYS_CRX		0xb0090000
+
 #define DEBUG_LL_VIRT_BASE		0xf8000000
 
 #if defined(CONFIG_DEBUG_VEXPRESS_UART0_DETECT)
@@ -67,6 +69,14 @@
 
 #include <asm/hardware/debug-pl01x.S>
 
+#elif defined(CONFIG_DEBUG_VEXPRESS_UART0_CRX)
+
+	.macro	addruart,rp,tmp,tmp2
+	ldr	\rp, =DEBUG_LL_UART_PHYS_CRX
+	.endm
+
+#include <asm/hardware/debug-pl01x.S>
+
 #else /* CONFIG_DEBUG_LL_UART_NONE */
 
 	.macro	addruart, rp, rv, tmp
diff --git a/arch/arm/include/uapi/asm/hwcap.h b/arch/arm/include/uapi/asm/hwcap.h
index 3688fd15a32d..6d34d080372a 100644
--- a/arch/arm/include/uapi/asm/hwcap.h
+++ b/arch/arm/include/uapi/asm/hwcap.h
@@ -25,6 +25,6 @@
 #define HWCAP_IDIVT	(1 << 18)
 #define HWCAP_VFPD32	(1 << 19)	/* set if VFP has 32 regs (not 16) */
 #define HWCAP_IDIV	(HWCAP_IDIVA | HWCAP_IDIVT)
-
+#define HWCAP_LPAE	(1 << 20)
 
 #endif /* _UAPI__ASMARM_HWCAP_H */
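Since HWCAP_LPAE is a uapi flag, userspace can test it through the auxiliary vector. A minimal check (the define is repeated locally so the snippet builds against older headers):

#include <stdio.h>
#include <sys/auxv.h>

#define HWCAP_LPAE	(1 << 20)	/* as added above */

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("LPAE: %s\n", (hwcap & HWCAP_LPAE) ? "yes" : "no");
	return 0;
}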
diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
index 96ee0929790f..5af0ed1b825a 100644
--- a/arch/arm/include/uapi/asm/ptrace.h
+++ b/arch/arm/include/uapi/asm/ptrace.h
@@ -34,28 +34,47 @@
34 34
35/* 35/*
36 * PSR bits 36 * PSR bits
37 * Note on V7M there is no mode contained in the PSR
37 */ 38 */
38#define USR26_MODE 0x00000000 39#define USR26_MODE 0x00000000
39#define FIQ26_MODE 0x00000001 40#define FIQ26_MODE 0x00000001
40#define IRQ26_MODE 0x00000002 41#define IRQ26_MODE 0x00000002
41#define SVC26_MODE 0x00000003 42#define SVC26_MODE 0x00000003
43#if defined(__KERNEL__) && defined(CONFIG_CPU_V7M)
44/*
45 * Use 0 here to get code right that creates a userspace
46 * or kernel space thread.
47 */
48#define USR_MODE 0x00000000
49#define SVC_MODE 0x00000000
50#else
42#define USR_MODE 0x00000010 51#define USR_MODE 0x00000010
52#define SVC_MODE 0x00000013
53#endif
43#define FIQ_MODE 0x00000011 54#define FIQ_MODE 0x00000011
44#define IRQ_MODE 0x00000012 55#define IRQ_MODE 0x00000012
45#define SVC_MODE 0x00000013
46#define ABT_MODE 0x00000017 56#define ABT_MODE 0x00000017
47#define HYP_MODE 0x0000001a 57#define HYP_MODE 0x0000001a
48#define UND_MODE 0x0000001b 58#define UND_MODE 0x0000001b
49#define SYSTEM_MODE 0x0000001f 59#define SYSTEM_MODE 0x0000001f
50#define MODE32_BIT 0x00000010 60#define MODE32_BIT 0x00000010
51#define MODE_MASK 0x0000001f 61#define MODE_MASK 0x0000001f
52#define PSR_T_BIT 0x00000020 62
53#define PSR_F_BIT 0x00000040 63#define V4_PSR_T_BIT 0x00000020 /* >= V4T, but not V7M */
54#define PSR_I_BIT 0x00000080 64#define V7M_PSR_T_BIT 0x01000000
55#define PSR_A_BIT 0x00000100 65#if defined(__KERNEL__) && defined(CONFIG_CPU_V7M)
56#define PSR_E_BIT 0x00000200 66#define PSR_T_BIT V7M_PSR_T_BIT
57#define PSR_J_BIT 0x01000000 67#else
58#define PSR_Q_BIT 0x08000000 68/* for compatibility */
69#define PSR_T_BIT V4_PSR_T_BIT
70#endif
71
72#define PSR_F_BIT 0x00000040 /* >= V4, but not V7M */
73#define PSR_I_BIT 0x00000080 /* >= V4, but not V7M */
74#define PSR_A_BIT 0x00000100 /* >= V6, but not V7M */
75#define PSR_E_BIT 0x00000200 /* >= V6, but not V7M */
76#define PSR_J_BIT 0x01000000 /* >= V5J, but not V7M */
77#define PSR_Q_BIT 0x08000000 /* >= V5E, including V7M */
59#define PSR_V_BIT 0x10000000 78#define PSR_V_BIT 0x10000000
60#define PSR_C_BIT 0x20000000 79#define PSR_C_BIT 0x20000000
61#define PSR_Z_BIT 0x40000000 80#define PSR_Z_BIT 0x40000000
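
The net effect of the reshuffle above is that PSR_T_BIT is no longer a single architectural constant: on v7-M the Thumb bit sits at xPSR[24] and there is no mode field. A hedged sketch of how PSR-inspecting code has to branch on the variant; the constants are copied from the hunk, the helper is illustrative only:

#include <stdbool.h>
#include <stdint.h>

#define V4_PSR_T_BIT	0x00000020	/* classic ARM: CPSR.T */
#define V7M_PSR_T_BIT	0x01000000	/* ARMv7-M: xPSR.T */

/* illustrative helper: pick the right T bit for the CPU variant */
static bool psr_thumb_state(uint32_t psr, bool is_v7m)
{
	return psr & (is_v7m ? V7M_PSR_T_BIT : V4_PSR_T_BIT);
}
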
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 5f3338eacad2..fccfbdb03df1 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -15,7 +15,7 @@ CFLAGS_REMOVE_return_address.o = -pg
15 15
16# Object file lists. 16# Object file lists.
17 17
18obj-y := elf.o entry-armv.o entry-common.o irq.o opcodes.o \ 18obj-y := elf.o entry-common.o irq.o opcodes.o \
19 process.o ptrace.o return_address.o sched_clock.o \ 19 process.o ptrace.o return_address.o sched_clock.o \
20 setup.o signal.o stacktrace.o sys_arm.o time.o traps.o 20 setup.o signal.o stacktrace.o sys_arm.o time.o traps.o
21 21
@@ -23,6 +23,12 @@ obj-$(CONFIG_ATAGS) += atags_parse.o
23obj-$(CONFIG_ATAGS_PROC) += atags_proc.o 23obj-$(CONFIG_ATAGS_PROC) += atags_proc.o
24obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += atags_compat.o 24obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += atags_compat.o
25 25
26ifeq ($(CONFIG_CPU_V7M),y)
27obj-y += entry-v7m.o
28else
29obj-y += entry-armv.o
30endif
31
26obj-$(CONFIG_OC_ETM) += etm.o 32obj-$(CONFIG_OC_ETM) += etm.o
27obj-$(CONFIG_CPU_IDLE) += cpuidle.o 33obj-$(CONFIG_CPU_IDLE) += cpuidle.o
28obj-$(CONFIG_ISA_DMA_API) += dma.o 34obj-$(CONFIG_ISA_DMA_API) += dma.o
@@ -32,7 +38,10 @@ obj-$(CONFIG_ARTHUR) += arthur.o
32obj-$(CONFIG_ISA_DMA) += dma-isa.o 38obj-$(CONFIG_ISA_DMA) += dma-isa.o
33obj-$(CONFIG_PCI) += bios32.o isa.o 39obj-$(CONFIG_PCI) += bios32.o isa.o
34obj-$(CONFIG_ARM_CPU_SUSPEND) += sleep.o suspend.o 40obj-$(CONFIG_ARM_CPU_SUSPEND) += sleep.o suspend.o
35obj-$(CONFIG_SMP) += smp.o smp_tlb.o 41obj-$(CONFIG_SMP) += smp.o
42ifdef CONFIG_MMU
43obj-$(CONFIG_SMP) += smp_tlb.o
44endif
36obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o 45obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o
37obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o 46obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o
38obj-$(CONFIG_ARM_ARCH_TIMER) += arch_timer.o 47obj-$(CONFIG_ARM_ARCH_TIMER) += arch_timer.o
@@ -82,6 +91,9 @@ obj-$(CONFIG_DEBUG_LL) += debug.o
82obj-$(CONFIG_EARLY_PRINTK) += early_printk.o 91obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
83 92
84obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o 93obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o
85obj-$(CONFIG_ARM_PSCI) += psci.o 94ifeq ($(CONFIG_ARM_PSCI),y)
95obj-y += psci.o
96obj-$(CONFIG_SMP) += psci_smp.o
97endif
86 98
87extra-y := $(head-y) vmlinux.lds 99extra-y := $(head-y) vmlinux.lds
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index ee68cce6b48e..ded041711beb 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -23,6 +23,7 @@
23#include <asm/thread_info.h> 23#include <asm/thread_info.h>
24#include <asm/memory.h> 24#include <asm/memory.h>
25#include <asm/procinfo.h> 25#include <asm/procinfo.h>
26#include <asm/suspend.h>
26#include <asm/hardware/cache-l2x0.h> 27#include <asm/hardware/cache-l2x0.h>
27#include <linux/kbuild.h> 28#include <linux/kbuild.h>
28 29
@@ -145,6 +146,11 @@ int main(void)
145#ifdef MULTI_CACHE 146#ifdef MULTI_CACHE
146 DEFINE(CACHE_FLUSH_KERN_ALL, offsetof(struct cpu_cache_fns, flush_kern_all)); 147 DEFINE(CACHE_FLUSH_KERN_ALL, offsetof(struct cpu_cache_fns, flush_kern_all));
147#endif 148#endif
149#ifdef CONFIG_ARM_CPU_SUSPEND
150 DEFINE(SLEEP_SAVE_SP_SZ, sizeof(struct sleep_save_sp));
151 DEFINE(SLEEP_SAVE_SP_PHYS, offsetof(struct sleep_save_sp, save_ptr_stash_phys));
152 DEFINE(SLEEP_SAVE_SP_VIRT, offsetof(struct sleep_save_sp, save_ptr_stash));
153#endif
148 BLANK(); 154 BLANK();
149 DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); 155 DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);
150 DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); 156 DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
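
The DEFINE() lines added here are what let sleep.S (further down) address struct sleep_save_sp fields symbolically from assembly. A stand-alone sketch of the mechanism, with the struct layout inferred from the field names above (exact types are an assumption; the real kernel emits asm directives rather than printing):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sleep_save_sp {
	uint32_t *save_ptr_stash;	/* assumed layout, see asm/suspend.h */
	uint32_t save_ptr_stash_phys;
};

int main(void)
{
	printf("SLEEP_SAVE_SP_SZ   = %zu\n", sizeof(struct sleep_save_sp));
	printf("SLEEP_SAVE_SP_VIRT = %zu\n",
	       offsetof(struct sleep_save_sp, save_ptr_stash));
	printf("SLEEP_SAVE_SP_PHYS = %zu\n",
	       offsetof(struct sleep_save_sp, save_ptr_stash_phys));
	return 0;
}
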
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 4bc816a74a2e..94104bf69719 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -350,6 +350,9 @@ ENDPROC(ftrace_stub)
350 350
351 .align 5 351 .align 5
352ENTRY(vector_swi) 352ENTRY(vector_swi)
353#ifdef CONFIG_CPU_V7M
354 v7m_exception_entry
355#else
353 sub sp, sp, #S_FRAME_SIZE 356 sub sp, sp, #S_FRAME_SIZE
354 stmia sp, {r0 - r12} @ Calling r0 - r12 357 stmia sp, {r0 - r12} @ Calling r0 - r12
355 ARM( add r8, sp, #S_PC ) 358 ARM( add r8, sp, #S_PC )
@@ -360,6 +363,7 @@ ENTRY(vector_swi)
360 str lr, [sp, #S_PC] @ Save calling PC 363 str lr, [sp, #S_PC] @ Save calling PC
361 str r8, [sp, #S_PSR] @ Save CPSR 364 str r8, [sp, #S_PSR] @ Save CPSR
362 str r0, [sp, #S_OLD_R0] @ Save OLD_R0 365 str r0, [sp, #S_OLD_R0] @ Save OLD_R0
366#endif
363 zero_fp 367 zero_fp
364 368
365#ifdef CONFIG_ALIGNMENT_TRAP 369#ifdef CONFIG_ALIGNMENT_TRAP
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 160f3376ba6d..de23a9beed13 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -5,6 +5,7 @@
5#include <asm/asm-offsets.h> 5#include <asm/asm-offsets.h>
6#include <asm/errno.h> 6#include <asm/errno.h>
7#include <asm/thread_info.h> 7#include <asm/thread_info.h>
8#include <asm/v7m.h>
8 9
9@ Bad Abort numbers 10@ Bad Abort numbers
10@ ----------------- 11@ -----------------
@@ -44,6 +45,116 @@
44#endif 45#endif
45 .endm 46 .endm
46 47
48#ifdef CONFIG_CPU_V7M
49/*
50 * ARMv7-M exception entry/exit macros.
51 *
52 * xPSR, ReturnAddress(), LR (R14), R12, R3, R2, R1, and R0 are
 53 * automatically saved on the current stack (32 bytes) before
54 * switching to the exception stack (SP_main).
55 *
 56 * If the exception is taken while in user mode, SP_main is
 57 * empty. Otherwise, SP_main is automatically 64-bit aligned
58 * (CCR.STKALIGN set).
59 *
 60 * Linux assumes that interrupts are disabled when entering an
61 * exception handler and it may BUG if this is not the case. Interrupts
62 * are disabled during entry and reenabled in the exit macro.
63 *
64 * v7m_exception_slow_exit is used when returning from SVC or PendSV.
65 * When returning to kernel mode, we don't return from exception.
66 */
67 .macro v7m_exception_entry
68 @ determine the location of the registers saved by the core during
69 @ exception entry. Depending on the mode the cpu was in when the
 70 @ exception happened, that is either on the main or the process stack.
71 @ Bit 2 of EXC_RETURN stored in the lr register specifies which stack
72 @ was used.
73 tst lr, #EXC_RET_STACK_MASK
74 mrsne r12, psp
75 moveq r12, sp
76
77 @ we cannot rely on r0-r3 and r12 matching the value saved in the
78 @ exception frame because of tail-chaining. So these have to be
79 @ reloaded.
80 ldmia r12!, {r0-r3}
81
82 @ Linux expects to have irqs off. Do it here before taking stack space
83 cpsid i
84
85 sub sp, #S_FRAME_SIZE-S_IP
86 stmdb sp!, {r0-r11}
87
88 @ load saved r12, lr, return address and xPSR.
89 @ r0-r7 are used for signals and never touched from now on. Clobbering
90 @ r8-r12 is OK.
91 mov r9, r12
92 ldmia r9!, {r8, r10-r12}
93
94 @ calculate the original stack pointer value.
95 @ r9 currently points to the memory location just above the auto saved
96 @ xPSR.
 97 @ The cpu might automatically 8-byte align the stack. Bit 9
 98 @ of the saved xPSR specifies whether stack alignment took place; in
 99 @ that case an extra 32-bit padding word is included on the stack.
100
101 tst r12, V7M_xPSR_FRAMEPTRALIGN
102 addne r9, r9, #4
103
104 @ store saved r12 using str to have a register to hold the base for stm
105 str r8, [sp, #S_IP]
106 add r8, sp, #S_SP
107 @ store r13-r15, xPSR
108 stmia r8!, {r9-r12}
109 @ store old_r0
110 str r0, [r8]
111 .endm
112
113 /*
114 * PENDSV and SVCALL are configured to have the same exception
115 * priorities. As a kernel thread runs at SVCALL execution priority it
116 * can never be preempted and so we will never have to return to a
117 * kernel thread here.
118 */
119 .macro v7m_exception_slow_exit ret_r0
120 cpsid i
121 ldr lr, =EXC_RET_THREADMODE_PROCESSSTACK
122
123 @ read original r12, sp, lr, pc and xPSR
124 add r12, sp, #S_IP
125 ldmia r12, {r1-r5}
126
 127 @ an exception frame is always 8-byte aligned. To tell the hardware
 128 @ whether the sp to be restored is aligned or not, set bit 9 of the
 129 @ saved xPSR accordingly.
130 tst r2, #4
131 subne r2, r2, #4
132 orrne r5, V7M_xPSR_FRAMEPTRALIGN
133 biceq r5, V7M_xPSR_FRAMEPTRALIGN
134
135 @ write basic exception frame
136 stmdb r2!, {r1, r3-r5}
137 ldmia sp, {r1, r3-r5}
138 .if \ret_r0
139 stmdb r2!, {r0, r3-r5}
140 .else
141 stmdb r2!, {r1, r3-r5}
142 .endif
143
144 @ restore process sp
145 msr psp, r2
146
147 @ restore original r4-r11
148 ldmia sp!, {r0-r11}
149
150 @ restore main sp
151 add sp, sp, #S_FRAME_SIZE-S_IP
152
153 cpsie i
154 bx lr
155 .endm
156#endif /* CONFIG_CPU_V7M */
157
47 @ 158 @
48 @ Store/load the USER SP and LR registers by switching to the SYS 159 @ Store/load the USER SP and LR registers by switching to the SYS
49 @ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not 160 @ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
@@ -165,6 +276,18 @@
165 rfeia sp! 276 rfeia sp!
166 .endm 277 .endm
167 278
279#ifdef CONFIG_CPU_V7M
280 /*
281 * Note we don't need to do clrex here as clearing the local monitor is
282 * part of each exception entry and exit sequence.
283 */
284 .macro restore_user_regs, fast = 0, offset = 0
285 .if \offset
286 add sp, #\offset
287 .endif
288 v7m_exception_slow_exit ret_r0 = \fast
289 .endm
290#else /* ifdef CONFIG_CPU_V7M */
168 .macro restore_user_regs, fast = 0, offset = 0 291 .macro restore_user_regs, fast = 0, offset = 0
169 clrex @ clear the exclusive monitor 292 clrex @ clear the exclusive monitor
170 mov r2, sp 293 mov r2, sp
@@ -181,6 +304,7 @@
181 add sp, sp, #S_FRAME_SIZE - S_SP 304 add sp, sp, #S_FRAME_SIZE - S_SP
182 movs pc, lr @ return & move spsr_svc into cpsr 305 movs pc, lr @ return & move spsr_svc into cpsr
183 .endm 306 .endm
307#endif /* ifdef CONFIG_CPU_V7M / else */
184 308
185 .macro get_thread_info, rd 309 .macro get_thread_info, rd
186 mov \rd, sp 310 mov \rd, sp
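
The subtlest step in v7m_exception_entry is recovering the pre-exception stack pointer: the core auto-saves an 8-word frame and may insert one extra padding word for 8-byte alignment, recording that in xPSR bit 9. A C sketch of the arithmetic the tst/addne pair performs, using only the bit position assumed above:

#include <stdint.h>

#define V7M_xPSR_FRAMEPTRALIGN	(1u << 9)

/*
 * frame_base: address of the auto-saved {r0-r3, r12, lr, pc, xPSR}.
 * Returns the SP value that was live before exception entry.
 */
static uint32_t v7m_orig_sp(uint32_t frame_base, uint32_t xpsr)
{
	uint32_t sp = frame_base + 8 * 4;	/* eight words auto-saved */

	if (xpsr & V7M_xPSR_FRAMEPTRALIGN)	/* aligner added a word */
		sp += 4;
	return sp;
}

v7m_exception_slow_exit runs the same logic in reverse: if the SP to restore is only 4-byte aligned it subtracts 4 and sets the bit so the hardware re-adds the padding on exception return.
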
diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S
new file mode 100644
index 000000000000..e00621f1403f
--- /dev/null
+++ b/arch/arm/kernel/entry-v7m.S
@@ -0,0 +1,143 @@
1/*
2 * linux/arch/arm/kernel/entry-v7m.S
3 *
4 * Copyright (C) 2008 ARM Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Low-level vector interface routines for the ARMv7-M architecture
11 */
12#include <asm/memory.h>
13#include <asm/glue.h>
14#include <asm/thread_notify.h>
15#include <asm/v7m.h>
16
17#include <mach/entry-macro.S>
18
19#include "entry-header.S"
20
21#ifdef CONFIG_TRACE_IRQFLAGS
22#error "CONFIG_TRACE_IRQFLAGS not supported on the current ARMv7M implementation"
23#endif
24
25__invalid_entry:
26 v7m_exception_entry
27 adr r0, strerr
28 mrs r1, ipsr
29 mov r2, lr
30 bl printk
31 mov r0, sp
32 bl show_regs
331: b 1b
34ENDPROC(__invalid_entry)
35
36strerr: .asciz "\nUnhandled exception: IPSR = %08lx LR = %08lx\n"
37
38 .align 2
39__irq_entry:
40 v7m_exception_entry
41
42 @
43 @ Invoke the IRQ handler
44 @
45 mrs r0, ipsr
46 ldr r1, =V7M_xPSR_EXCEPTIONNO
47 and r0, r1
48 sub r0, #16
49 mov r1, sp
50 stmdb sp!, {lr}
51 @ routine called with r0 = irq number, r1 = struct pt_regs *
52 bl nvic_do_IRQ
53
54 pop {lr}
55 @
56 @ Check for any pending work if returning to user
57 @
58 ldr r1, =BASEADDR_V7M_SCB
59 ldr r0, [r1, V7M_SCB_ICSR]
60 tst r0, V7M_SCB_ICSR_RETTOBASE
61 beq 2f
62
63 get_thread_info tsk
64 ldr r2, [tsk, #TI_FLAGS]
65 tst r2, #_TIF_WORK_MASK
66 beq 2f @ no work pending
67 mov r0, #V7M_SCB_ICSR_PENDSVSET
68 str r0, [r1, V7M_SCB_ICSR] @ raise PendSV
69
702:
71 @ registers r0-r3 and r12 are automatically restored on exception
 72 @ return. r4-r7 were not clobbered in v7m_exception_entry, so they
 73 @ do not strictly need to be restored. Only r8-r11 must be
 74 @ restored here. The easiest way to do so is to restore r0-r7, too.
75 ldmia sp!, {r0-r11}
76 add sp, #S_FRAME_SIZE-S_IP
77 cpsie i
78 bx lr
79ENDPROC(__irq_entry)
80
81__pendsv_entry:
82 v7m_exception_entry
83
84 ldr r1, =BASEADDR_V7M_SCB
85 mov r0, #V7M_SCB_ICSR_PENDSVCLR
86 str r0, [r1, V7M_SCB_ICSR] @ clear PendSV
87
88 @ execute the pending work, including reschedule
89 get_thread_info tsk
90 mov why, #0
91 b ret_to_user
92ENDPROC(__pendsv_entry)
93
94/*
95 * Register switch for ARMv7-M processors.
96 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
97 * previous and next are guaranteed not to be the same.
98 */
99ENTRY(__switch_to)
100 .fnstart
101 .cantunwind
102 add ip, r1, #TI_CPU_SAVE
103 stmia ip!, {r4 - r11} @ Store most regs on stack
104 str sp, [ip], #4
105 str lr, [ip], #4
106 mov r5, r0
107 add r4, r2, #TI_CPU_SAVE
108 ldr r0, =thread_notify_head
109 mov r1, #THREAD_NOTIFY_SWITCH
110 bl atomic_notifier_call_chain
111 mov ip, r4
112 mov r0, r5
113 ldmia ip!, {r4 - r11} @ Load all regs saved previously
114 ldr sp, [ip]
115 ldr pc, [ip, #4]!
116 .fnend
117ENDPROC(__switch_to)
118
119 .data
120 .align 8
121/*
122 * Vector table (64 words => 256 bytes natural alignment)
123 */
124ENTRY(vector_table)
125 .long 0 @ 0 - Reset stack pointer
126 .long __invalid_entry @ 1 - Reset
127 .long __invalid_entry @ 2 - NMI
128 .long __invalid_entry @ 3 - HardFault
129 .long __invalid_entry @ 4 - MemManage
130 .long __invalid_entry @ 5 - BusFault
131 .long __invalid_entry @ 6 - UsageFault
132 .long __invalid_entry @ 7 - Reserved
133 .long __invalid_entry @ 8 - Reserved
134 .long __invalid_entry @ 9 - Reserved
135 .long __invalid_entry @ 10 - Reserved
136 .long vector_swi @ 11 - SVCall
137 .long __invalid_entry @ 12 - Debug Monitor
138 .long __invalid_entry @ 13 - Reserved
139 .long __pendsv_entry @ 14 - PendSV
140 .long __invalid_entry @ 15 - SysTick
141 .rept 64 - 16
 142 .long __irq_entry @ 16..63 - External Interrupts
143 .endr
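
__irq_entry converts the active exception number into a Linux IRQ number before calling nvic_do_IRQ: exception numbers 0-15 are architectural, so external interrupt n arrives as exception 16 + n. The and/sub pair above is equivalent to this sketch (mask value assumed to cover IPSR[8:0]):

#include <stdint.h>

#define V7M_xPSR_EXCEPTIONNO	0x1ffu	/* assumed: IPSR[8:0] */

/* exceptions 0..15 are architectural; external IRQs start at 16 */
static int v7m_irq_from_ipsr(uint32_t ipsr)
{
	return (int)(ipsr & V7M_xPSR_EXCEPTIONNO) - 16;
}
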
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 6a2e09c952c7..75f14cc3e073 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -17,8 +17,12 @@
17#include <asm/assembler.h> 17#include <asm/assembler.h>
18#include <asm/ptrace.h> 18#include <asm/ptrace.h>
19#include <asm/asm-offsets.h> 19#include <asm/asm-offsets.h>
20#include <asm/memory.h>
20#include <asm/cp15.h> 21#include <asm/cp15.h>
21#include <asm/thread_info.h> 22#include <asm/thread_info.h>
23#include <asm/v7m.h>
24#include <asm/mpu.h>
25#include <asm/page.h>
22 26
23/* 27/*
24 * Kernel startup entry point. 28 * Kernel startup entry point.
@@ -50,21 +54,86 @@ ENTRY(stext)
50 54
51 setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode 55 setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
52 @ and irqs disabled 56 @ and irqs disabled
53#ifndef CONFIG_CPU_CP15 57#if defined(CONFIG_CPU_CP15)
54 ldr r9, =CONFIG_PROCESSOR_ID
55#else
56 mrc p15, 0, r9, c0, c0 @ get processor id 58 mrc p15, 0, r9, c0, c0 @ get processor id
59#elif defined(CONFIG_CPU_V7M)
60 ldr r9, =BASEADDR_V7M_SCB
61 ldr r9, [r9, V7M_SCB_CPUID]
62#else
63 ldr r9, =CONFIG_PROCESSOR_ID
57#endif 64#endif
58 bl __lookup_processor_type @ r5=procinfo r9=cpuid 65 bl __lookup_processor_type @ r5=procinfo r9=cpuid
59 movs r10, r5 @ invalid processor (r5=0)? 66 movs r10, r5 @ invalid processor (r5=0)?
60 beq __error_p @ yes, error 'p' 67 beq __error_p @ yes, error 'p'
61 68
62 adr lr, BSYM(__after_proc_init) @ return (PIC) address 69#ifdef CONFIG_ARM_MPU
70 /* Calculate the size of a region covering just the kernel */
71 ldr r5, =PHYS_OFFSET @ Region start: PHYS_OFFSET
72 ldr r6, =(_end) @ Cover whole kernel
73 sub r6, r6, r5 @ Minimum size of region to map
74 clz r6, r6 @ Region size must be 2^N...
75 rsb r6, r6, #31 @ ...so round up region size
76 lsl r6, r6, #MPU_RSR_SZ @ Put size in right field
77 orr r6, r6, #(1 << MPU_RSR_EN) @ Set region enabled bit
78 bl __setup_mpu
79#endif
80 ldr r13, =__mmap_switched @ address to jump to after
81 @ initialising sctlr
82 adr lr, BSYM(1f) @ return (PIC) address
63 ARM( add pc, r10, #PROCINFO_INITFUNC ) 83 ARM( add pc, r10, #PROCINFO_INITFUNC )
64 THUMB( add r12, r10, #PROCINFO_INITFUNC ) 84 THUMB( add r12, r10, #PROCINFO_INITFUNC )
65 THUMB( mov pc, r12 ) 85 THUMB( mov pc, r12 )
86 1: b __after_proc_init
66ENDPROC(stext) 87ENDPROC(stext)
67 88
89#ifdef CONFIG_SMP
90 __CPUINIT
91ENTRY(secondary_startup)
92 /*
93 * Common entry point for secondary CPUs.
94 *
95 * Ensure that we're in SVC mode, and IRQs are disabled. Lookup
96 * the processor type - there is no need to check the machine type
97 * as it has already been validated by the primary processor.
98 */
99 setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
100#ifndef CONFIG_CPU_CP15
101 ldr r9, =CONFIG_PROCESSOR_ID
102#else
103 mrc p15, 0, r9, c0, c0 @ get processor id
104#endif
105 bl __lookup_processor_type @ r5=procinfo r9=cpuid
106 movs r10, r5 @ invalid processor?
107 beq __error_p @ yes, error 'p'
108
109 adr r4, __secondary_data
110 ldmia r4, {r7, r12}
111
112#ifdef CONFIG_ARM_MPU
113 /* Use MPU region info supplied by __cpu_up */
114 ldr r6, [r7] @ get secondary_data.mpu_szr
115 bl __setup_mpu @ Initialize the MPU
116#endif
117
118 adr lr, BSYM(__after_proc_init) @ return address
119 mov r13, r12 @ __secondary_switched address
120 ARM( add pc, r10, #PROCINFO_INITFUNC )
121 THUMB( add r12, r10, #PROCINFO_INITFUNC )
122 THUMB( mov pc, r12 )
123ENDPROC(secondary_startup)
124
125ENTRY(__secondary_switched)
126 ldr sp, [r7, #8] @ set up the stack pointer
127 mov fp, #0
128 b secondary_start_kernel
129ENDPROC(__secondary_switched)
130
131 .type __secondary_data, %object
132__secondary_data:
133 .long secondary_data
134 .long __secondary_switched
135#endif /* CONFIG_SMP */
136
68/* 137/*
69 * Set the Control Register and Read the process ID. 138 * Set the Control Register and Read the process ID.
70 */ 139 */
@@ -95,10 +164,97 @@ __after_proc_init:
95#endif 164#endif
96 mcr p15, 0, r0, c1, c0, 0 @ write control reg 165 mcr p15, 0, r0, c1, c0, 0 @ write control reg
97#endif /* CONFIG_CPU_CP15 */ 166#endif /* CONFIG_CPU_CP15 */
98 167 mov pc, r13
99 b __mmap_switched @ clear the BSS and jump
100 @ to start_kernel
101ENDPROC(__after_proc_init) 168ENDPROC(__after_proc_init)
102 .ltorg 169 .ltorg
103 170
171#ifdef CONFIG_ARM_MPU
172
173
174/* Set which MPU region should be programmed */
175.macro set_region_nr tmp, rgnr
176 mov \tmp, \rgnr @ Use static region numbers
177 mcr p15, 0, \tmp, c6, c2, 0 @ Write RGNR
178.endm
179
180/* Setup a single MPU region, either D or I side (D-side for unified) */
181.macro setup_region bar, acr, sr, side = MPU_DATA_SIDE
182 mcr p15, 0, \bar, c6, c1, (0 + \side) @ I/DRBAR
183 mcr p15, 0, \acr, c6, c1, (4 + \side) @ I/DRACR
184 mcr p15, 0, \sr, c6, c1, (2 + \side) @ I/DRSR
185.endm
186
187/*
188 * Setup the MPU and initial MPU Regions. We create the following regions:
189 * Region 0: Use this for probing the MPU details, so leave disabled.
190 * Region 1: Background region - covers the whole of RAM as strongly ordered
191 * Region 2: Normal, Shared, cacheable for RAM. From PHYS_OFFSET, size from r6
192 * Region 3: Normal, shared, inaccessible from PL0 to protect the vectors page
193 *
194 * r6: Value to be written to DRSR (and IRSR if required) for MPU_RAM_REGION
195*/
196
197ENTRY(__setup_mpu)
198
199 /* Probe for v7 PMSA compliance */
200 mrc p15, 0, r0, c0, c1, 4 @ Read ID_MMFR0
201 and r0, r0, #(MMFR0_PMSA) @ PMSA field
202 teq r0, #(MMFR0_PMSAv7) @ PMSA v7
203 bne __error_p @ Fail: ARM_MPU on NOT v7 PMSA
204
205 /* Determine whether the D/I-side memory map is unified. We set the
206 * flags here and continue to use them for the rest of this function */
207 mrc p15, 0, r0, c0, c0, 4 @ MPUIR
208 ands r5, r0, #MPUIR_DREGION_SZMASK @ 0 size d region => No MPU
209 beq __error_p @ Fail: ARM_MPU and no MPU
210 tst r0, #MPUIR_nU @ MPUIR_nU = 0 for unified
211
212 /* Setup second region first to free up r6 */
213 set_region_nr r0, #MPU_RAM_REGION
214 isb
215 /* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */
216 ldr r0, =PHYS_OFFSET @ RAM starts at PHYS_OFFSET
217 ldr r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL)
218
219 setup_region r0, r5, r6, MPU_DATA_SIDE @ PHYS_OFFSET, shared, enabled
220 beq 1f @ Memory-map not unified
221 setup_region r0, r5, r6, MPU_INSTR_SIDE @ PHYS_OFFSET, shared, enabled
2221: isb
223
224 /* First/background region */
225 set_region_nr r0, #MPU_BG_REGION
226 isb
227 /* Execute Never, strongly ordered, inaccessible to PL0, rw PL1 */
228 mov r0, #0 @ BG region starts at 0x0
229 ldr r5,=(MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0NA)
230 mov r6, #MPU_RSR_ALL_MEM @ 4GB region, enabled
231
232 setup_region r0, r5, r6, MPU_DATA_SIDE @ 0x0, BG region, enabled
233 beq 2f @ Memory-map not unified
234 setup_region r0, r5, r6, MPU_INSTR_SIDE @ 0x0, BG region, enabled
2352: isb
236
237 /* Vectors region */
238 set_region_nr r0, #MPU_VECTORS_REGION
239 isb
240 /* Shared, inaccessible to PL0, rw PL1 */
241 mov r0, #CONFIG_VECTORS_BASE @ Cover from VECTORS_BASE
242 ldr r5,=(MPU_AP_PL1RW_PL0NA | MPU_RGN_NORMAL)
 243 /* Writing N to bits 5:1 (RSR_SZ) --> region size 2^(N+1) */
244 mov r6, #(((PAGE_SHIFT - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN)
245
246 setup_region r0, r5, r6, MPU_DATA_SIDE @ VECTORS_BASE, PL0 NA, enabled
247 beq 3f @ Memory-map not unified
248 setup_region r0, r5, r6, MPU_INSTR_SIDE @ VECTORS_BASE, PL0 NA, enabled
2493: isb
250
251 /* Enable the MPU */
252 mrc p15, 0, r0, c1, c0, 0 @ Read SCTLR
253 bic r0, r0, #CR_BR @ Disable the 'default mem-map'
 254 orr r0, r0, #CR_M @ Set SCTLR.M (MPU on)
255 mcr p15, 0, r0, c1, c0, 0 @ Enable MPU
256 isb
 257 mov pc, lr
258ENDPROC(__setup_mpu)
259#endif
104#include "head-common.S" 260#include "head-common.S"
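
The clz/rsb sequence at the top of stext leans on the DRSR encoding: a value N in the SZ field selects a region of 2^(N+1) bytes, so N = 31 - clz(len) gives the smallest such region covering len bytes. A C sketch of that rounding; the MPU_RSR_SZ/MPU_RSR_EN bit positions are taken on trust from asm/mpu.h, and len must be non-zero:

#include <stdint.h>

#define MPU_RSR_SZ	1	/* assumed: DRSR.SZ at bits [5:1] */
#define MPU_RSR_EN	0	/* assumed: DRSR.EN at bit 0 */

/* DRSR value for an enabled region of at least len bytes (len > 0) */
static uint32_t mpu_region_drsr(uint32_t len)
{
	uint32_t n = 31 - (uint32_t)__builtin_clz(len);	/* floor(log2) */

	/* selects 2^(n+1) bytes, which always covers len (2x for exact powers) */
	return (n << MPU_RSR_SZ) | (1u << MPU_RSR_EN);
}
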
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 8bac553fe213..45e8935cae4e 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -156,7 +156,7 @@ ENDPROC(stext)
156 * 156 *
157 * Returns: 157 * Returns:
158 * r0, r3, r5-r7 corrupted 158 * r0, r3, r5-r7 corrupted
159 * r4 = physical page table address 159 * r4 = page table (see ARCH_PGD_SHIFT in asm/memory.h)
160 */ 160 */
161__create_page_tables: 161__create_page_tables:
162 pgtbl r4, r8 @ page table address 162 pgtbl r4, r8 @ page table address
@@ -331,6 +331,7 @@ __create_page_tables:
331#endif 331#endif
332#ifdef CONFIG_ARM_LPAE 332#ifdef CONFIG_ARM_LPAE
333 sub r4, r4, #0x1000 @ point to the PGD table 333 sub r4, r4, #0x1000 @ point to the PGD table
334 mov r4, r4, lsr #ARCH_PGD_SHIFT
334#endif 335#endif
335 mov pc, lr 336 mov pc, lr
336ENDPROC(__create_page_tables) 337ENDPROC(__create_page_tables)
@@ -408,7 +409,7 @@ __secondary_data:
408 * r0 = cp#15 control register 409 * r0 = cp#15 control register
409 * r1 = machine ID 410 * r1 = machine ID
410 * r2 = atags or dtb pointer 411 * r2 = atags or dtb pointer
411 * r4 = page table pointer 412 * r4 = page table (see ARCH_PGD_SHIFT in asm/memory.h)
412 * r9 = processor ID 413 * r9 = processor ID
413 * r13 = *virtual* address to jump to upon completion 414 * r13 = *virtual* address to jump to upon completion
414 */ 415 */
@@ -427,10 +428,7 @@ __enable_mmu:
427#ifdef CONFIG_CPU_ICACHE_DISABLE 428#ifdef CONFIG_CPU_ICACHE_DISABLE
428 bic r0, r0, #CR_I 429 bic r0, r0, #CR_I
429#endif 430#endif
430#ifdef CONFIG_ARM_LPAE 431#ifndef CONFIG_ARM_LPAE
431 mov r5, #0
432 mcrr p15, 0, r4, r5, c2 @ load TTBR0
433#else
434 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ 432 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
435 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ 433 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
436 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ 434 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 1315c4ccfa56..4910232c4833 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -153,6 +153,13 @@ THUMB( orr r7, #(1 << 30) ) @ HSCTLR.TE
153 mrc p15, 4, r7, c14, c1, 0 @ CNTHCTL 153 mrc p15, 4, r7, c14, c1, 0 @ CNTHCTL
154 orr r7, r7, #3 @ PL1PCEN | PL1PCTEN 154 orr r7, r7, #3 @ PL1PCEN | PL1PCTEN
155 mcr p15, 4, r7, c14, c1, 0 @ CNTHCTL 155 mcr p15, 4, r7, c14, c1, 0 @ CNTHCTL
156 mov r7, #0
157 mcrr p15, 4, r7, r7, c14 @ CNTVOFF
158
159 @ Disable virtual timer in case it was counting
160 mrc p15, 0, r7, c14, c3, 1 @ CNTV_CTL
161 bic r7, #1 @ Clear ENABLE
162 mcr p15, 0, r7, c14, c3, 1 @ CNTV_CTL
1561: 1631:
157#endif 164#endif
158 165
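
Why zero CNTVOFF here: in the ARMv7 Generic Timer the virtual counter is defined as the physical counter minus the offset, so clearing the offset from HYP makes CNTVCT and CNTPCT read identically for the host. The relationship, as a one-line sketch:

#include <stdint.h>

/* ARMv7 Generic Timer: CNTVCT = CNTPCT - CNTVOFF */
static uint64_t cntvct(uint64_t cntpct, uint64_t cntvoff)
{
	return cntpct - cntvoff;	/* cntvoff == 0 => CNTVCT == CNTPCT */
}
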
diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
index 36531643cc2c..46931880093d 100644
--- a/arch/arm/kernel/psci.c
+++ b/arch/arm/kernel/psci.c
@@ -158,7 +158,7 @@ static const struct of_device_id psci_of_match[] __initconst = {
158 {}, 158 {},
159}; 159};
160 160
161static int __init psci_init(void) 161void __init psci_init(void)
162{ 162{
163 struct device_node *np; 163 struct device_node *np;
164 const char *method; 164 const char *method;
@@ -166,7 +166,7 @@ static int __init psci_init(void)
166 166
167 np = of_find_matching_node(NULL, psci_of_match); 167 np = of_find_matching_node(NULL, psci_of_match);
168 if (!np) 168 if (!np)
169 return 0; 169 return;
170 170
171 pr_info("probing function IDs from device-tree\n"); 171 pr_info("probing function IDs from device-tree\n");
172 172
@@ -206,6 +206,5 @@ static int __init psci_init(void)
206 206
207out_put_node: 207out_put_node:
208 of_node_put(np); 208 of_node_put(np);
209 return 0; 209 return;
210} 210}
211early_initcall(psci_init);
diff --git a/arch/arm/kernel/psci_smp.c b/arch/arm/kernel/psci_smp.c
new file mode 100644
index 000000000000..219f1d73572a
--- /dev/null
+++ b/arch/arm/kernel/psci_smp.c
@@ -0,0 +1,84 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * Copyright (C) 2012 ARM Limited
12 *
13 * Author: Will Deacon <will.deacon@arm.com>
14 */
15
16#include <linux/init.h>
17#include <linux/irqchip/arm-gic.h>
18#include <linux/smp.h>
19#include <linux/of.h>
20
21#include <asm/psci.h>
22#include <asm/smp_plat.h>
23
24/*
25 * psci_smp assumes that the following is true about PSCI:
26 *
27 * cpu_suspend Suspend the execution on a CPU
28 * @state we don't currently describe affinity levels, so just pass 0.
29 * @entry_point the first instruction to be executed on return
30 * returns 0 success, < 0 on failure
31 *
32 * cpu_off Power down a CPU
33 * @state we don't currently describe affinity levels, so just pass 0.
34 * no return on successful call
35 *
36 * cpu_on Power up a CPU
37 * @cpuid cpuid of target CPU, as from MPIDR
38 * @entry_point the first instruction to be executed on return
39 * returns 0 success, < 0 on failure
40 *
41 * migrate Migrate the context to a different CPU
42 * @cpuid cpuid of target CPU, as from MPIDR
43 * returns 0 success, < 0 on failure
44 *
45 */
46
47extern void secondary_startup(void);
48
49static int __cpuinit psci_boot_secondary(unsigned int cpu,
50 struct task_struct *idle)
51{
52 if (psci_ops.cpu_on)
53 return psci_ops.cpu_on(cpu_logical_map(cpu),
54 __pa(secondary_startup));
55 return -ENODEV;
56}
57
58#ifdef CONFIG_HOTPLUG_CPU
59void __ref psci_cpu_die(unsigned int cpu)
60{
61 const struct psci_power_state ps = {
62 .type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
63 };
64
65 if (psci_ops.cpu_off)
66 psci_ops.cpu_off(ps);
67
68 /* We should never return */
69 panic("psci: cpu %d failed to shutdown\n", cpu);
70}
71#endif
72
73bool __init psci_smp_available(void)
74{
75 /* is cpu_on available at least? */
76 return (psci_ops.cpu_on != NULL);
77}
78
79struct smp_operations __initdata psci_smp_ops = {
80 .smp_boot_secondary = psci_boot_secondary,
81#ifdef CONFIG_HOTPLUG_CPU
82 .cpu_die = psci_cpu_die,
83#endif
84};
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 0cde326f5542..9b653278c9e8 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -37,6 +37,7 @@
37#include <asm/cputype.h> 37#include <asm/cputype.h>
38#include <asm/elf.h> 38#include <asm/elf.h>
39#include <asm/procinfo.h> 39#include <asm/procinfo.h>
40#include <asm/psci.h>
40#include <asm/sections.h> 41#include <asm/sections.h>
41#include <asm/setup.h> 42#include <asm/setup.h>
42#include <asm/smp_plat.h> 43#include <asm/smp_plat.h>
@@ -128,7 +129,9 @@ struct stack {
128 u32 und[3]; 129 u32 und[3];
129} ____cacheline_aligned; 130} ____cacheline_aligned;
130 131
132#ifndef CONFIG_CPU_V7M
131static struct stack stacks[NR_CPUS]; 133static struct stack stacks[NR_CPUS];
134#endif
132 135
133char elf_platform[ELF_PLATFORM_SIZE]; 136char elf_platform[ELF_PLATFORM_SIZE];
134EXPORT_SYMBOL(elf_platform); 137EXPORT_SYMBOL(elf_platform);
@@ -207,7 +210,7 @@ static const char *proc_arch[] = {
207 "5TEJ", 210 "5TEJ",
208 "6TEJ", 211 "6TEJ",
209 "7", 212 "7",
210 "?(11)", 213 "7M",
211 "?(12)", 214 "?(12)",
212 "?(13)", 215 "?(13)",
213 "?(14)", 216 "?(14)",
@@ -216,6 +219,12 @@ static const char *proc_arch[] = {
216 "?(17)", 219 "?(17)",
217}; 220};
218 221
222#ifdef CONFIG_CPU_V7M
223static int __get_cpu_architecture(void)
224{
225 return CPU_ARCH_ARMv7M;
226}
227#else
219static int __get_cpu_architecture(void) 228static int __get_cpu_architecture(void)
220{ 229{
221 int cpu_arch; 230 int cpu_arch;
@@ -248,6 +257,7 @@ static int __get_cpu_architecture(void)
248 257
249 return cpu_arch; 258 return cpu_arch;
250} 259}
260#endif
251 261
252int __pure cpu_architecture(void) 262int __pure cpu_architecture(void)
253{ 263{
@@ -293,7 +303,9 @@ static void __init cacheid_init(void)
293{ 303{
294 unsigned int arch = cpu_architecture(); 304 unsigned int arch = cpu_architecture();
295 305
296 if (arch >= CPU_ARCH_ARMv6) { 306 if (arch == CPU_ARCH_ARMv7M) {
307 cacheid = 0;
308 } else if (arch >= CPU_ARCH_ARMv6) {
297 unsigned int cachetype = read_cpuid_cachetype(); 309 unsigned int cachetype = read_cpuid_cachetype();
298 if ((cachetype & (7 << 29)) == 4 << 29) { 310 if ((cachetype & (7 << 29)) == 4 << 29) {
299 /* ARMv7 register format */ 311 /* ARMv7 register format */
@@ -355,7 +367,7 @@ void __init early_print(const char *str, ...)
355 367
356static void __init cpuid_init_hwcaps(void) 368static void __init cpuid_init_hwcaps(void)
357{ 369{
358 unsigned int divide_instrs; 370 unsigned int divide_instrs, vmsa;
359 371
360 if (cpu_architecture() < CPU_ARCH_ARMv7) 372 if (cpu_architecture() < CPU_ARCH_ARMv7)
361 return; 373 return;
@@ -368,6 +380,11 @@ static void __init cpuid_init_hwcaps(void)
368 case 1: 380 case 1:
369 elf_hwcap |= HWCAP_IDIVT; 381 elf_hwcap |= HWCAP_IDIVT;
370 } 382 }
383
384 /* LPAE implies atomic ldrd/strd instructions */
385 vmsa = (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) >> 0;
386 if (vmsa >= 5)
387 elf_hwcap |= HWCAP_LPAE;
371} 388}
372 389
373static void __init feat_v6_fixup(void) 390static void __init feat_v6_fixup(void)
@@ -392,6 +409,7 @@ static void __init feat_v6_fixup(void)
392 */ 409 */
393void notrace cpu_init(void) 410void notrace cpu_init(void)
394{ 411{
412#ifndef CONFIG_CPU_V7M
395 unsigned int cpu = smp_processor_id(); 413 unsigned int cpu = smp_processor_id();
396 struct stack *stk = &stacks[cpu]; 414 struct stack *stk = &stacks[cpu];
397 415
@@ -442,6 +460,7 @@ void notrace cpu_init(void)
442 "I" (offsetof(struct stack, und[0])), 460 "I" (offsetof(struct stack, und[0])),
443 PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE) 461 PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
444 : "r14"); 462 : "r14");
463#endif
445} 464}
446 465
447u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID }; 466u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
@@ -466,6 +485,72 @@ void __init smp_setup_processor_id(void)
466 printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr); 485 printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
467} 486}
468 487
488struct mpidr_hash mpidr_hash;
489#ifdef CONFIG_SMP
490/**
491 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
492 * level in order to build a linear index from an
493 * MPIDR value. Resulting algorithm is a collision
494 * free hash carried out through shifting and ORing
495 */
496static void __init smp_build_mpidr_hash(void)
497{
498 u32 i, affinity;
499 u32 fs[3], bits[3], ls, mask = 0;
500 /*
501 * Pre-scan the list of MPIDRS and filter out bits that do
502 * not contribute to affinity levels, ie they never toggle.
503 */
504 for_each_possible_cpu(i)
505 mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
506 pr_debug("mask of set bits 0x%x\n", mask);
507 /*
508 * Find and stash the last and first bit set at all affinity levels to
509 * check how many bits are required to represent them.
510 */
511 for (i = 0; i < 3; i++) {
512 affinity = MPIDR_AFFINITY_LEVEL(mask, i);
513 /*
 514 * Find the MSB and LSB bit positions
515 * to determine how many bits are required
516 * to express the affinity level.
517 */
518 ls = fls(affinity);
519 fs[i] = affinity ? ffs(affinity) - 1 : 0;
520 bits[i] = ls - fs[i];
521 }
522 /*
523 * An index can be created from the MPIDR by isolating the
524 * significant bits at each affinity level and by shifting
 525 * them in order to compress the 24-bit value space to a
 526 * smaller set of values. This is equivalent to hashing
527 * the MPIDR through shifting and ORing. It is a collision free
528 * hash though not minimal since some levels might contain a number
529 * of CPUs that is not an exact power of 2 and their bit
530 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
531 */
532 mpidr_hash.shift_aff[0] = fs[0];
533 mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
534 mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
535 (bits[1] + bits[0]);
536 mpidr_hash.mask = mask;
537 mpidr_hash.bits = bits[2] + bits[1] + bits[0];
538 pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
539 mpidr_hash.shift_aff[0],
540 mpidr_hash.shift_aff[1],
541 mpidr_hash.shift_aff[2],
542 mpidr_hash.mask,
543 mpidr_hash.bits);
544 /*
545 * 4x is an arbitrary value used to warn on a hash table much bigger
546 * than expected on most systems.
547 */
548 if (mpidr_hash_size() > 4 * num_possible_cpus())
549 pr_warn("Large number of MPIDR hash buckets detected\n");
550 sync_cache_w(&mpidr_hash);
551}
552#endif
553
469static void __init setup_processor(void) 554static void __init setup_processor(void)
470{ 555{
471 struct proc_info_list *list; 556 struct proc_info_list *list;
@@ -803,10 +888,17 @@ void __init setup_arch(char **cmdline_p)
803 unflatten_device_tree(); 888 unflatten_device_tree();
804 889
805 arm_dt_init_cpu_maps(); 890 arm_dt_init_cpu_maps();
891 psci_init();
806#ifdef CONFIG_SMP 892#ifdef CONFIG_SMP
807 if (is_smp()) { 893 if (is_smp()) {
808 smp_set_ops(mdesc->smp); 894 if (!mdesc->smp_init || !mdesc->smp_init()) {
895 if (psci_smp_available())
896 smp_set_ops(&psci_smp_ops);
897 else if (mdesc->smp)
898 smp_set_ops(mdesc->smp);
899 }
809 smp_init_cpus(); 900 smp_init_cpus();
901 smp_build_mpidr_hash();
810 } 902 }
811#endif 903#endif
812 904
@@ -879,6 +971,7 @@ static const char *hwcap_str[] = {
879 "vfpv4", 971 "vfpv4",
880 "idiva", 972 "idiva",
881 "idivt", 973 "idivt",
974 "lpae",
882 NULL 975 NULL
883}; 976};
884 977
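
smp_build_mpidr_hash is easier to follow with concrete numbers. The sketch below reproduces the shift derivation and applies the resulting hash to a hypothetical four-CPU MPIDR set, matching the shifting-and-ORing later done in sleep.S (MPIDR_LEVEL_BITS assumed to be 8):

#include <stdint.h>
#include <stdio.h>

#define MPIDR_LEVEL_BITS 8			/* assumed */
#define AFF(m, l)	(((m) >> (8 * (l))) & 0xff)

int main(void)
{
	uint32_t mpidr[] = { 0x000000, 0x000001, 0x000100, 0x000101 };
	uint32_t mask = 0, fs[3], bits[3], shift[3];
	int i;

	for (i = 0; i < 4; i++)			/* bits that ever toggle */
		mask |= mpidr[i] ^ mpidr[0];

	for (i = 0; i < 3; i++) {
		uint32_t aff = AFF(mask, i);
		int ls = aff ? 32 - __builtin_clz(aff) : 0;	/* fls() */
		fs[i] = aff ? (uint32_t)__builtin_ctz(aff) : 0;	/* ffs()-1 */
		bits[i] = ls - fs[i];
	}
	shift[0] = fs[0];
	shift[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	shift[2] = 2 * MPIDR_LEVEL_BITS + fs[2] - (bits[1] + bits[0]);

	for (i = 0; i < 4; i++) {	/* apply the hash as sleep.S does */
		uint32_t m = mpidr[i] & mask;
		uint32_t idx = (m & 0xff) >> shift[0] |
			       (m & 0xff00) >> shift[1] |
			       (m & 0xff0000) >> shift[2];
		printf("mpidr 0x%06x -> index %u\n", (unsigned)mpidr[i],
		       (unsigned)idx);
	}
	return 0;
}

For this set the four MPIDRs map to the dense indices 0..3 with no collisions, which is exactly the property the hash table consumers rely on.
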
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 296786bdbb73..1c16c35c271a 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -392,14 +392,19 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
392 if (ksig->ka.sa.sa_flags & SA_SIGINFO) 392 if (ksig->ka.sa.sa_flags & SA_SIGINFO)
393 idx += 3; 393 idx += 3;
394 394
395 /*
396 * Put the sigreturn code on the stack no matter which return
397 * mechanism we use in order to remain ABI compliant
398 */
395 if (__put_user(sigreturn_codes[idx], rc) || 399 if (__put_user(sigreturn_codes[idx], rc) ||
396 __put_user(sigreturn_codes[idx+1], rc+1)) 400 __put_user(sigreturn_codes[idx+1], rc+1))
397 return 1; 401 return 1;
398 402
399 if (cpsr & MODE32_BIT) { 403 if ((cpsr & MODE32_BIT) && !IS_ENABLED(CONFIG_ARM_MPU)) {
400 /* 404 /*
401 * 32-bit code can use the new high-page 405 * 32-bit code can use the new high-page
402 * signal return code support. 406 * signal return code support except when the MPU has
407 * protected the vectors page from PL0
403 */ 408 */
404 retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb; 409 retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
405 } else { 410 } else {
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index 987dcf33415c..db1536b8b30b 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -7,6 +7,49 @@
7 .text 7 .text
8 8
9/* 9/*
10 * Implementation of MPIDR hash algorithm through shifting
11 * and OR'ing.
12 *
13 * @dst: register containing hash result
14 * @rs0: register containing affinity level 0 bit shift
15 * @rs1: register containing affinity level 1 bit shift
16 * @rs2: register containing affinity level 2 bit shift
17 * @mpidr: register containing MPIDR value
18 * @mask: register containing MPIDR mask
19 *
20 * Pseudo C-code:
21 *
22 *u32 dst;
23 *
24 *compute_mpidr_hash(u32 rs0, u32 rs1, u32 rs2, u32 mpidr, u32 mask) {
25 * u32 aff0, aff1, aff2;
26 * u32 mpidr_masked = mpidr & mask;
27 * aff0 = mpidr_masked & 0xff;
28 * aff1 = mpidr_masked & 0xff00;
29 * aff2 = mpidr_masked & 0xff0000;
30 * dst = (aff0 >> rs0 | aff1 >> rs1 | aff2 >> rs2);
31 *}
32 * Input registers: rs0, rs1, rs2, mpidr, mask
33 * Output register: dst
34 * Note: input and output registers must be disjoint register sets
 35 * (eg: a macro instance with mpidr = r1 and dst = r1 is invalid)
36 */
37 .macro compute_mpidr_hash dst, rs0, rs1, rs2, mpidr, mask
38 and \mpidr, \mpidr, \mask @ mask out MPIDR bits
39 and \dst, \mpidr, #0xff @ mask=aff0
40 ARM( mov \dst, \dst, lsr \rs0 ) @ dst=aff0>>rs0
41 THUMB( lsr \dst, \dst, \rs0 )
42 and \mask, \mpidr, #0xff00 @ mask = aff1
43 ARM( orr \dst, \dst, \mask, lsr \rs1 ) @ dst|=(aff1>>rs1)
44 THUMB( lsr \mask, \mask, \rs1 )
45 THUMB( orr \dst, \dst, \mask )
46 and \mask, \mpidr, #0xff0000 @ mask = aff2
47 ARM( orr \dst, \dst, \mask, lsr \rs2 ) @ dst|=(aff2>>rs2)
48 THUMB( lsr \mask, \mask, \rs2 )
49 THUMB( orr \dst, \dst, \mask )
50 .endm
51
52/*
10 * Save CPU state for a suspend. This saves the CPU general purpose 53 * Save CPU state for a suspend. This saves the CPU general purpose
11 * registers, and allocates space on the kernel stack to save the CPU 54 * registers, and allocates space on the kernel stack to save the CPU
12 * specific registers and some other data for resume. 55 * specific registers and some other data for resume.
@@ -29,12 +72,18 @@ ENTRY(__cpu_suspend)
29 mov r1, r4 @ size of save block 72 mov r1, r4 @ size of save block
30 mov r2, r5 @ virtual SP 73 mov r2, r5 @ virtual SP
31 ldr r3, =sleep_save_sp 74 ldr r3, =sleep_save_sp
32#ifdef CONFIG_SMP 75 ldr r3, [r3, #SLEEP_SAVE_SP_VIRT]
33 ALT_SMP(mrc p15, 0, lr, c0, c0, 5) 76 ALT_SMP(mrc p15, 0, r9, c0, c0, 5)
34 ALT_UP(mov lr, #0) 77 ALT_UP_B(1f)
35 and lr, lr, #15 78 ldr r8, =mpidr_hash
79 /*
 80 * This ldmia relies on the memory layout of
81 * struct mpidr_hash.
82 */
83 ldmia r8, {r4-r7} @ r4 = mpidr mask (r5,r6,r7) = l[0,1,2] shifts
84 compute_mpidr_hash lr, r5, r6, r7, r9, r4
36 add r3, r3, lr, lsl #2 85 add r3, r3, lr, lsl #2
37#endif 861:
38 bl __cpu_suspend_save 87 bl __cpu_suspend_save
39 adr lr, BSYM(cpu_suspend_abort) 88 adr lr, BSYM(cpu_suspend_abort)
40 ldmfd sp!, {r0, pc} @ call suspend fn 89 ldmfd sp!, {r0, pc} @ call suspend fn
@@ -81,15 +130,23 @@ ENDPROC(cpu_resume_after_mmu)
81 .data 130 .data
82 .align 131 .align
83ENTRY(cpu_resume) 132ENTRY(cpu_resume)
84#ifdef CONFIG_SMP 133 mov r1, #0
85 adr r0, sleep_save_sp 134 ALT_SMP(mrc p15, 0, r0, c0, c0, 5)
86 ALT_SMP(mrc p15, 0, r1, c0, c0, 5) 135 ALT_UP_B(1f)
87 ALT_UP(mov r1, #0) 136 adr r2, mpidr_hash_ptr
88 and r1, r1, #15 137 ldr r3, [r2]
89 ldr r0, [r0, r1, lsl #2] @ stack phys addr 138 add r2, r2, r3 @ r2 = struct mpidr_hash phys address
90#else 139 /*
 91 ldr r0, sleep_save_sp @ stack phys addr 140 * This ldmia relies on the memory layout of
92#endif 141 * struct mpidr_hash.
142 */
143 ldmia r2, { r3-r6 } @ r3 = mpidr mask (r4,r5,r6) = l[0,1,2] shifts
144 compute_mpidr_hash r1, r4, r5, r6, r0, r3
1451:
146 adr r0, _sleep_save_sp
147 ldr r0, [r0, #SLEEP_SAVE_SP_PHYS]
148 ldr r0, [r0, r1, lsl #2]
149
93 setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1 @ set SVC, irqs off 150 setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1 @ set SVC, irqs off
94 @ load phys pgd, stack, resume fn 151 @ load phys pgd, stack, resume fn
95 ARM( ldmia r0!, {r1, sp, pc} ) 152 ARM( ldmia r0!, {r1, sp, pc} )
@@ -98,7 +155,11 @@ THUMB( mov sp, r2 )
98THUMB( bx r3 ) 155THUMB( bx r3 )
99ENDPROC(cpu_resume) 156ENDPROC(cpu_resume)
100 157
101sleep_save_sp: 158 .align 2
102 .rept CONFIG_NR_CPUS 159mpidr_hash_ptr:
103 .long 0 @ preserve stack phys ptr here 160 .long mpidr_hash - . @ mpidr_hash struct offset
104 .endr 161
162 .type sleep_save_sp, #object
163ENTRY(sleep_save_sp)
164_sleep_save_sp:
165 .space SLEEP_SAVE_SP_SZ @ struct sleep_save_sp
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 5919eb451bb9..c5fb5469054b 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -45,6 +45,7 @@
45#include <asm/smp_plat.h> 45#include <asm/smp_plat.h>
46#include <asm/virt.h> 46#include <asm/virt.h>
47#include <asm/mach/arch.h> 47#include <asm/mach/arch.h>
48#include <asm/mpu.h>
48 49
49/* 50/*
50 * as from 2.5, kernels no longer have an init_tasks structure 51 * as from 2.5, kernels no longer have an init_tasks structure
@@ -78,6 +79,13 @@ void __init smp_set_ops(struct smp_operations *ops)
78 smp_ops = *ops; 79 smp_ops = *ops;
79}; 80};
80 81
82static unsigned long get_arch_pgd(pgd_t *pgd)
83{
84 phys_addr_t pgdir = virt_to_phys(pgd);
85 BUG_ON(pgdir & ARCH_PGD_MASK);
86 return pgdir >> ARCH_PGD_SHIFT;
87}
88
81int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) 89int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
82{ 90{
83 int ret; 91 int ret;
@@ -87,8 +95,14 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
87 * its stack and the page tables. 95 * its stack and the page tables.
88 */ 96 */
89 secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; 97 secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
90 secondary_data.pgdir = virt_to_phys(idmap_pgd); 98#ifdef CONFIG_ARM_MPU
91 secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir); 99 secondary_data.mpu_rgn_szr = mpu_rgn_info.rgns[MPU_RAM_REGION].drsr;
100#endif
101
102#ifdef CONFIG_MMU
103 secondary_data.pgdir = get_arch_pgd(idmap_pgd);
104 secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
105#endif
92 __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data)); 106 __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
93 outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1)); 107 outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
94 108
@@ -112,9 +126,8 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
112 pr_err("CPU%u: failed to boot: %d\n", cpu, ret); 126 pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
113 } 127 }
114 128
115 secondary_data.stack = NULL;
116 secondary_data.pgdir = 0;
117 129
130 memset(&secondary_data, 0, sizeof(secondary_data));
118 return ret; 131 return ret;
119} 132}
120 133
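
get_arch_pgd addresses a 32-bit packing problem: with LPAE the page tables may sit above 4GB, but secondary_data only has a 32-bit slot, so the (suitably aligned) physical address is stored shifted down by ARCH_PGD_SHIFT. A sketch with a hypothetical shift value; the real one lives in asm/memory.h:

#include <assert.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;

#define ARCH_PGD_SHIFT	5	/* hypothetical; see asm/memory.h */
#define ARCH_PGD_MASK	((1ull << ARCH_PGD_SHIFT) - 1)

/* packs a (32 + ARCH_PGD_SHIFT)-bit physical address into 32 bits */
static uint32_t pack_arch_pgd(phys_addr_t pgdir)
{
	assert((pgdir & ARCH_PGD_MASK) == 0);	/* alignment requirement */
	return (uint32_t)(pgdir >> ARCH_PGD_SHIFT);
}
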
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index c59c97ea8268..41cf3cbf756d 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -1,15 +1,54 @@
1#include <linux/init.h> 1#include <linux/init.h>
2#include <linux/slab.h>
2 3
4#include <asm/cacheflush.h>
3#include <asm/idmap.h> 5#include <asm/idmap.h>
4#include <asm/pgalloc.h> 6#include <asm/pgalloc.h>
5#include <asm/pgtable.h> 7#include <asm/pgtable.h>
6#include <asm/memory.h> 8#include <asm/memory.h>
9#include <asm/smp_plat.h>
7#include <asm/suspend.h> 10#include <asm/suspend.h>
8#include <asm/tlbflush.h> 11#include <asm/tlbflush.h>
9 12
10extern int __cpu_suspend(unsigned long, int (*)(unsigned long)); 13extern int __cpu_suspend(unsigned long, int (*)(unsigned long));
11extern void cpu_resume_mmu(void); 14extern void cpu_resume_mmu(void);
12 15
16#ifdef CONFIG_MMU
17/*
18 * Hide the first two arguments to __cpu_suspend - these are an implementation
19 * detail which platform code shouldn't have to know about.
20 */
21int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
22{
23 struct mm_struct *mm = current->active_mm;
24 int ret;
25
26 if (!idmap_pgd)
27 return -EINVAL;
28
29 /*
30 * Provide a temporary page table with an identity mapping for
31 * the MMU-enable code, required for resuming. On successful
32 * resume (indicated by a zero return code), we need to switch
33 * back to the correct page tables.
34 */
35 ret = __cpu_suspend(arg, fn);
36 if (ret == 0) {
37 cpu_switch_mm(mm->pgd, mm);
38 local_flush_bp_all();
39 local_flush_tlb_all();
40 }
41
42 return ret;
43}
44#else
45int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
46{
47 return __cpu_suspend(arg, fn);
48}
49#define idmap_pgd NULL
50#endif
51
13/* 52/*
14 * This is called by __cpu_suspend() to save the state, and do whatever 53 * This is called by __cpu_suspend() to save the state, and do whatever
15 * flushing is required to ensure that when the CPU goes to sleep we have 54 * flushing is required to ensure that when the CPU goes to sleep we have
@@ -47,30 +86,19 @@ void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
47 virt_to_phys(save_ptr) + sizeof(*save_ptr)); 86 virt_to_phys(save_ptr) + sizeof(*save_ptr));
48} 87}
49 88
50/* 89extern struct sleep_save_sp sleep_save_sp;
51 * Hide the first two arguments to __cpu_suspend - these are an implementation
52 * detail which platform code shouldn't have to know about.
53 */
54int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
55{
56 struct mm_struct *mm = current->active_mm;
57 int ret;
58
59 if (!idmap_pgd)
60 return -EINVAL;
61 90
62 /* 91static int cpu_suspend_alloc_sp(void)
63 * Provide a temporary page table with an identity mapping for 92{
64 * the MMU-enable code, required for resuming. On successful 93 void *ctx_ptr;
65 * resume (indicated by a zero return code), we need to switch 94 /* ctx_ptr is an array of physical addresses */
66 * back to the correct page tables. 95 ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(u32), GFP_KERNEL);
67 */
68 ret = __cpu_suspend(arg, fn);
69 if (ret == 0) {
70 cpu_switch_mm(mm->pgd, mm);
71 local_flush_bp_all();
72 local_flush_tlb_all();
73 }
74 96
75 return ret; 97 if (WARN_ON(!ctx_ptr))
98 return -ENOMEM;
99 sleep_save_sp.save_ptr_stash = ctx_ptr;
100 sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr);
101 sync_cache_w(&sleep_save_sp);
102 return 0;
76} 103}
104early_initcall(cpu_suspend_alloc_sp);
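
cpu_suspend_alloc_sp sizes the stash from the MPIDR hash: one 32-bit physical pointer per possible hash index, i.e. 1 << mpidr_hash.bits entries. Sketch of the sizing, with the helper shape assumed from asm/smp_plat.h:

#include <stdint.h>
#include <stdlib.h>

/* assumed equivalent of mpidr_hash_size(): one slot per hash index */
static unsigned int mpidr_hash_size(unsigned int hash_bits)
{
	return 1u << hash_bits;
}

/* mirrors the kcalloc() above: one physical save pointer per bucket */
static uint32_t *alloc_save_ptr_stash(unsigned int hash_bits)
{
	return calloc(mpidr_hash_size(hash_bits), sizeof(uint32_t));
}
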
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 517bfd4da1c9..cab094c234ee 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -812,6 +812,7 @@ static void __init kuser_get_tls_init(unsigned long vectors)
812 812
813void __init early_trap_init(void *vectors_base) 813void __init early_trap_init(void *vectors_base)
814{ 814{
815#ifndef CONFIG_CPU_V7M
815 unsigned long vectors = (unsigned long)vectors_base; 816 unsigned long vectors = (unsigned long)vectors_base;
816 extern char __stubs_start[], __stubs_end[]; 817 extern char __stubs_start[], __stubs_end[];
817 extern char __vectors_start[], __vectors_end[]; 818 extern char __vectors_start[], __vectors_end[];
@@ -843,4 +844,11 @@ void __init early_trap_init(void *vectors_base)
843 844
844 flush_icache_range(vectors, vectors + PAGE_SIZE); 845 flush_icache_range(vectors, vectors + PAGE_SIZE);
845 modify_domain(DOMAIN_USER, DOMAIN_CLIENT); 846 modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
847#else /* ifndef CONFIG_CPU_V7M */
848 /*
849 * on V7-M there is no need to copy the vector table to a dedicated
850 * memory area. The address is configurable and so a table in the kernel
851 * image can be used.
852 */
853#endif
846} 854}
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
index 3c8f2f0b4c5e..d43cfb5b37c4 100644
--- a/arch/arm/kvm/interrupts_head.S
+++ b/arch/arm/kvm/interrupts_head.S
@@ -497,6 +497,10 @@ vcpu .req r0 @ vcpu pointer always in r0
497 add r5, vcpu, r4 497 add r5, vcpu, r4
498 strd r2, r3, [r5] 498 strd r2, r3, [r5]
499 499
500 @ Ensure host CNTVCT == CNTPCT
501 mov r2, #0
502 mcrr p15, 4, r2, r2, c14 @ CNTVOFF
503
5001: 5041:
501#endif 505#endif
502 @ Allow physical timer/counter access for the host 506 @ Allow physical timer/counter access for the host
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index ff18fc2ea46f..756970f6bd10 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -76,7 +76,7 @@ config SOC_EXYNOS5440
76 default y 76 default y
77 depends on ARCH_EXYNOS5 77 depends on ARCH_EXYNOS5
78 select ARCH_HAS_OPP 78 select ARCH_HAS_OPP
79 select ARM_ARCH_TIMER 79 select HAVE_ARM_ARCH_TIMER
80 select AUTO_ZRELADDR 80 select AUTO_ZRELADDR
81 select PINCTRL 81 select PINCTRL
82 select PINCTRL_EXYNOS5440 82 select PINCTRL_EXYNOS5440
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index 1a517e2fe449..767a6c3c27a4 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -23,7 +23,7 @@ config ARCH_R8A73A4
23 select ARCH_WANT_OPTIONAL_GPIOLIB 23 select ARCH_WANT_OPTIONAL_GPIOLIB
24 select ARM_GIC 24 select ARM_GIC
25 select CPU_V7 25 select CPU_V7
26 select ARM_ARCH_TIMER 26 select HAVE_ARM_ARCH_TIMER
27 select SH_CLK_CPG 27 select SH_CLK_CPG
28 select RENESAS_IRQC 28 select RENESAS_IRQC
29 29
@@ -56,7 +56,7 @@ config ARCH_R8A7790
56 select ARCH_WANT_OPTIONAL_GPIOLIB 56 select ARCH_WANT_OPTIONAL_GPIOLIB
57 select ARM_GIC 57 select ARM_GIC
58 select CPU_V7 58 select CPU_V7
59 select ARM_ARCH_TIMER 59 select HAVE_ARM_ARCH_TIMER
60 select SH_CLK_CPG 60 select SH_CLK_CPG
61 select RENESAS_IRQC 61 select RENESAS_IRQC
62 62
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
index 84d72fc36dfe..65c5ae6fa386 100644
--- a/arch/arm/mach-tegra/Kconfig
+++ b/arch/arm/mach-tegra/Kconfig
@@ -60,7 +60,7 @@ config ARCH_TEGRA_3x_SOC
60 60
61config ARCH_TEGRA_114_SOC 61config ARCH_TEGRA_114_SOC
62 bool "Enable support for Tegra114 family" 62 bool "Enable support for Tegra114 family"
63 select ARM_ARCH_TIMER 63 select HAVE_ARM_ARCH_TIMER
64 select ARM_GIC 64 select ARM_GIC
65 select ARM_L1_CACHE_SHIFT_6 65 select ARM_L1_CACHE_SHIFT_6
66 select CPU_FREQ_TABLE if CPU_FREQ 66 select CPU_FREQ_TABLE if CPU_FREQ
diff --git a/arch/arm/mach-virt/Kconfig b/arch/arm/mach-virt/Kconfig
index 8958f0d896bc..081d46929436 100644
--- a/arch/arm/mach-virt/Kconfig
+++ b/arch/arm/mach-virt/Kconfig
@@ -2,7 +2,7 @@ config ARCH_VIRT
2 bool "Dummy Virtual Machine" if ARCH_MULTI_V7 2 bool "Dummy Virtual Machine" if ARCH_MULTI_V7
3 select ARCH_WANT_OPTIONAL_GPIOLIB 3 select ARCH_WANT_OPTIONAL_GPIOLIB
4 select ARM_GIC 4 select ARM_GIC
5 select ARM_ARCH_TIMER 5 select HAVE_ARM_ARCH_TIMER
6 select ARM_PSCI 6 select ARM_PSCI
7 select HAVE_SMP 7 select HAVE_SMP
8 select CPU_V7 8 select CPU_V7
diff --git a/arch/arm/mach-virt/Makefile b/arch/arm/mach-virt/Makefile
index 042afc1f8c44..7ddbfa60227f 100644
--- a/arch/arm/mach-virt/Makefile
+++ b/arch/arm/mach-virt/Makefile
@@ -3,4 +3,3 @@
3# 3#
4 4
5obj-y := virt.o 5obj-y := virt.o
6obj-$(CONFIG_SMP) += platsmp.o
diff --git a/arch/arm/mach-virt/platsmp.c b/arch/arm/mach-virt/platsmp.c
deleted file mode 100644
index f4143f5bfa5b..000000000000
--- a/arch/arm/mach-virt/platsmp.c
+++ /dev/null
@@ -1,50 +0,0 @@
1/*
2 * Dummy Virtual Machine - does what it says on the tin.
3 *
4 * Copyright (C) 2012 ARM Ltd
5 * Author: Will Deacon <will.deacon@arm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/init.h>
21#include <linux/smp.h>
22#include <linux/of.h>
23
24#include <asm/psci.h>
25#include <asm/smp_plat.h>
26
27extern void secondary_startup(void);
28
29static void __init virt_smp_init_cpus(void)
30{
31}
32
33static void __init virt_smp_prepare_cpus(unsigned int max_cpus)
34{
35}
36
37static int __cpuinit virt_boot_secondary(unsigned int cpu,
38 struct task_struct *idle)
39{
40 if (psci_ops.cpu_on)
41 return psci_ops.cpu_on(cpu_logical_map(cpu),
42 __pa(secondary_startup));
43 return -ENODEV;
44}
45
46struct smp_operations __initdata virt_smp_ops = {
47 .smp_init_cpus = virt_smp_init_cpus,
48 .smp_prepare_cpus = virt_smp_prepare_cpus,
49 .smp_boot_secondary = virt_boot_secondary,
50};
diff --git a/arch/arm/mach-virt/virt.c b/arch/arm/mach-virt/virt.c
index 061f283f579e..a67d2dd5bb60 100644
--- a/arch/arm/mach-virt/virt.c
+++ b/arch/arm/mach-virt/virt.c
@@ -36,11 +36,8 @@ static const char *virt_dt_match[] = {
36 NULL 36 NULL
37}; 37};
38 38
39extern struct smp_operations virt_smp_ops;
40
41DT_MACHINE_START(VIRT, "Dummy Virtual Machine") 39DT_MACHINE_START(VIRT, "Dummy Virtual Machine")
42 .init_irq = irqchip_init, 40 .init_irq = irqchip_init,
43 .init_machine = virt_init, 41 .init_machine = virt_init,
44 .smp = smp_ops(virt_smp_ops),
45 .dt_compat = virt_dt_match, 42 .dt_compat = virt_dt_match,
46MACHINE_END 43MACHINE_END
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 35955b54944c..6cacdc8dd654 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -392,11 +392,21 @@ config CPU_V7
392 select CPU_CACHE_V7 392 select CPU_CACHE_V7
393 select CPU_CACHE_VIPT 393 select CPU_CACHE_VIPT
394 select CPU_COPY_V6 if MMU 394 select CPU_COPY_V6 if MMU
395 select CPU_CP15_MMU 395 select CPU_CP15_MMU if MMU
396 select CPU_CP15_MPU if !MMU
396 select CPU_HAS_ASID if MMU 397 select CPU_HAS_ASID if MMU
397 select CPU_PABRT_V7 398 select CPU_PABRT_V7
398 select CPU_TLB_V7 if MMU 399 select CPU_TLB_V7 if MMU
399 400
401# ARMv7M
402config CPU_V7M
403 bool
404 select CPU_32v7M
405 select CPU_ABRT_NOMMU
406 select CPU_CACHE_NOP
407 select CPU_PABRT_LEGACY
408 select CPU_THUMBONLY
409
400config CPU_THUMBONLY 410config CPU_THUMBONLY
401 bool 411 bool
402 # There are no CPUs available with MMU that don't implement an ARM ISA: 412 # There are no CPUs available with MMU that don't implement an ARM ISA:
@@ -441,6 +451,9 @@ config CPU_32v6K
441config CPU_32v7 451config CPU_32v7
442 bool 452 bool
443 453
454config CPU_32v7M
455 bool
456
444# The abort model 457# The abort model
445config CPU_ABRT_NOMMU 458config CPU_ABRT_NOMMU
446 bool 459 bool
@@ -491,6 +504,9 @@ config CPU_CACHE_V6
491config CPU_CACHE_V7 504config CPU_CACHE_V7
492 bool 505 bool
493 506
507config CPU_CACHE_NOP
508 bool
509
494config CPU_CACHE_VIVT 510config CPU_CACHE_VIVT
495 bool 511 bool
496 512
@@ -613,7 +629,11 @@ config ARCH_DMA_ADDR_T_64BIT
613 629
614config ARM_THUMB 630config ARM_THUMB
615 bool "Support Thumb user binaries" if !CPU_THUMBONLY 631 bool "Support Thumb user binaries" if !CPU_THUMBONLY
616 depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || CPU_V7 || CPU_FEROCEON 632 depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || \
633 CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || \
634 CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || \
635 CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || \
636 CPU_V7 || CPU_FEROCEON || CPU_V7M
617 default y 637 default y
618 help 638 help
619 Say Y if you want to include kernel support for running user space 639 Say Y if you want to include kernel support for running user space
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 9e51be96f635..ecfe6e53f6e0 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_MODULES) += proc-syms.o
16 16
17obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o 17obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o
18obj-$(CONFIG_HIGHMEM) += highmem.o 18obj-$(CONFIG_HIGHMEM) += highmem.o
19obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
19 20
20obj-$(CONFIG_CPU_ABRT_NOMMU) += abort-nommu.o 21obj-$(CONFIG_CPU_ABRT_NOMMU) += abort-nommu.o
21obj-$(CONFIG_CPU_ABRT_EV4) += abort-ev4.o 22obj-$(CONFIG_CPU_ABRT_EV4) += abort-ev4.o
@@ -39,6 +40,7 @@ obj-$(CONFIG_CPU_CACHE_V4WB) += cache-v4wb.o
39obj-$(CONFIG_CPU_CACHE_V6) += cache-v6.o 40obj-$(CONFIG_CPU_CACHE_V6) += cache-v6.o
40obj-$(CONFIG_CPU_CACHE_V7) += cache-v7.o 41obj-$(CONFIG_CPU_CACHE_V7) += cache-v7.o
41obj-$(CONFIG_CPU_CACHE_FA) += cache-fa.o 42obj-$(CONFIG_CPU_CACHE_FA) += cache-fa.o
43obj-$(CONFIG_CPU_CACHE_NOP) += cache-nop.o
42 44
43AFLAGS_cache-v6.o :=-Wa,-march=armv6 45AFLAGS_cache-v6.o :=-Wa,-march=armv6
44AFLAGS_cache-v7.o :=-Wa,-march=armv7-a 46AFLAGS_cache-v7.o :=-Wa,-march=armv7-a
@@ -87,6 +89,7 @@ obj-$(CONFIG_CPU_FEROCEON) += proc-feroceon.o
87obj-$(CONFIG_CPU_V6) += proc-v6.o 89obj-$(CONFIG_CPU_V6) += proc-v6.o
88obj-$(CONFIG_CPU_V6K) += proc-v6.o 90obj-$(CONFIG_CPU_V6K) += proc-v6.o
89obj-$(CONFIG_CPU_V7) += proc-v7.o 91obj-$(CONFIG_CPU_V7) += proc-v7.o
92obj-$(CONFIG_CPU_V7M) += proc-v7m.o
90 93
91AFLAGS_proc-v6.o :=-Wa,-march=armv6 94AFLAGS_proc-v6.o :=-Wa,-march=armv6
92AFLAGS_proc-v7.o :=-Wa,-march=armv7-a 95AFLAGS_proc-v7.o :=-Wa,-march=armv7-a
diff --git a/arch/arm/mm/cache-nop.S b/arch/arm/mm/cache-nop.S
new file mode 100644
index 000000000000..8e12ddca0031
--- /dev/null
+++ b/arch/arm/mm/cache-nop.S
@@ -0,0 +1,50 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
5 */
6#include <linux/linkage.h>
7#include <linux/init.h>
8
9#include "proc-macros.S"
10
11ENTRY(nop_flush_icache_all)
12 mov pc, lr
13ENDPROC(nop_flush_icache_all)
14
15 .globl nop_flush_kern_cache_all
16 .equ nop_flush_kern_cache_all, nop_flush_icache_all
17
18 .globl nop_flush_kern_cache_louis
19 .equ nop_flush_kern_cache_louis, nop_flush_icache_all
20
21 .globl nop_flush_user_cache_all
22 .equ nop_flush_user_cache_all, nop_flush_icache_all
23
24 .globl nop_flush_user_cache_range
25 .equ nop_flush_user_cache_range, nop_flush_icache_all
26
27 .globl nop_coherent_kern_range
28 .equ nop_coherent_kern_range, nop_flush_icache_all
29
30ENTRY(nop_coherent_user_range)
 31 mov r0, #0
32 mov pc, lr
33ENDPROC(nop_coherent_user_range)
34
35 .globl nop_flush_kern_dcache_area
36 .equ nop_flush_kern_dcache_area, nop_flush_icache_all
37
38 .globl nop_dma_flush_range
39 .equ nop_dma_flush_range, nop_flush_icache_all
40
41 .globl nop_dma_map_area
42 .equ nop_dma_map_area, nop_flush_icache_all
43
44 .globl nop_dma_unmap_area
45 .equ nop_dma_unmap_area, nop_flush_icache_all
46
47 __INITDATA
48
49 @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
50 define_cache_functions nop
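
For orientation, define_cache_functions expands to a struct cpu_cache_fns named nop_cache_fns whose slots all end up pointing at the no-op routines above. A rough C rendering of the resulting table (field names assumed from <asm/cacheflush.h> of this era; the real table is emitted by the assembler macro):

	/* Sketch only: every cache op degenerates to a plain return, since
	 * the ARMv7-M targets served by this file need no cache maintenance. */
	struct cpu_cache_fns nop_cache_fns = {
		.flush_icache_all	= nop_flush_icache_all,
		.flush_kern_all		= nop_flush_kern_cache_all,
		.flush_kern_louis	= nop_flush_kern_cache_louis,
		.flush_user_all		= nop_flush_user_cache_all,
		.flush_user_range	= nop_flush_user_cache_range,
		.coherent_kern_range	= nop_coherent_kern_range,
		.coherent_user_range	= nop_coherent_user_range,
		.flush_kern_dcache_area	= nop_flush_kern_dcache_area,
		.dma_map_area		= nop_dma_map_area,
		.dma_unmap_area		= nop_dma_unmap_area,
		.dma_flush_range	= nop_dma_flush_range,
	};
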
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index eeab06ebd06e..b55b1015724b 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -20,6 +20,7 @@
20#include <asm/smp_plat.h> 20#include <asm/smp_plat.h>
21#include <asm/thread_notify.h> 21#include <asm/thread_notify.h>
22#include <asm/tlbflush.h> 22#include <asm/tlbflush.h>
23#include <asm/proc-fns.h>
23 24
24/* 25/*
25 * On ARMv6, we have the following structure in the Context ID: 26 * On ARMv6, we have the following structure in the Context ID:
@@ -79,17 +80,11 @@ void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
79#ifdef CONFIG_ARM_LPAE 80#ifdef CONFIG_ARM_LPAE
80static void cpu_set_reserved_ttbr0(void) 81static void cpu_set_reserved_ttbr0(void)
81{ 82{
82 unsigned long ttbl = __pa(swapper_pg_dir);
83 unsigned long ttbh = 0;
84
85 /* 83 /*
86 * Set TTBR0 to swapper_pg_dir which contains only global entries. The 84 * Set TTBR0 to swapper_pg_dir which contains only global entries. The
87 * ASID is set to 0. 85 * ASID is set to 0.
88 */ 86 */
89 asm volatile( 87 cpu_set_ttbr(0, __pa(swapper_pg_dir));
90 " mcrr p15, 0, %0, %1, c2 @ set TTBR0\n"
91 :
92 : "r" (ttbl), "r" (ttbh));
93 isb(); 88 isb();
94} 89}
95#else 90#else
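
The cpu_set_ttbr() helper that replaces the open-coded mcrr above is introduced elsewhere in this series (asm/proc-fns.h in the diffstat). A plausible shape for the LPAE case, shown only for orientation and not copied from the patch:

	/* Assumed definition: mcrr writes the 64-bit TTBRn in one shot,
	 * low word in %Q0, high word in %R0. */
	#define cpu_set_ttbr(nr, val)					\
		do {							\
			u64 ttbr = val;					\
			__asm__("mcrr	p15, " #nr ", %Q0, %R0, c2"	\
				: : "r" (ttbr));			\
		} while (0)
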
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c038ec0738ac..7ec02961dfa0 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -250,7 +250,7 @@ static void __dma_free_buffer(struct page *page, size_t size)
250 250
251#ifdef CONFIG_MMU 251#ifdef CONFIG_MMU
252#ifdef CONFIG_HUGETLB_PAGE 252#ifdef CONFIG_HUGETLB_PAGE
253#error ARM Coherent DMA allocator does not (yet) support huge TLB 253#warning ARM Coherent DMA allocator does not (yet) support huge TLB
254#endif 254#endif
255 255
256static void *__alloc_from_contiguous(struct device *dev, size_t size, 256static void *__alloc_from_contiguous(struct device *dev, size_t size,
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 5dbf13f954f6..c97f7940cb95 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -491,12 +491,14 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
491 * Some section permission faults need to be handled gracefully. 491 * Some section permission faults need to be handled gracefully.
492 * They can happen due to a __{get,put}_user during an oops. 492 * They can happen due to a __{get,put}_user during an oops.
493 */ 493 */
494#ifndef CONFIG_ARM_LPAE
494static int 495static int
495do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) 496do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
496{ 497{
497 do_bad_area(addr, fsr, regs); 498 do_bad_area(addr, fsr, regs);
498 return 0; 499 return 0;
499} 500}
501#endif /* CONFIG_ARM_LPAE */
500 502
501/* 503/*
502 * This abort handler always returns "fault". 504 * This abort handler always returns "fault".
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index e4ac5d8278e1..6d5ba9afb16a 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -17,6 +17,7 @@
17#include <asm/highmem.h> 17#include <asm/highmem.h>
18#include <asm/smp_plat.h> 18#include <asm/smp_plat.h>
19#include <asm/tlbflush.h> 19#include <asm/tlbflush.h>
20#include <linux/hugetlb.h>
20 21
21#include "mm.h" 22#include "mm.h"
22 23
@@ -168,19 +169,23 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
168 * coherent with the kernels mapping. 169 * coherent with the kernels mapping.
169 */ 170 */
170 if (!PageHighMem(page)) { 171 if (!PageHighMem(page)) {
171 __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE); 172 size_t page_size = PAGE_SIZE << compound_order(page);
173 __cpuc_flush_dcache_area(page_address(page), page_size);
172 } else { 174 } else {
173 void *addr; 175 unsigned long i;
174
175 if (cache_is_vipt_nonaliasing()) { 176 if (cache_is_vipt_nonaliasing()) {
176 addr = kmap_atomic(page); 177 for (i = 0; i < (1 << compound_order(page)); i++) {
 177 __cpuc_flush_dcache_area(addr, PAGE_SIZE); 178 void *addr = kmap_atomic(page + i);
178 kunmap_atomic(addr);
179 } else {
180 addr = kmap_high_get(page);
181 if (addr) {
182 __cpuc_flush_dcache_area(addr, PAGE_SIZE); 179 __cpuc_flush_dcache_area(addr, PAGE_SIZE);
183 kunmap_high(page); 180 kunmap_atomic(addr);
181 }
182 } else {
183 for (i = 0; i < (1 << compound_order(page)); i++) {
 184 void *addr = kmap_high_get(page + i);
185 if (addr) {
186 __cpuc_flush_dcache_area(addr, PAGE_SIZE);
 187 kunmap_high(page + i);
188 }
184 } 189 }
185 } 190 }
186 } 191 }
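
A quick check of the loop bounds introduced above, using assumed but typical numbers (4KB base pages, one 2MB huge page):

	/* compound_order(page) == 9 for a 2MB page built from 4KB pages:
	 * lowmem path:  one flush of PAGE_SIZE << 9 == 0x200000 bytes
	 * highmem path: 1 << 9 == 512 kmap/flush/kunmap passes, one per
	 *               4KB subpage (page + i)
	 */
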
diff --git a/arch/arm/mm/fsr-3level.c b/arch/arm/mm/fsr-3level.c
index 05a4e9431836..ab4409a2307e 100644
--- a/arch/arm/mm/fsr-3level.c
+++ b/arch/arm/mm/fsr-3level.c
@@ -9,11 +9,11 @@ static struct fsr_info fsr_info[] = {
9 { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" }, 9 { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
10 { do_bad, SIGBUS, 0, "reserved access flag fault" }, 10 { do_bad, SIGBUS, 0, "reserved access flag fault" },
11 { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" }, 11 { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
12 { do_bad, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" }, 12 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
13 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" }, 13 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" },
14 { do_bad, SIGBUS, 0, "reserved permission fault" }, 14 { do_bad, SIGBUS, 0, "reserved permission fault" },
15 { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" }, 15 { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" },
16 { do_sect_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" }, 16 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" },
17 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" }, 17 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" },
18 { do_bad, SIGBUS, 0, "synchronous external abort" }, 18 { do_bad, SIGBUS, 0, "synchronous external abort" },
19 { do_bad, SIGBUS, 0, "asynchronous external abort" }, 19 { do_bad, SIGBUS, 0, "asynchronous external abort" },
diff --git a/arch/arm/mm/hugetlbpage.c b/arch/arm/mm/hugetlbpage.c
new file mode 100644
index 000000000000..3d1e4a205b0b
--- /dev/null
+++ b/arch/arm/mm/hugetlbpage.c
@@ -0,0 +1,101 @@
1/*
2 * arch/arm/mm/hugetlbpage.c
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 *
6 * Based on arch/x86/include/asm/hugetlb.h and Bill Carson's patches
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include <linux/init.h>
23#include <linux/fs.h>
24#include <linux/mm.h>
25#include <linux/hugetlb.h>
26#include <linux/pagemap.h>
27#include <linux/err.h>
28#include <linux/sysctl.h>
29#include <asm/mman.h>
30#include <asm/tlb.h>
31#include <asm/tlbflush.h>
32#include <asm/pgalloc.h>
33
34/*
35 * On ARM, huge pages are backed by pmd's rather than pte's, so we do a lot
36 * of type casting from pmd_t * to pte_t *.
37 */
38
39pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
40{
41 pgd_t *pgd;
42 pud_t *pud;
43 pmd_t *pmd = NULL;
44
45 pgd = pgd_offset(mm, addr);
46 if (pgd_present(*pgd)) {
47 pud = pud_offset(pgd, addr);
48 if (pud_present(*pud))
49 pmd = pmd_offset(pud, addr);
50 }
51
52 return (pte_t *)pmd;
53}
54
55struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
56 int write)
57{
58 return ERR_PTR(-EINVAL);
59}
60
61int pud_huge(pud_t pud)
62{
63 return 0;
64}
65
66int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
67{
68 return 0;
69}
70
71pte_t *huge_pte_alloc(struct mm_struct *mm,
72 unsigned long addr, unsigned long sz)
73{
74 pgd_t *pgd;
75 pud_t *pud;
76 pte_t *pte = NULL;
77
78 pgd = pgd_offset(mm, addr);
79 pud = pud_alloc(mm, pgd, addr);
80 if (pud)
81 pte = (pte_t *)pmd_alloc(mm, pud, addr);
82
83 return pte;
84}
85
86struct page *
87follow_huge_pmd(struct mm_struct *mm, unsigned long address,
88 pmd_t *pmd, int write)
89{
90 struct page *page;
91
92 page = pte_page(*(pte_t *)pmd);
93 if (page)
94 page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
95 return page;
96}
97
98int pmd_huge(pmd_t pmd)
99{
100 return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
101}
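
The offset arithmetic in follow_huge_pmd() is worth a worked example. With 2MB PMD sections and 4KB pages (the address value below is made up):

	/* address = 0x40123000 inside a huge page mapped at 0x40000000:
	 * address & ~PMD_MASK  -> 0x00123000 (byte offset into the section)
	 * ... >> PAGE_SHIFT    -> 0x123      (subpage index added to 'page')
	 */
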
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 9a5cdc01fcdf..2ffee02d1d5c 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -36,12 +36,13 @@
36 36
37#include "mm.h" 37#include "mm.h"
38 38
39static unsigned long phys_initrd_start __initdata = 0; 39static phys_addr_t phys_initrd_start __initdata = 0;
40static unsigned long phys_initrd_size __initdata = 0; 40static unsigned long phys_initrd_size __initdata = 0;
41 41
42static int __init early_initrd(char *p) 42static int __init early_initrd(char *p)
43{ 43{
44 unsigned long start, size; 44 phys_addr_t start;
45 unsigned long size;
45 char *endp; 46 char *endp;
46 47
47 start = memparse(p, &endp); 48 start = memparse(p, &endp);
@@ -350,14 +351,14 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
350#ifdef CONFIG_BLK_DEV_INITRD 351#ifdef CONFIG_BLK_DEV_INITRD
351 if (phys_initrd_size && 352 if (phys_initrd_size &&
352 !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) { 353 !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
353 pr_err("INITRD: 0x%08lx+0x%08lx is not a memory region - disabling initrd\n", 354 pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
354 phys_initrd_start, phys_initrd_size); 355 (u64)phys_initrd_start, phys_initrd_size);
355 phys_initrd_start = phys_initrd_size = 0; 356 phys_initrd_start = phys_initrd_size = 0;
356 } 357 }
357 if (phys_initrd_size && 358 if (phys_initrd_size &&
358 memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) { 359 memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
359 pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n", 360 pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
360 phys_initrd_start, phys_initrd_size); 361 (u64)phys_initrd_start, phys_initrd_size);
361 phys_initrd_start = phys_initrd_size = 0; 362 phys_initrd_start = phys_initrd_size = 0;
362 } 363 }
363 if (phys_initrd_size) { 364 if (phys_initrd_size) {
@@ -442,7 +443,7 @@ static inline void
442free_memmap(unsigned long start_pfn, unsigned long end_pfn) 443free_memmap(unsigned long start_pfn, unsigned long end_pfn)
443{ 444{
444 struct page *start_pg, *end_pg; 445 struct page *start_pg, *end_pg;
445 unsigned long pg, pgend; 446 phys_addr_t pg, pgend;
446 447
447 /* 448 /*
448 * Convert start_pfn/end_pfn to a struct page pointer. 449 * Convert start_pfn/end_pfn to a struct page pointer.
@@ -454,8 +455,8 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn)
454 * Convert to physical addresses, and 455 * Convert to physical addresses, and
455 * round start upwards and end downwards. 456 * round start upwards and end downwards.
456 */ 457 */
457 pg = (unsigned long)PAGE_ALIGN(__pa(start_pg)); 458 pg = PAGE_ALIGN(__pa(start_pg));
458 pgend = (unsigned long)__pa(end_pg) & PAGE_MASK; 459 pgend = __pa(end_pg) & PAGE_MASK;
459 460
460 /* 461 /*
461 * If there are free pages between these, 462 * If there are free pages between these,
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4d409e6a552d..01c03366c45e 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -675,7 +675,8 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
675} 675}
676 676
677static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, 677static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
678 unsigned long end, unsigned long phys, const struct mem_type *type) 678 unsigned long end, phys_addr_t phys,
679 const struct mem_type *type)
679{ 680{
680 pud_t *pud = pud_offset(pgd, addr); 681 pud_t *pud = pud_offset(pgd, addr);
681 unsigned long next; 682 unsigned long next;
@@ -989,27 +990,28 @@ phys_addr_t arm_lowmem_limit __initdata = 0;
989void __init sanity_check_meminfo(void) 990void __init sanity_check_meminfo(void)
990{ 991{
991 int i, j, highmem = 0; 992 int i, j, highmem = 0;
993 phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
992 994
993 for (i = 0, j = 0; i < meminfo.nr_banks; i++) { 995 for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
994 struct membank *bank = &meminfo.bank[j]; 996 struct membank *bank = &meminfo.bank[j];
995 *bank = meminfo.bank[i]; 997 phys_addr_t size_limit;
996 998
997 if (bank->start > ULONG_MAX) 999 *bank = meminfo.bank[i];
998 highmem = 1; 1000 size_limit = bank->size;
999 1001
1000#ifdef CONFIG_HIGHMEM 1002 if (bank->start >= vmalloc_limit)
1001 if (__va(bank->start) >= vmalloc_min ||
1002 __va(bank->start) < (void *)PAGE_OFFSET)
1003 highmem = 1; 1003 highmem = 1;
1004 else
1005 size_limit = vmalloc_limit - bank->start;
1004 1006
1005 bank->highmem = highmem; 1007 bank->highmem = highmem;
1006 1008
1009#ifdef CONFIG_HIGHMEM
1007 /* 1010 /*
1008 * Split those memory banks which are partially overlapping 1011 * Split those memory banks which are partially overlapping
1009 * the vmalloc area greatly simplifying things later. 1012 * the vmalloc area greatly simplifying things later.
1010 */ 1013 */
1011 if (!highmem && __va(bank->start) < vmalloc_min && 1014 if (!highmem && bank->size > size_limit) {
1012 bank->size > vmalloc_min - __va(bank->start)) {
1013 if (meminfo.nr_banks >= NR_BANKS) { 1015 if (meminfo.nr_banks >= NR_BANKS) {
1014 printk(KERN_CRIT "NR_BANKS too low, " 1016 printk(KERN_CRIT "NR_BANKS too low, "
1015 "ignoring high memory\n"); 1017 "ignoring high memory\n");
@@ -1018,16 +1020,14 @@ void __init sanity_check_meminfo(void)
1018 (meminfo.nr_banks - i) * sizeof(*bank)); 1020 (meminfo.nr_banks - i) * sizeof(*bank));
1019 meminfo.nr_banks++; 1021 meminfo.nr_banks++;
1020 i++; 1022 i++;
1021 bank[1].size -= vmalloc_min - __va(bank->start); 1023 bank[1].size -= size_limit;
1022 bank[1].start = __pa(vmalloc_min - 1) + 1; 1024 bank[1].start = vmalloc_limit;
1023 bank[1].highmem = highmem = 1; 1025 bank[1].highmem = highmem = 1;
1024 j++; 1026 j++;
1025 } 1027 }
1026 bank->size = vmalloc_min - __va(bank->start); 1028 bank->size = size_limit;
1027 } 1029 }
1028#else 1030#else
1029 bank->highmem = highmem;
1030
1031 /* 1031 /*
1032 * Highmem banks not allowed with !CONFIG_HIGHMEM. 1032 * Highmem banks not allowed with !CONFIG_HIGHMEM.
1033 */ 1033 */
@@ -1040,31 +1040,16 @@ void __init sanity_check_meminfo(void)
1040 } 1040 }
1041 1041
1042 /* 1042 /*
1043 * Check whether this memory bank would entirely overlap
1044 * the vmalloc area.
1045 */
1046 if (__va(bank->start) >= vmalloc_min ||
1047 __va(bank->start) < (void *)PAGE_OFFSET) {
1048 printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
1049 "(vmalloc region overlap).\n",
1050 (unsigned long long)bank->start,
1051 (unsigned long long)bank->start + bank->size - 1);
1052 continue;
1053 }
1054
1055 /*
1056 * Check whether this memory bank would partially overlap 1043 * Check whether this memory bank would partially overlap
1057 * the vmalloc area. 1044 * the vmalloc area.
1058 */ 1045 */
1059 if (__va(bank->start + bank->size - 1) >= vmalloc_min || 1046 if (bank->size > size_limit) {
1060 __va(bank->start + bank->size - 1) <= __va(bank->start)) {
1061 unsigned long newsize = vmalloc_min - __va(bank->start);
1062 printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx " 1047 printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
1063 "to -%.8llx (vmalloc region overlap).\n", 1048 "to -%.8llx (vmalloc region overlap).\n",
1064 (unsigned long long)bank->start, 1049 (unsigned long long)bank->start,
1065 (unsigned long long)bank->start + bank->size - 1, 1050 (unsigned long long)bank->start + bank->size - 1,
1066 (unsigned long long)bank->start + newsize - 1); 1051 (unsigned long long)bank->start + size_limit - 1);
1067 bank->size = newsize; 1052 bank->size = size_limit;
1068 } 1053 }
1069#endif 1054#endif
1070 if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit) 1055 if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
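
To see what the reworked sanity_check_meminfo() does, take a made-up layout; none of these numbers come from the patch:

	/* vmalloc_limit = 0xB0000000, one bank { .start = 0x80000000, 1GB }:
	 * size_limit = vmalloc_limit - bank->start = 0x30000000 (768MB)
	 * CONFIG_HIGHMEM:  split into a 768MB lowmem bank plus a 256MB
	 *                  highmem bank starting at vmalloc_limit
	 * !CONFIG_HIGHMEM: bank truncated to 768MB with the "vmalloc region
	 *                  overlap" notice above
	 */
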
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 7fe0524a5449..1fa50100ab6a 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -8,6 +8,7 @@
8#include <linux/pagemap.h> 8#include <linux/pagemap.h>
9#include <linux/io.h> 9#include <linux/io.h>
10#include <linux/memblock.h> 10#include <linux/memblock.h>
11#include <linux/kernel.h>
11 12
12#include <asm/cacheflush.h> 13#include <asm/cacheflush.h>
13#include <asm/sections.h> 14#include <asm/sections.h>
@@ -15,22 +16,282 @@
15#include <asm/setup.h> 16#include <asm/setup.h>
16#include <asm/traps.h> 17#include <asm/traps.h>
17#include <asm/mach/arch.h> 18#include <asm/mach/arch.h>
19#include <asm/cputype.h>
20#include <asm/mpu.h>
18 21
19#include "mm.h" 22#include "mm.h"
20 23
24#ifdef CONFIG_ARM_MPU
25struct mpu_rgn_info mpu_rgn_info;
26
27/* Region number */
28static void rgnr_write(u32 v)
29{
30 asm("mcr p15, 0, %0, c6, c2, 0" : : "r" (v));
31}
32
33/* Data-side / unified region attributes */
34
35/* Region access control register */
36static void dracr_write(u32 v)
37{
38 asm("mcr p15, 0, %0, c6, c1, 4" : : "r" (v));
39}
40
41/* Region size register */
42static void drsr_write(u32 v)
43{
44 asm("mcr p15, 0, %0, c6, c1, 2" : : "r" (v));
45}
46
47/* Region base address register */
48static void drbar_write(u32 v)
49{
50 asm("mcr p15, 0, %0, c6, c1, 0" : : "r" (v));
51}
52
53static u32 drbar_read(void)
54{
55 u32 v;
56 asm("mrc p15, 0, %0, c6, c1, 0" : "=r" (v));
57 return v;
58}
59/* Optional instruction-side region attributes */
60
61/* I-side Region access control register */
62static void iracr_write(u32 v)
63{
64 asm("mcr p15, 0, %0, c6, c1, 5" : : "r" (v));
65}
66
67/* I-side Region size register */
68static void irsr_write(u32 v)
69{
70 asm("mcr p15, 0, %0, c6, c1, 3" : : "r" (v));
71}
72
73/* I-side Region base address register */
74static void irbar_write(u32 v)
75{
76 asm("mcr p15, 0, %0, c6, c1, 1" : : "r" (v));
77}
78
79static unsigned long irbar_read(void)
80{
81 unsigned long v;
82 asm("mrc p15, 0, %0, c6, c1, 1" : "=r" (v));
83 return v;
84}
85
86/* MPU initialisation functions */
87void __init sanity_check_meminfo_mpu(void)
88{
89 int i;
90 struct membank *bank = meminfo.bank;
91 phys_addr_t phys_offset = PHYS_OFFSET;
92 phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size;
93
94 /* Initially only use memory continuous from PHYS_OFFSET */
95 if (bank_phys_start(&bank[0]) != phys_offset)
96 panic("First memory bank must be contiguous from PHYS_OFFSET");
97
98 /* Banks have already been sorted by start address */
99 for (i = 1; i < meminfo.nr_banks; i++) {
100 if (bank[i].start <= bank_phys_end(&bank[0]) &&
101 bank_phys_end(&bank[i]) > bank_phys_end(&bank[0])) {
102 bank[0].size = bank_phys_end(&bank[i]) - bank[0].start;
103 } else {
104 pr_notice("Ignoring RAM after 0x%.8lx. "
105 "First non-contiguous (ignored) bank start: 0x%.8lx\n",
106 (unsigned long)bank_phys_end(&bank[0]),
107 (unsigned long)bank_phys_start(&bank[i]));
108 break;
109 }
110 }
 111 /* All contiguous banks are now merged into the first bank */
112 meminfo.nr_banks = 1;
113 specified_mem_size = bank[0].size;
114
115 /*
116 * MPU has curious alignment requirements: Size must be power of 2, and
117 * region start must be aligned to the region size
118 */
119 if (phys_offset != 0)
120 pr_info("PHYS_OFFSET != 0 => MPU Region size constrained by alignment requirements\n");
121
122 /*
123 * Maximum aligned region might overflow phys_addr_t if phys_offset is
124 * 0. Hence we keep everything below 4G until we take the smaller of
125 * the aligned_region_size and rounded_mem_size, one of which is
126 * guaranteed to be smaller than the maximum physical address.
127 */
128 aligned_region_size = (phys_offset - 1) ^ (phys_offset);
129 /* Find the max power-of-two sized region that fits inside our bank */
130 rounded_mem_size = (1 << __fls(bank[0].size)) - 1;
131
132 /* The actual region size is the smaller of the two */
133 aligned_region_size = aligned_region_size < rounded_mem_size
134 ? aligned_region_size + 1
135 : rounded_mem_size + 1;
136
137 if (aligned_region_size != specified_mem_size)
138 pr_warn("Truncating memory from 0x%.8lx to 0x%.8lx (MPU region constraints)",
139 (unsigned long)specified_mem_size,
140 (unsigned long)aligned_region_size);
141
142 meminfo.bank[0].size = aligned_region_size;
 143 pr_debug("MPU Region from 0x%.8lx size 0x%.8lx (end 0x%.8lx)\n",
144 (unsigned long)phys_offset,
145 (unsigned long)aligned_region_size,
146 (unsigned long)bank_phys_end(&bank[0]));
147
148}
149
150static int mpu_present(void)
151{
152 return ((read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA) == MMFR0_PMSAv7);
153}
154
155static int mpu_max_regions(void)
156{
157 /*
158 * We don't support a different number of I/D side regions so if we
159 * have separate instruction and data memory maps then return
160 * whichever side has a smaller number of supported regions.
161 */
162 u32 dregions, iregions, mpuir;
163 mpuir = read_cpuid(CPUID_MPUIR);
164
165 dregions = iregions = (mpuir & MPUIR_DREGION_SZMASK) >> MPUIR_DREGION;
166
167 /* Check for separate d-side and i-side memory maps */
168 if (mpuir & MPUIR_nU)
169 iregions = (mpuir & MPUIR_IREGION_SZMASK) >> MPUIR_IREGION;
170
171 /* Use the smallest of the two maxima */
172 return min(dregions, iregions);
173}
174
175static int mpu_iside_independent(void)
176{
177 /* MPUIR.nU specifies whether there is *not* a unified memory map */
178 return read_cpuid(CPUID_MPUIR) & MPUIR_nU;
179}
180
181static int mpu_min_region_order(void)
182{
183 u32 drbar_result, irbar_result;
184 /* We've kept a region free for this probing */
185 rgnr_write(MPU_PROBE_REGION);
186 isb();
187 /*
188 * As per ARM ARM, write 0xFFFFFFFC to DRBAR to find the minimum
189 * region order
190 */
191 drbar_write(0xFFFFFFFC);
192 drbar_result = irbar_result = drbar_read();
193 drbar_write(0x0);
 194 /* If the MPU is non-unified, we use the larger of the two minima */
195 if (mpu_iside_independent()) {
196 irbar_write(0xFFFFFFFC);
197 irbar_result = irbar_read();
198 irbar_write(0x0);
199 }
200 isb(); /* Ensure that MPU region operations have completed */
201 /* Return whichever result is larger */
202 return __ffs(max(drbar_result, irbar_result));
203}
204
205static int mpu_setup_region(unsigned int number, phys_addr_t start,
206 unsigned int size_order, unsigned int properties)
207{
208 u32 size_data;
209
 210 /* We kept a region free for probing resolution of MPU regions */
211 if (number > mpu_max_regions() || number == MPU_PROBE_REGION)
212 return -ENOENT;
213
214 if (size_order > 32)
215 return -ENOMEM;
216
217 if (size_order < mpu_min_region_order())
218 return -ENOMEM;
219
 220 /* Writing N to bits 5:1 (RSR_SZ) specifies region size 2^(N+1) */
221 size_data = ((size_order - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN;
222
223 dsb(); /* Ensure all previous data accesses occur with old mappings */
224 rgnr_write(number);
225 isb();
226 drbar_write(start);
227 dracr_write(properties);
228 isb(); /* Propagate properties before enabling region */
229 drsr_write(size_data);
230
231 /* Check for independent I-side registers */
232 if (mpu_iside_independent()) {
233 irbar_write(start);
234 iracr_write(properties);
235 isb();
236 irsr_write(size_data);
237 }
238 isb();
239
240 /* Store region info (we treat i/d side the same, so only store d) */
241 mpu_rgn_info.rgns[number].dracr = properties;
242 mpu_rgn_info.rgns[number].drbar = start;
243 mpu_rgn_info.rgns[number].drsr = size_data;
244 return 0;
245}
246
 247/*
 248 * Set up default MPU regions, doing nothing if there is no MPU
 249 */
250void __init mpu_setup(void)
251{
252 int region_err;
253 if (!mpu_present())
254 return;
255
256 region_err = mpu_setup_region(MPU_RAM_REGION, PHYS_OFFSET,
257 ilog2(meminfo.bank[0].size),
258 MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL);
259 if (region_err) {
260 panic("MPU region initialization failure! %d", region_err);
261 } else {
262 pr_info("Using ARMv7 PMSA Compliant MPU. "
263 "Region independence: %s, Max regions: %d\n",
264 mpu_iside_independent() ? "Yes" : "No",
265 mpu_max_regions());
266 }
267}
268#else
269static void sanity_check_meminfo_mpu(void) {}
270static void __init mpu_setup(void) {}
271#endif /* CONFIG_ARM_MPU */
272
21void __init arm_mm_memblock_reserve(void) 273void __init arm_mm_memblock_reserve(void)
22{ 274{
275#ifndef CONFIG_CPU_V7M
23 /* 276 /*
24 * Register the exception vector page. 277 * Register the exception vector page.
25 * some architectures which the DRAM is the exception vector to trap, 278 * some architectures which the DRAM is the exception vector to trap,
26 * alloc_page breaks with error, although it is not NULL, but "0." 279 * alloc_page breaks with error, although it is not NULL, but "0."
27 */ 280 */
28 memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE); 281 memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE);
282#else /* ifndef CONFIG_CPU_V7M */
283 /*
284 * There is no dedicated vector page on V7-M. So nothing needs to be
285 * reserved here.
286 */
287#endif
29} 288}
30 289
31void __init sanity_check_meminfo(void) 290void __init sanity_check_meminfo(void)
32{ 291{
33 phys_addr_t end = bank_phys_end(&meminfo.bank[meminfo.nr_banks - 1]); 292 phys_addr_t end;
293 sanity_check_meminfo_mpu();
294 end = bank_phys_end(&meminfo.bank[meminfo.nr_banks - 1]);
34 high_memory = __va(end - 1) + 1; 295 high_memory = __va(end - 1) + 1;
35} 296}
36 297
@@ -41,6 +302,7 @@ void __init sanity_check_meminfo(void)
41void __init paging_init(struct machine_desc *mdesc) 302void __init paging_init(struct machine_desc *mdesc)
42{ 303{
43 early_trap_init((void *)CONFIG_VECTORS_BASE); 304 early_trap_init((void *)CONFIG_VECTORS_BASE);
305 mpu_setup();
44 bootmem_init(); 306 bootmem_init();
45} 307}
46 308
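
One detail of mpu_setup_region() worth spelling out: DRSR takes the region size as an exponent, so a 2^(N+1)-byte region is programmed by writing N into the SZ field. A worked example with an assumed 64MB region:

	/* size_order = ilog2(SZ_64M) = 26
	 * size_data  = ((26 - 1) << MPU_RSR_SZ) | (1 << MPU_RSR_EN)
	 *   -> SZ field 25, i.e. region size 2^(25+1) = 64MB, enable bit set
	 */
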
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 919405e20b80..2d1ef87328a1 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -140,8 +140,10 @@ ENTRY(cpu_v6_set_pte_ext)
140ENTRY(cpu_v6_do_suspend) 140ENTRY(cpu_v6_do_suspend)
141 stmfd sp!, {r4 - r9, lr} 141 stmfd sp!, {r4 - r9, lr}
142 mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID 142 mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
143#ifdef CONFIG_MMU
143 mrc p15, 0, r5, c3, c0, 0 @ Domain ID 144 mrc p15, 0, r5, c3, c0, 0 @ Domain ID
144 mrc p15, 0, r6, c2, c0, 1 @ Translation table base 1 145 mrc p15, 0, r6, c2, c0, 1 @ Translation table base 1
146#endif
145 mrc p15, 0, r7, c1, c0, 1 @ auxiliary control register 147 mrc p15, 0, r7, c1, c0, 1 @ auxiliary control register
146 mrc p15, 0, r8, c1, c0, 2 @ co-processor access control 148 mrc p15, 0, r8, c1, c0, 2 @ co-processor access control
147 mrc p15, 0, r9, c1, c0, 0 @ control register 149 mrc p15, 0, r9, c1, c0, 0 @ control register
@@ -158,14 +160,16 @@ ENTRY(cpu_v6_do_resume)
158 mcr p15, 0, ip, c13, c0, 1 @ set reserved context ID 160 mcr p15, 0, ip, c13, c0, 1 @ set reserved context ID
159 ldmia r0, {r4 - r9} 161 ldmia r0, {r4 - r9}
160 mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID 162 mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID
163#ifdef CONFIG_MMU
161 mcr p15, 0, r5, c3, c0, 0 @ Domain ID 164 mcr p15, 0, r5, c3, c0, 0 @ Domain ID
162 ALT_SMP(orr r1, r1, #TTB_FLAGS_SMP) 165 ALT_SMP(orr r1, r1, #TTB_FLAGS_SMP)
163 ALT_UP(orr r1, r1, #TTB_FLAGS_UP) 166 ALT_UP(orr r1, r1, #TTB_FLAGS_UP)
164 mcr p15, 0, r1, c2, c0, 0 @ Translation table base 0 167 mcr p15, 0, r1, c2, c0, 0 @ Translation table base 0
165 mcr p15, 0, r6, c2, c0, 1 @ Translation table base 1 168 mcr p15, 0, r6, c2, c0, 1 @ Translation table base 1
169 mcr p15, 0, ip, c2, c0, 2 @ TTB control register
170#endif
166 mcr p15, 0, r7, c1, c0, 1 @ auxiliary control register 171 mcr p15, 0, r7, c1, c0, 1 @ auxiliary control register
167 mcr p15, 0, r8, c1, c0, 2 @ co-processor access control 172 mcr p15, 0, r8, c1, c0, 2 @ co-processor access control
168 mcr p15, 0, ip, c2, c0, 2 @ TTB control register
169 mcr p15, 0, ip, c7, c5, 4 @ ISB 173 mcr p15, 0, ip, c7, c5, 4 @ ISB
170 mov r0, r9 @ control register 174 mov r0, r9 @ control register
171 b cpu_resume_mmu 175 b cpu_resume_mmu
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 363027e811d6..5ffe1956c6d9 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -39,6 +39,14 @@
39#define TTB_FLAGS_SMP (TTB_IRGN_WBWA|TTB_S|TTB_RGN_OC_WBWA) 39#define TTB_FLAGS_SMP (TTB_IRGN_WBWA|TTB_S|TTB_RGN_OC_WBWA)
40#define PMD_FLAGS_SMP (PMD_SECT_WBWA|PMD_SECT_S) 40#define PMD_FLAGS_SMP (PMD_SECT_WBWA|PMD_SECT_S)
41 41
42#ifndef __ARMEB__
43# define rpgdl r0
44# define rpgdh r1
45#else
46# define rpgdl r1
47# define rpgdh r0
48#endif
49
42/* 50/*
43 * cpu_v7_switch_mm(pgd_phys, tsk) 51 * cpu_v7_switch_mm(pgd_phys, tsk)
44 * 52 *
@@ -47,10 +55,10 @@
47 */ 55 */
48ENTRY(cpu_v7_switch_mm) 56ENTRY(cpu_v7_switch_mm)
49#ifdef CONFIG_MMU 57#ifdef CONFIG_MMU
50 mmid r1, r1 @ get mm->context.id 58 mmid r2, r2
51 asid r3, r1 59 asid r2, r2
52 mov r3, r3, lsl #(48 - 32) @ ASID 60 orr rpgdh, rpgdh, r2, lsl #(48 - 32) @ upper 32-bits of pgd
53 mcrr p15, 0, r0, r3, c2 @ set TTB 0 61 mcrr p15, 0, rpgdl, rpgdh, c2 @ set TTB 0
54 isb 62 isb
55#endif 63#endif
56 mov pc, lr 64 mov pc, lr
@@ -106,7 +114,8 @@ ENDPROC(cpu_v7_set_pte_ext)
106 */ 114 */
107 .macro v7_ttb_setup, zero, ttbr0, ttbr1, tmp 115 .macro v7_ttb_setup, zero, ttbr0, ttbr1, tmp
108 ldr \tmp, =swapper_pg_dir @ swapper_pg_dir virtual address 116 ldr \tmp, =swapper_pg_dir @ swapper_pg_dir virtual address
109 cmp \ttbr1, \tmp @ PHYS_OFFSET > PAGE_OFFSET? (branch below) 117 mov \tmp, \tmp, lsr #ARCH_PGD_SHIFT
118 cmp \ttbr1, \tmp @ PHYS_OFFSET > PAGE_OFFSET?
110 mrc p15, 0, \tmp, c2, c0, 2 @ TTB control register 119 mrc p15, 0, \tmp, c2, c0, 2 @ TTB control register
111 orr \tmp, \tmp, #TTB_EAE 120 orr \tmp, \tmp, #TTB_EAE
112 ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP) 121 ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP)
@@ -114,27 +123,21 @@ ENDPROC(cpu_v7_set_pte_ext)
114 ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP << 16) 123 ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP << 16)
115 ALT_UP(orr \tmp, \tmp, #TTB_FLAGS_UP << 16) 124 ALT_UP(orr \tmp, \tmp, #TTB_FLAGS_UP << 16)
116 /* 125 /*
117 * TTBR0/TTBR1 split (PAGE_OFFSET): 126 * Only use split TTBRs if PHYS_OFFSET <= PAGE_OFFSET (cmp above),
118 * 0x40000000: T0SZ = 2, T1SZ = 0 (not used) 127 * otherwise booting secondary CPUs would end up using TTBR1 for the
119 * 0x80000000: T0SZ = 0, T1SZ = 1 128 * identity mapping set up in TTBR0.
120 * 0xc0000000: T0SZ = 0, T1SZ = 2
121 *
122 * Only use this feature if PHYS_OFFSET <= PAGE_OFFSET, otherwise
123 * booting secondary CPUs would end up using TTBR1 for the identity
124 * mapping set up in TTBR0.
125 */ 129 */
126 bhi 9001f @ PHYS_OFFSET > PAGE_OFFSET? 130 orrls \tmp, \tmp, #TTBR1_SIZE @ TTBCR.T1SZ
127 orr \tmp, \tmp, #(((PAGE_OFFSET >> 30) - 1) << 16) @ TTBCR.T1SZ 131 mcr p15, 0, \tmp, c2, c0, 2 @ TTBCR
128#if defined CONFIG_VMSPLIT_2G 132 mov \tmp, \ttbr1, lsr #(32 - ARCH_PGD_SHIFT) @ upper bits
129 /* PAGE_OFFSET == 0x80000000, T1SZ == 1 */ 133 mov \ttbr1, \ttbr1, lsl #ARCH_PGD_SHIFT @ lower bits
130 add \ttbr1, \ttbr1, #1 << 4 @ skip two L1 entries 134 addls \ttbr1, \ttbr1, #TTBR1_OFFSET
131#elif defined CONFIG_VMSPLIT_3G 135 mcrr p15, 1, \ttbr1, \zero, c2 @ load TTBR1
132 /* PAGE_OFFSET == 0xc0000000, T1SZ == 2 */ 136 mov \tmp, \ttbr0, lsr #(32 - ARCH_PGD_SHIFT) @ upper bits
133 add \ttbr1, \ttbr1, #4096 * (1 + 3) @ only L2 used, skip pgd+3*pmd 137 mov \ttbr0, \ttbr0, lsl #ARCH_PGD_SHIFT @ lower bits
134#endif 138 mcrr p15, 0, \ttbr0, \zero, c2 @ load TTBR0
135 /* CONFIG_VMSPLIT_1G does not need TTBR1 adjustment */ 139 mcrr p15, 1, \ttbr1, \zero, c2 @ load TTBR1
1369001: mcr p15, 0, \tmp, c2, c0, 2 @ TTB control register 140 mcrr p15, 0, \ttbr0, \zero, c2 @ load TTBR0
137 mcrr p15, 1, \ttbr1, \zero, c2 @ load TTBR1
138 .endm 141 .endm
139 142
140 __CPUINIT 143 __CPUINIT
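
The register aliases above treat (r0, r1) as one 64-bit TTBR0 value, byte-order-swapped for big-endian builds. Conceptually, the caller hands cpu_v7_switch_mm() the physical pgd base and the mmid/asid step merges the ASID into TTBR0[55:48]; roughly, in C (pgd_phys and asid are illustrative variables):

	/* Orientation sketch, not the patch's code: the value the mcrr
	 * above loads into the 64-bit TTBR0. */
	u64 ttbr0 = (u64)pgd_phys | ((u64)asid << 48);
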
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index e35fec34453e..7ef3ad05df39 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -98,9 +98,11 @@ ENTRY(cpu_v7_do_suspend)
98 mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID 98 mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
99 mrc p15, 0, r5, c13, c0, 3 @ User r/o thread ID 99 mrc p15, 0, r5, c13, c0, 3 @ User r/o thread ID
100 stmia r0!, {r4 - r5} 100 stmia r0!, {r4 - r5}
101#ifdef CONFIG_MMU
101 mrc p15, 0, r6, c3, c0, 0 @ Domain ID 102 mrc p15, 0, r6, c3, c0, 0 @ Domain ID
102 mrc p15, 0, r7, c2, c0, 1 @ TTB 1 103 mrc p15, 0, r7, c2, c0, 1 @ TTB 1
103 mrc p15, 0, r11, c2, c0, 2 @ TTB control register 104 mrc p15, 0, r11, c2, c0, 2 @ TTB control register
105#endif
104 mrc p15, 0, r8, c1, c0, 0 @ Control register 106 mrc p15, 0, r8, c1, c0, 0 @ Control register
105 mrc p15, 0, r9, c1, c0, 1 @ Auxiliary control register 107 mrc p15, 0, r9, c1, c0, 1 @ Auxiliary control register
106 mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control 108 mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control
@@ -110,13 +112,14 @@ ENDPROC(cpu_v7_do_suspend)
110 112
111ENTRY(cpu_v7_do_resume) 113ENTRY(cpu_v7_do_resume)
112 mov ip, #0 114 mov ip, #0
113 mcr p15, 0, ip, c8, c7, 0 @ invalidate TLBs
114 mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache 115 mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
115 mcr p15, 0, ip, c13, c0, 1 @ set reserved context ID 116 mcr p15, 0, ip, c13, c0, 1 @ set reserved context ID
116 ldmia r0!, {r4 - r5} 117 ldmia r0!, {r4 - r5}
117 mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID 118 mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID
118 mcr p15, 0, r5, c13, c0, 3 @ User r/o thread ID 119 mcr p15, 0, r5, c13, c0, 3 @ User r/o thread ID
119 ldmia r0, {r6 - r11} 120 ldmia r0, {r6 - r11}
121#ifdef CONFIG_MMU
122 mcr p15, 0, ip, c8, c7, 0 @ invalidate TLBs
120 mcr p15, 0, r6, c3, c0, 0 @ Domain ID 123 mcr p15, 0, r6, c3, c0, 0 @ Domain ID
121#ifndef CONFIG_ARM_LPAE 124#ifndef CONFIG_ARM_LPAE
122 ALT_SMP(orr r1, r1, #TTB_FLAGS_SMP) 125 ALT_SMP(orr r1, r1, #TTB_FLAGS_SMP)
@@ -125,14 +128,15 @@ ENTRY(cpu_v7_do_resume)
125 mcr p15, 0, r1, c2, c0, 0 @ TTB 0 128 mcr p15, 0, r1, c2, c0, 0 @ TTB 0
126 mcr p15, 0, r7, c2, c0, 1 @ TTB 1 129 mcr p15, 0, r7, c2, c0, 1 @ TTB 1
127 mcr p15, 0, r11, c2, c0, 2 @ TTB control register 130 mcr p15, 0, r11, c2, c0, 2 @ TTB control register
128 mrc p15, 0, r4, c1, c0, 1 @ Read Auxiliary control register
129 teq r4, r9 @ Is it already set?
130 mcrne p15, 0, r9, c1, c0, 1 @ No, so write it
131 mcr p15, 0, r10, c1, c0, 2 @ Co-processor access control
132 ldr r4, =PRRR @ PRRR 131 ldr r4, =PRRR @ PRRR
133 ldr r5, =NMRR @ NMRR 132 ldr r5, =NMRR @ NMRR
134 mcr p15, 0, r4, c10, c2, 0 @ write PRRR 133 mcr p15, 0, r4, c10, c2, 0 @ write PRRR
135 mcr p15, 0, r5, c10, c2, 1 @ write NMRR 134 mcr p15, 0, r5, c10, c2, 1 @ write NMRR
135#endif /* CONFIG_MMU */
136 mrc p15, 0, r4, c1, c0, 1 @ Read Auxiliary control register
137 teq r4, r9 @ Is it already set?
138 mcrne p15, 0, r9, c1, c0, 1 @ No, so write it
139 mcr p15, 0, r10, c1, c0, 2 @ Co-processor access control
136 isb 140 isb
137 dsb 141 dsb
138 mov r0, r8 @ control register 142 mov r0, r8 @ control register
@@ -178,7 +182,8 @@ ENDPROC(cpu_pj4b_do_idle)
178 */ 182 */
179__v7_ca5mp_setup: 183__v7_ca5mp_setup:
180__v7_ca9mp_setup: 184__v7_ca9mp_setup:
181 mov r10, #(1 << 0) @ TLB ops broadcasting 185__v7_cr7mp_setup:
186 mov r10, #(1 << 0) @ Cache/TLB ops broadcasting
182 b 1f 187 b 1f
183__v7_ca7mp_setup: 188__v7_ca7mp_setup:
184__v7_ca15mp_setup: 189__v7_ca15mp_setup:
@@ -443,6 +448,16 @@ __v7_pj4b_proc_info:
443#endif 448#endif
444 449
445 /* 450 /*
451 * ARM Ltd. Cortex R7 processor.
452 */
453 .type __v7_cr7mp_proc_info, #object
454__v7_cr7mp_proc_info:
455 .long 0x410fc170
456 .long 0xff0ffff0
457 __v7_proc __v7_cr7mp_setup
458 .size __v7_cr7mp_proc_info, . - __v7_cr7mp_proc_info
459
460 /*
446 * ARM Ltd. Cortex A7 processor. 461 * ARM Ltd. Cortex A7 processor.
447 */ 462 */
448 .type __v7_ca7mp_proc_info, #object 463 .type __v7_ca7mp_proc_info, #object
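
The new Cortex-R7 entry is picked up by the usual masked MIDR match in the early proc_info scan. Expressed in C for clarity (the helper name is illustrative; the constants are the two .long values above):

	/* Match if (MIDR & mask) == value; 0x410fc170 encodes implementer
	 * 'A' (ARM) and part number 0xC17 (Cortex-R7). */
	static int proc_info_matches_cr7(u32 midr)
	{
		return (midr & 0xff0ffff0) == 0x410fc170;
	}
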
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
new file mode 100644
index 000000000000..0c93588fcb91
--- /dev/null
+++ b/arch/arm/mm/proc-v7m.S
@@ -0,0 +1,157 @@
1/*
2 * linux/arch/arm/mm/proc-v7m.S
3 *
4 * Copyright (C) 2008 ARM Ltd.
5 * Copyright (C) 2001 Deep Blue Solutions Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This is the "shell" of the ARMv7-M processor support.
12 */
13#include <linux/linkage.h>
14#include <asm/assembler.h>
15#include <asm/v7m.h>
16#include "proc-macros.S"
17
18ENTRY(cpu_v7m_proc_init)
19 mov pc, lr
20ENDPROC(cpu_v7m_proc_init)
21
22ENTRY(cpu_v7m_proc_fin)
23 mov pc, lr
24ENDPROC(cpu_v7m_proc_fin)
25
26/*
27 * cpu_v7m_reset(loc)
28 *
29 * Perform a soft reset of the system. Put the CPU into the
30 * same state as it would be if it had been reset, and branch
31 * to what would be the reset vector.
32 *
33 * - loc - location to jump to for soft reset
34 */
35 .align 5
36ENTRY(cpu_v7m_reset)
37 mov pc, r0
38ENDPROC(cpu_v7m_reset)
39
40/*
41 * cpu_v7m_do_idle()
42 *
43 * Idle the processor (eg, wait for interrupt).
44 *
45 * IRQs are already disabled.
46 */
47ENTRY(cpu_v7m_do_idle)
48 wfi
49 mov pc, lr
50ENDPROC(cpu_v7m_do_idle)
51
52ENTRY(cpu_v7m_dcache_clean_area)
53 mov pc, lr
54ENDPROC(cpu_v7m_dcache_clean_area)
55
56/*
 57 * There is no MMU, so there is nothing to do here.
58 */
59ENTRY(cpu_v7m_switch_mm)
60 mov pc, lr
61ENDPROC(cpu_v7m_switch_mm)
62
63.globl cpu_v7m_suspend_size
64.equ cpu_v7m_suspend_size, 0
65
66#ifdef CONFIG_ARM_CPU_SUSPEND
67ENTRY(cpu_v7m_do_suspend)
68 mov pc, lr
69ENDPROC(cpu_v7m_do_suspend)
70
71ENTRY(cpu_v7m_do_resume)
72 mov pc, lr
73ENDPROC(cpu_v7m_do_resume)
74#endif
75
76 .section ".text.init", #alloc, #execinstr
77
78/*
79 * __v7m_setup
80 *
81 * This should be able to cover all ARMv7-M cores.
82 */
83__v7m_setup:
84 @ Configure the vector table base address
85 ldr r0, =BASEADDR_V7M_SCB
86 ldr r12, =vector_table
87 str r12, [r0, V7M_SCB_VTOR]
88
 89 @ Enable UsageFault, BusFault and MemManage faults.
90 ldr r5, [r0, #V7M_SCB_SHCSR]
91 orr r5, #(V7M_SCB_SHCSR_USGFAULTENA | V7M_SCB_SHCSR_BUSFAULTENA | V7M_SCB_SHCSR_MEMFAULTENA)
92 str r5, [r0, #V7M_SCB_SHCSR]
93
94 @ Lower the priority of the SVC and PendSV exceptions
95 mov r5, #0x80000000
96 str r5, [r0, V7M_SCB_SHPR2] @ set SVC priority
97 mov r5, #0x00800000
98 str r5, [r0, V7M_SCB_SHPR3] @ set PendSV priority
99
100 @ SVC to run the kernel in this mode
101 adr r1, BSYM(1f)
102 ldr r5, [r12, #11 * 4] @ read the SVC vector entry
103 str r1, [r12, #11 * 4] @ write the temporary SVC vector entry
104 mov r6, lr @ save LR
105 mov r7, sp @ save SP
106 ldr sp, =__v7m_setup_stack_top
107 cpsie i
108 svc #0
1091: cpsid i
110 str r5, [r12, #11 * 4] @ restore the original SVC vector entry
111 mov lr, r6 @ restore LR
112 mov sp, r7 @ restore SP
113
114 @ Special-purpose control register
115 mov r1, #1
 116 msr control, r1 @ Thread mode has unprivileged access
117
118 @ Configure the System Control Register to ensure 8-byte stack alignment
119 @ Note the STKALIGN bit is either RW or RAO.
120 ldr r12, [r0, V7M_SCB_CCR] @ system control register
121 orr r12, #V7M_SCB_CCR_STKALIGN
122 str r12, [r0, V7M_SCB_CCR]
123 mov pc, lr
124ENDPROC(__v7m_setup)
125
126 define_processor_functions v7m, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1
127
128 .section ".rodata"
129 string cpu_arch_name, "armv7m"
 130 string cpu_elf_name, "v7m"
 131 string cpu_v7m_name, "ARMv7-M"
132
133 .section ".proc.info.init", #alloc, #execinstr
134
135 /*
136 * Match any ARMv7-M processor core.
137 */
138 .type __v7m_proc_info, #object
139__v7m_proc_info:
140 .long 0x000f0000 @ Required ID value
141 .long 0x000f0000 @ Mask for ID
142 .long 0 @ proc_info_list.__cpu_mm_mmu_flags
143 .long 0 @ proc_info_list.__cpu_io_mmu_flags
144 b __v7m_setup @ proc_info_list.__cpu_flush
145 .long cpu_arch_name
146 .long cpu_elf_name
147 .long HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT
148 .long cpu_v7m_name
149 .long v7m_processor_functions @ proc_info_list.proc
150 .long 0 @ proc_info_list.tlb
151 .long 0 @ proc_info_list.user
152 .long nop_cache_fns @ proc_info_list.cache
153 .size __v7m_proc_info, . - __v7m_proc_info
154
155__v7m_setup_stack:
156 .space 4 * 8 @ 8 registers
157__v7m_setup_stack_top:
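
A closing note on the magic "11 * 4" in __v7m_setup: the ARMv7-M vector table is an array of 32-bit handler addresses indexed by exception number, and SVCall is exception 11. The setup code patches that slot, issues "svc #0" to drop into handler mode, then restores it. The same trick as a C sketch (the vector pointer and temporary handler stand in for the r12/r1 values in the code above):

	extern void v7m_temp_svc_handler(void);	/* stands in for label 1f */

	static void patch_svc_entry(u32 *vectors)	/* VTOR target */
	{
		u32 saved = vectors[11];	/* SVCall == exception 11 */
		vectors[11] = (u32)v7m_temp_svc_handler;
		/* "svc #0" now enters the temporary handler in handler mode */
		vectors[11] = saved;		/* original entry restored */
	}
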