 arch/arm/Kconfig                   |   4
 arch/arm/Kconfig-nommu             |   2
 arch/arm/Makefile                  |   1
 arch/arm/include/asm/assembler.h   |  17
 arch/arm/include/asm/cputype.h     |  44
 arch/arm/include/asm/glue-cache.h  |  27
 arch/arm/include/asm/glue-df.h     |   8
 arch/arm/include/asm/glue-proc.h   |   9
 arch/arm/include/asm/irqflags.h    |  22
 arch/arm/include/asm/ptrace.h      |   4
 arch/arm/include/asm/system_info.h |   1
 arch/arm/include/asm/v7m.h         |  44
 arch/arm/include/uapi/asm/ptrace.h |  35
 arch/arm/kernel/Makefile           |   8
 arch/arm/kernel/entry-common.S     |   4
 arch/arm/kernel/entry-header.S     | 124
 arch/arm/kernel/entry-v7m.S        | 143
 arch/arm/kernel/head-nommu.S       |  10
 arch/arm/kernel/setup.c            |  17
 arch/arm/kernel/traps.c            |   8
 arch/arm/mm/Kconfig                |  21
 arch/arm/mm/Makefile               |   2
 arch/arm/mm/cache-nop.S            |  50
 arch/arm/mm/nommu.c                |   7
 arch/arm/mm/proc-v7m.S             | 157
 25 files changed, 742 insertions(+), 27 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 49d993cee512..5543d36c2834 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -9,7 +9,7 @@ config ARM
 	select BUILDTIME_EXTABLE_SORT if MMU
 	select CPU_PM if (SUSPEND || CPU_IDLE)
 	select DCACHE_WORD_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && !CPU_BIG_ENDIAN && MMU
-	select GENERIC_ATOMIC64 if (CPU_V6 || !CPU_32v6K || !AEABI)
+	select GENERIC_ATOMIC64 if (CPU_V7M || CPU_V6 || !CPU_32v6K || !AEABI)
 	select GENERIC_CLOCKEVENTS_BROADCAST if SMP
 	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
@@ -1585,7 +1585,7 @@ config SCHED_HRTICK
 
 config THUMB2_KERNEL
 	bool "Compile the kernel in Thumb-2 mode" if !CPU_THUMBONLY
-	depends on CPU_V7 && !CPU_V6 && !CPU_V6K
+	depends on (CPU_V7 || CPU_V7M) && !CPU_V6 && !CPU_V6K
 	default y if CPU_THUMBONLY
 	select AEABI
 	select ARM_ASM_UNIFIED
diff --git a/arch/arm/Kconfig-nommu b/arch/arm/Kconfig-nommu
index 2cef8e13f9f8..c859495da480 100644
--- a/arch/arm/Kconfig-nommu
+++ b/arch/arm/Kconfig-nommu
@@ -28,7 +28,7 @@ config FLASH_SIZE
 config PROCESSOR_ID
 	hex 'Hard wire the processor ID'
 	default 0x00007700
-	depends on !CPU_CP15
+	depends on !(CPU_CP15 || CPU_V7M)
 	help
 	  If processor has no CP15 register, this processor ID is
 	  used instead of the auto-probing which utilizes the register.
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 1ba358ba16b8..3380c4f51757 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -59,6 +59,7 @@ comma = ,
 # Note that GCC does not numerically define an architecture version
 # macro, but instead defines a whole series of macros which makes
 # testing for a specific architecture or later rather impossible.
+arch-$(CONFIG_CPU_32v7M)	:=-D__LINUX_ARM_ARCH__=7 -march=armv7-m -Wa,-march=armv7-m
 arch-$(CONFIG_CPU_32v7)		:=-D__LINUX_ARM_ARCH__=7 $(call cc-option,-march=armv7-a,-march=armv5t -Wa$(comma)-march=armv7-a)
 arch-$(CONFIG_CPU_32v6)		:=-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6,-march=armv5t -Wa$(comma)-march=armv6)
 # Only override the compiler option if ARMv6. The ARMv6K extensions are
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 05ee9eebad6b..a5fef710af32 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -136,7 +136,11 @@
  * assumes FIQs are enabled, and that the processor is in SVC mode.
  */
 	.macro	save_and_disable_irqs, oldcpsr
+#ifdef CONFIG_CPU_V7M
+	mrs	\oldcpsr, primask
+#else
 	mrs	\oldcpsr, cpsr
+#endif
 	disable_irq
 	.endm
 
@@ -150,7 +154,11 @@
  * guarantee that this will preserve the flags.
  */
 	.macro	restore_irqs_notrace, oldcpsr
+#ifdef CONFIG_CPU_V7M
+	msr	primask, \oldcpsr
+#else
 	msr	cpsr_c, \oldcpsr
+#endif
 	.endm
 
 	.macro restore_irqs, oldcpsr
@@ -229,7 +237,14 @@
 #endif
 	.endm
 
-#ifdef CONFIG_THUMB2_KERNEL
+#if defined(CONFIG_CPU_V7M)
+/*
+ * setmode is used to assert to be in svc mode during boot. For v7-M
+ * this is done in __v7m_setup, so setmode can be empty here.
+ */
+	.macro	setmode, mode, reg
+	.endm
+#elif defined(CONFIG_THUMB2_KERNEL)
 	.macro	setmode, mode, reg
 	mov	\reg, #\mode
 	msr	cpsr_c, \reg
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 7652712d1d14..ec635ff32f49 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -10,6 +10,22 @@
 #define CPUID_TLBTYPE	3
 #define CPUID_MPIDR	5
 
+#ifdef CONFIG_CPU_V7M
+#define CPUID_EXT_PFR0	0x40
+#define CPUID_EXT_PFR1	0x44
+#define CPUID_EXT_DFR0	0x48
+#define CPUID_EXT_AFR0	0x4c
+#define CPUID_EXT_MMFR0	0x50
+#define CPUID_EXT_MMFR1	0x54
+#define CPUID_EXT_MMFR2	0x58
+#define CPUID_EXT_MMFR3	0x5c
+#define CPUID_EXT_ISAR0	0x60
+#define CPUID_EXT_ISAR1	0x64
+#define CPUID_EXT_ISAR2	0x68
+#define CPUID_EXT_ISAR3	0x6c
+#define CPUID_EXT_ISAR4	0x70
+#define CPUID_EXT_ISAR5	0x74
+#else
 #define CPUID_EXT_PFR0	"c1, 0"
 #define CPUID_EXT_PFR1	"c1, 1"
 #define CPUID_EXT_DFR0	"c1, 2"
@@ -24,6 +40,7 @@
 #define CPUID_EXT_ISAR3	"c2, 3"
 #define CPUID_EXT_ISAR4	"c2, 4"
 #define CPUID_EXT_ISAR5	"c2, 5"
+#endif
 
 #define MPIDR_SMP_BITMASK (0x3 << 30)
 #define MPIDR_SMP_VALUE (0x2 << 30)
@@ -79,7 +96,23 @@ extern unsigned int processor_id;
 		__val;							\
 	})
 
-#else /* ifdef CONFIG_CPU_CP15 */
+#elif defined(CONFIG_CPU_V7M)
+
+#include <asm/io.h>
+#include <asm/v7m.h>
+
+#define read_cpuid(reg)							\
+	({								\
+		WARN_ON_ONCE(1);					\
+		0;							\
+	})
+
+static inline unsigned int __attribute_const__ read_cpuid_ext(unsigned offset)
+{
+	return readl(BASEADDR_V7M_SCB + offset);
+}
+
+#else /* ifdef CONFIG_CPU_CP15 / elif defined (CONFIG_CPU_V7M) */
 
 /*
  * read_cpuid and read_cpuid_ext should only ever be called on machines that
@@ -106,7 +139,14 @@ static inline unsigned int __attribute_const__ read_cpuid_id(void)
 	return read_cpuid(CPUID_ID);
 }
 
-#else /* ifdef CONFIG_CPU_CP15 */
+#elif defined(CONFIG_CPU_V7M)
+
+static inline unsigned int __attribute_const__ read_cpuid_id(void)
+{
+	return readl(BASEADDR_V7M_SCB + V7M_SCB_CPUID);
+}
+
+#else /* ifdef CONFIG_CPU_CP15 / elif defined(CONFIG_CPU_V7M) */
 
 static inline unsigned int __attribute_const__ read_cpuid_id(void)
 {
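
The cputype.h hunk above replaces the CP15 mrc-based ID reads with plain loads from the memory-mapped System Control Block, which is why the CPUID_EXT_* symbols become byte offsets (0x40, 0x44, ...) instead of coprocessor register names. A minimal host-side sketch of that lookup follows; the scb[] array and the sample ID value are stand-ins for the real register block at 0xe000ed00, not kernel API.

	#include <stdint.h>
	#include <stdio.h>

	#define V7M_SCB_CPUID	0x00	/* offsets as defined in the patch */
	#define CPUID_EXT_PFR0	0x40

	/* Stand-in for the SCB register block that BASEADDR_V7M_SCB points at. */
	static uint32_t scb[0x80 / 4];

	/* Mirrors the v7-M read_cpuid_ext(): a 32-bit load at SCB base + offset. */
	static uint32_t read_cpuid_ext(unsigned int offset)
	{
		return scb[offset / 4];
	}

	int main(void)
	{
		scb[V7M_SCB_CPUID / 4] = 0x410fc240;	/* arbitrary example ID value */
		printf("CPUID   = 0x%08x\n", (unsigned)read_cpuid_ext(V7M_SCB_CPUID));
		printf("ID_PFR0 = 0x%08x\n", (unsigned)read_cpuid_ext(CPUID_EXT_PFR0));
		return 0;
	}
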
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index ea289e1435e7..c81adc08b3fb 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -117,10 +117,37 @@
 # endif
 #endif
 
+#if defined(CONFIG_CPU_V7M)
+# ifdef _CACHE
+#  define MULTI_CACHE 1
+# else
+#  define _CACHE nop
+# endif
+#endif
+
 #if !defined(_CACHE) && !defined(MULTI_CACHE)
 #error Unknown cache maintenance model
 #endif
 
+#ifndef __ASSEMBLER__
+extern inline void nop_flush_icache_all(void) { }
+extern inline void nop_flush_kern_cache_all(void) { }
+extern inline void nop_flush_kern_cache_louis(void) { }
+extern inline void nop_flush_user_cache_all(void) { }
+extern inline void nop_flush_user_cache_range(unsigned long a,
+		unsigned long b, unsigned int c) { }
+
+extern inline void nop_coherent_kern_range(unsigned long a, unsigned long b) { }
+extern inline int nop_coherent_user_range(unsigned long a,
+		unsigned long b) { return 0; }
+extern inline void nop_flush_kern_dcache_area(void *a, size_t s) { }
+
+extern inline void nop_dma_flush_range(const void *a, const void *b) { }
+
+extern inline void nop_dma_map_area(const void *s, size_t l, int f) { }
+extern inline void nop_dma_unmap_area(const void *s, size_t l, int f) { }
+#endif
+
 #ifndef MULTI_CACHE
 #define __cpuc_flush_icache_all	__glue(_CACHE,_flush_icache_all)
 #define __cpuc_flush_kern_all	__glue(_CACHE,_flush_kern_cache_all)
diff --git a/arch/arm/include/asm/glue-df.h b/arch/arm/include/asm/glue-df.h
index b6e9f2c108b5..6b70f1b46a6e 100644
--- a/arch/arm/include/asm/glue-df.h
+++ b/arch/arm/include/asm/glue-df.h
@@ -95,6 +95,14 @@
 # endif
 #endif
 
+#ifdef CONFIG_CPU_ABRT_NOMMU
+# ifdef CPU_DABORT_HANDLER
+#  define MULTI_DABORT 1
+# else
+#  define CPU_DABORT_HANDLER nommu_early_abort
+# endif
+#endif
+
 #ifndef CPU_DABORT_HANDLER
 #error Unknown data abort handler type
 #endif
diff --git a/arch/arm/include/asm/glue-proc.h b/arch/arm/include/asm/glue-proc.h
index ac1dd54724b6..f2f39bcf7945 100644
--- a/arch/arm/include/asm/glue-proc.h
+++ b/arch/arm/include/asm/glue-proc.h
@@ -230,6 +230,15 @@
 # endif
 #endif
 
+#ifdef CONFIG_CPU_V7M
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_v7m
+# endif
+#endif
+
 #ifndef MULTI_CPU
 #define cpu_proc_init			__glue(CPU_NAME,_proc_init)
 #define cpu_proc_fin			__glue(CPU_NAME,_proc_fin)
diff --git a/arch/arm/include/asm/irqflags.h b/arch/arm/include/asm/irqflags.h
index 1e6cca55c750..3b763d6652a0 100644
--- a/arch/arm/include/asm/irqflags.h
+++ b/arch/arm/include/asm/irqflags.h
@@ -8,6 +8,16 @@
 /*
  * CPU interrupt mask handling.
  */
+#ifdef CONFIG_CPU_V7M
+#define IRQMASK_REG_NAME_R "primask"
+#define IRQMASK_REG_NAME_W "primask"
+#define IRQMASK_I_BIT	1
+#else
+#define IRQMASK_REG_NAME_R "cpsr"
+#define IRQMASK_REG_NAME_W "cpsr_c"
+#define IRQMASK_I_BIT	PSR_I_BIT
+#endif
+
 #if __LINUX_ARM_ARCH__ >= 6
 
 static inline unsigned long arch_local_irq_save(void)
@@ -15,7 +25,7 @@ static inline unsigned long arch_local_irq_save(void)
 	unsigned long flags;
 
 	asm volatile(
-		"	mrs	%0, cpsr	@ arch_local_irq_save\n"
+		"	mrs	%0, " IRQMASK_REG_NAME_R "	@ arch_local_irq_save\n"
 		"	cpsid	i"
 		: "=r" (flags) : : "memory", "cc");
 	return flags;
@@ -129,7 +139,7 @@ static inline unsigned long arch_local_save_flags(void)
 {
 	unsigned long flags;
 	asm volatile(
-		"	mrs	%0, cpsr	@ local_save_flags"
+		"	mrs	%0, " IRQMASK_REG_NAME_R "	@ local_save_flags"
 		: "=r" (flags) : : "memory", "cc");
 	return flags;
 }
@@ -140,7 +150,7 @@ static inline unsigned long arch_local_save_flags(void)
 static inline void arch_local_irq_restore(unsigned long flags)
 {
 	asm volatile(
-		"	msr	cpsr_c, %0	@ local_irq_restore"
+		"	msr	" IRQMASK_REG_NAME_W ", %0	@ local_irq_restore"
 		:
 		: "r" (flags)
 		: "memory", "cc");
@@ -148,8 +158,8 @@ static inline void arch_local_irq_restore(unsigned long flags)
 
 static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
-	return flags & PSR_I_BIT;
+	return flags & IRQMASK_I_BIT;
 }
 
-#endif
-#endif
+#endif /* ifdef __KERNEL__ */
+#endif /* ifndef __ASM_ARM_IRQFLAGS_H */
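
The irqflags.h hunk only swaps which register the flag word comes from and which bit means "IRQs masked": PRIMASK bit 0 on v7-M instead of CPSR.I (bit 7) elsewhere. A small host-side model of arch_irqs_disabled_flags() under both definitions is shown below; it operates on plain integers and deliberately leaves out the privileged mrs/msr accesses.

	#include <stdio.h>

	/* Bit meanings from the patch. */
	#define PSR_I_BIT		0x00000080	/* CPSR.I on classic ARM */
	#define IRQMASK_I_BIT_V7M	1		/* PRIMASK bit 0 on ARMv7-M */

	/* Classic ARM: flags is a CPSR snapshot, IRQs are masked when I is set. */
	static int arm_irqs_disabled_flags(unsigned long flags)
	{
		return (flags & PSR_I_BIT) != 0;
	}

	/* ARMv7-M: flags is a PRIMASK snapshot, IRQs are masked when bit 0 is set. */
	static int v7m_irqs_disabled_flags(unsigned long flags)
	{
		return (flags & IRQMASK_I_BIT_V7M) != 0;
	}

	int main(void)
	{
		printf("classic, CPSR=0x600000d3 -> disabled=%d\n",
		       arm_irqs_disabled_flags(0x600000d3));
		printf("v7-M, PRIMASK=0x1        -> disabled=%d\n",
		       v7m_irqs_disabled_flags(0x1));
		return 0;
	}
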
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 3d52ee1bfb31..04c99f36ff7f 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -45,6 +45,7 @@ struct pt_regs {
  */
 static inline int valid_user_regs(struct pt_regs *regs)
 {
+#ifndef CONFIG_CPU_V7M
 	unsigned long mode = regs->ARM_cpsr & MODE_MASK;
 
 	/*
@@ -67,6 +68,9 @@ static inline int valid_user_regs(struct pt_regs *regs)
 	regs->ARM_cpsr |= USR_MODE;
 
 	return 0;
+#else /* ifndef CONFIG_CPU_V7M */
+	return 1;
+#endif
 }
 
 static inline long regs_return_value(struct pt_regs *regs)
diff --git a/arch/arm/include/asm/system_info.h b/arch/arm/include/asm/system_info.h
index dfd386d0c022..720ea0320a6d 100644
--- a/arch/arm/include/asm/system_info.h
+++ b/arch/arm/include/asm/system_info.h
@@ -11,6 +11,7 @@
 #define CPU_ARCH_ARMv5TEJ	7
 #define CPU_ARCH_ARMv6		8
 #define CPU_ARCH_ARMv7		9
+#define CPU_ARCH_ARMv7M		10
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/arm/include/asm/v7m.h b/arch/arm/include/asm/v7m.h
new file mode 100644
index 000000000000..fa88d09fa3d9
--- /dev/null
+++ b/arch/arm/include/asm/v7m.h
@@ -0,0 +1,44 @@
+/*
+ * Common defines for v7m cpus
+ */
+#define V7M_SCS_ICTR			IOMEM(0xe000e004)
+#define V7M_SCS_ICTR_INTLINESNUM_MASK	0x0000000f
+
+#define BASEADDR_V7M_SCB		IOMEM(0xe000ed00)
+
+#define V7M_SCB_CPUID			0x00
+
+#define V7M_SCB_ICSR			0x04
+#define V7M_SCB_ICSR_PENDSVSET		(1 << 28)
+#define V7M_SCB_ICSR_PENDSVCLR		(1 << 27)
+#define V7M_SCB_ICSR_RETTOBASE		(1 << 11)
+
+#define V7M_SCB_VTOR			0x08
+
+#define V7M_SCB_SCR			0x10
+#define V7M_SCB_SCR_SLEEPDEEP		(1 << 2)
+
+#define V7M_SCB_CCR			0x14
+#define V7M_SCB_CCR_STKALIGN		(1 << 9)
+
+#define V7M_SCB_SHPR2			0x1c
+#define V7M_SCB_SHPR3			0x20
+
+#define V7M_SCB_SHCSR			0x24
+#define V7M_SCB_SHCSR_USGFAULTENA	(1 << 18)
+#define V7M_SCB_SHCSR_BUSFAULTENA	(1 << 17)
+#define V7M_SCB_SHCSR_MEMFAULTENA	(1 << 16)
+
+#define V7M_xPSR_FRAMEPTRALIGN		0x00000200
+#define V7M_xPSR_EXCEPTIONNO		0x000001ff
+
+/*
+ * When branching to an address that has bits [31:28] == 0xf an exception return
+ * occurs. Bits [27:5] are reserved (SBOP). If the processor implements the FP
+ * extension Bit [4] defines if the exception frame has space allocated for FP
+ * state information, SBOP otherwise. Bit [3] defines the mode that is returned
+ * to (0 -> handler mode; 1 -> thread mode). Bit [2] defines which sp is used
+ * (0 -> msp; 1 -> psp). Bits [1:0] are fixed to 0b01.
+ */
+#define EXC_RET_STACK_MASK		0x00000004
+#define EXC_RET_THREADMODE_PROCESSSTACK	0xfffffffd
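
The comment above EXC_RET_STACK_MASK describes how an EXC_RETURN value in LR is interpreted on exception return; the entry code in this series only ever tests bit 2 and loads the fixed 0xfffffffd value. A small decoder that follows that description (illustrative only, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	#define EXC_RET_STACK_MASK		0x00000004
	#define EXC_RET_THREADMODE_PROCESSSTACK	0xfffffffd

	/* Decode an EXC_RETURN value per the comment in v7m.h:
	 * bit 3: 0 = return to handler mode, 1 = return to thread mode
	 * bit 2: 0 = return uses MSP,        1 = return uses PSP
	 */
	static void decode_exc_return(uint32_t lr)
	{
		printf("EXC_RETURN %08x: %s mode, %s\n", (unsigned)lr,
		       (lr & 0x8) ? "thread" : "handler",
		       (lr & EXC_RET_STACK_MASK) ? "process stack (PSP)"
						 : "main stack (MSP)");
	}

	int main(void)
	{
		decode_exc_return(EXC_RET_THREADMODE_PROCESSSTACK);	/* 0xfffffffd */
		decode_exc_return(0xfffffff1);	/* return to handler mode on MSP */
		return 0;
	}
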
diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
index 96ee0929790f..5af0ed1b825a 100644
--- a/arch/arm/include/uapi/asm/ptrace.h
+++ b/arch/arm/include/uapi/asm/ptrace.h
@@ -34,28 +34,47 @@
 
 /*
  * PSR bits
+ * Note on V7M there is no mode contained in the PSR
  */
 #define USR26_MODE	0x00000000
 #define FIQ26_MODE	0x00000001
 #define IRQ26_MODE	0x00000002
 #define SVC26_MODE	0x00000003
+#if defined(__KERNEL__) && defined(CONFIG_CPU_V7M)
+/*
+ * Use 0 here to get code right that creates a userspace
+ * or kernel space thread.
+ */
+#define USR_MODE	0x00000000
+#define SVC_MODE	0x00000000
+#else
 #define USR_MODE	0x00000010
+#define SVC_MODE	0x00000013
+#endif
 #define FIQ_MODE	0x00000011
 #define IRQ_MODE	0x00000012
-#define SVC_MODE	0x00000013
 #define ABT_MODE	0x00000017
 #define HYP_MODE	0x0000001a
 #define UND_MODE	0x0000001b
 #define SYSTEM_MODE	0x0000001f
 #define MODE32_BIT	0x00000010
 #define MODE_MASK	0x0000001f
-#define PSR_T_BIT	0x00000020
-#define PSR_F_BIT	0x00000040
-#define PSR_I_BIT	0x00000080
-#define PSR_A_BIT	0x00000100
-#define PSR_E_BIT	0x00000200
-#define PSR_J_BIT	0x01000000
-#define PSR_Q_BIT	0x08000000
+
+#define V4_PSR_T_BIT	0x00000020	/* >= V4T, but not V7M */
+#define V7M_PSR_T_BIT	0x01000000
+#if defined(__KERNEL__) && defined(CONFIG_CPU_V7M)
+#define PSR_T_BIT	V7M_PSR_T_BIT
+#else
+/* for compatibility */
+#define PSR_T_BIT	V4_PSR_T_BIT
+#endif
+
+#define PSR_F_BIT	0x00000040	/* >= V4, but not V7M */
+#define PSR_I_BIT	0x00000080	/* >= V4, but not V7M */
+#define PSR_A_BIT	0x00000100	/* >= V6, but not V7M */
+#define PSR_E_BIT	0x00000200	/* >= V6, but not V7M */
+#define PSR_J_BIT	0x01000000	/* >= V5J, but not V7M */
+#define PSR_Q_BIT	0x08000000	/* >= V5E, including V7M */
 #define PSR_V_BIT	0x10000000
 #define PSR_C_BIT	0x20000000
 #define PSR_Z_BIT	0x40000000
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 5f3338eacad2..00d703c49f82 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -15,7 +15,7 @@ CFLAGS_REMOVE_return_address.o = -pg
 
 # Object file lists.
 
-obj-y		:= elf.o entry-armv.o entry-common.o irq.o opcodes.o \
+obj-y		:= elf.o entry-common.o irq.o opcodes.o \
 		   process.o ptrace.o return_address.o sched_clock.o \
 		   setup.o signal.o stacktrace.o sys_arm.o time.o traps.o
 
@@ -23,6 +23,12 @@ obj-$(CONFIG_ATAGS)		+= atags_parse.o
 obj-$(CONFIG_ATAGS_PROC)	+= atags_proc.o
 obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += atags_compat.o
 
+ifeq ($(CONFIG_CPU_V7M),y)
+obj-y		+= entry-v7m.o
+else
+obj-y		+= entry-armv.o
+endif
+
 obj-$(CONFIG_OC_ETM)		+= etm.o
 obj-$(CONFIG_CPU_IDLE)		+= cpuidle.o
 obj-$(CONFIG_ISA_DMA_API)	+= dma.o
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index bc5bc0a97131..85a72b0809ca 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -350,6 +350,9 @@ ENDPROC(ftrace_stub)
 
 	.align	5
 ENTRY(vector_swi)
+#ifdef CONFIG_CPU_V7M
+	v7m_exception_entry
+#else
 	sub	sp, sp, #S_FRAME_SIZE
 	stmia	sp, {r0 - r12}			@ Calling r0 - r12
  ARM(	add	r8, sp, #S_PC		)
@@ -360,6 +363,7 @@ ENTRY(vector_swi)
 	str	lr, [sp, #S_PC]			@ Save calling PC
 	str	r8, [sp, #S_PSR]		@ Save CPSR
 	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
+#endif
 	zero_fp
 
 	/*
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 160f3376ba6d..de23a9beed13 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -5,6 +5,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/errno.h>
 #include <asm/thread_info.h>
+#include <asm/v7m.h>
 
 @ Bad Abort numbers
 @ -----------------
@@ -44,6 +45,116 @@
 #endif
 	.endm
 
+#ifdef CONFIG_CPU_V7M
+/*
+ * ARMv7-M exception entry/exit macros.
+ *
+ * xPSR, ReturnAddress(), LR (R14), R12, R3, R2, R1, and R0 are
+ * automatically saved on the current stack (32 bytes) before
+ * switching to the exception stack (SP_main).
+ *
+ * If the exception is taken while in user mode, SP_main is
+ * empty. Otherwise, SP_main is aligned to 64 bit automatically
+ * (CCR.STKALIGN set).
+ *
+ * Linux assumes that the interrupts are disabled when entering an
+ * exception handler and it may BUG if this is not the case. Interrupts
+ * are disabled during entry and reenabled in the exit macro.
+ *
+ * v7m_exception_slow_exit is used when returning from SVC or PendSV.
+ * When returning to kernel mode, we don't return from exception.
+ */
+	.macro	v7m_exception_entry
+	@ determine the location of the registers saved by the core during
+	@ exception entry. Depending on the mode the cpu was in when the
+	@ exception happened that is either on the main or the process stack.
+	@ Bit 2 of EXC_RETURN stored in the lr register specifies which stack
+	@ was used.
+	tst	lr, #EXC_RET_STACK_MASK
+	mrsne	r12, psp
+	moveq	r12, sp
+
+	@ we cannot rely on r0-r3 and r12 matching the value saved in the
+	@ exception frame because of tail-chaining. So these have to be
+	@ reloaded.
+	ldmia	r12!, {r0-r3}
+
+	@ Linux expects to have irqs off. Do it here before taking stack space
+	cpsid	i
+
+	sub	sp, #S_FRAME_SIZE-S_IP
+	stmdb	sp!, {r0-r11}
+
+	@ load saved r12, lr, return address and xPSR.
+	@ r0-r7 are used for signals and never touched from now on. Clobbering
+	@ r8-r12 is OK.
+	mov	r9, r12
+	ldmia	r9!, {r8, r10-r12}
+
+	@ calculate the original stack pointer value.
+	@ r9 currently points to the memory location just above the auto saved
+	@ xPSR.
+	@ The cpu might automatically 8-byte align the stack. Bit 9
+	@ of the saved xPSR specifies if stack aligning took place. In this case
+	@ another 32-bit value is included in the stack.
+
+	tst	r12, V7M_xPSR_FRAMEPTRALIGN
+	addne	r9, r9, #4
+
+	@ store saved r12 using str to have a register to hold the base for stm
+	str	r8, [sp, #S_IP]
+	add	r8, sp, #S_SP
+	@ store r13-r15, xPSR
+	stmia	r8!, {r9-r12}
+	@ store old_r0
+	str	r0, [r8]
+	.endm
+
+	/*
+	 * PENDSV and SVCALL are configured to have the same exception
+	 * priorities. As a kernel thread runs at SVCALL execution priority it
+	 * can never be preempted and so we will never have to return to a
+	 * kernel thread here.
+	 */
+	.macro	v7m_exception_slow_exit ret_r0
+	cpsid	i
+	ldr	lr, =EXC_RET_THREADMODE_PROCESSSTACK
+
+	@ read original r12, sp, lr, pc and xPSR
+	add	r12, sp, #S_IP
+	ldmia	r12, {r1-r5}
+
+	@ an exception frame is always 8-byte aligned. To tell the hardware if
+	@ the sp to be restored is aligned or not set bit 9 of the saved xPSR
+	@ accordingly.
+	tst	r2, #4
+	subne	r2, r2, #4
+	orrne	r5, V7M_xPSR_FRAMEPTRALIGN
+	biceq	r5, V7M_xPSR_FRAMEPTRALIGN
+
+	@ write basic exception frame
+	stmdb	r2!, {r1, r3-r5}
+	ldmia	sp, {r1, r3-r5}
+	.if	\ret_r0
+	stmdb	r2!, {r0, r3-r5}
+	.else
+	stmdb	r2!, {r1, r3-r5}
+	.endif
+
+	@ restore process sp
+	msr	psp, r2
+
+	@ restore original r4-r11
+	ldmia	sp!, {r0-r11}
+
+	@ restore main sp
+	add	sp, sp, #S_FRAME_SIZE-S_IP
+
+	cpsie	i
+	bx	lr
+	.endm
+#endif	/* CONFIG_CPU_V7M */
+
 @
 @ Store/load the USER SP and LR registers by switching to the SYS
 @ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
@@ -165,6 +276,18 @@
 	rfeia	sp!
 	.endm
 
+#ifdef CONFIG_CPU_V7M
+	/*
+	 * Note we don't need to do clrex here as clearing the local monitor is
+	 * part of each exception entry and exit sequence.
+	 */
+	.macro	restore_user_regs, fast = 0, offset = 0
+	.if	\offset
+	add	sp, #\offset
+	.endif
+	v7m_exception_slow_exit ret_r0 = \fast
+	.endm
+#else	/* ifdef CONFIG_CPU_V7M */
 	.macro	restore_user_regs, fast = 0, offset = 0
 	clrex					@ clear the exclusive monitor
 	mov	r2, sp
@@ -181,6 +304,7 @@
 	add	sp, sp, #S_FRAME_SIZE - S_SP
 	movs	pc, lr				@ return & move spsr_svc into cpsr
 	.endm
+#endif	/* ifdef CONFIG_CPU_V7M / else */
 
 	.macro	get_thread_info, rd
 	mov	\rd, sp
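
v7m_exception_entry above rebuilds a full pt_regs frame from two pieces: the eight words the core stacks automatically (r0-r3, r12, lr, return address, xPSR) and the registers the macro saves itself, plus the recomputed original sp. A sketch of the hardware-stacked part as the macro assumes it; the struct and helper below are illustrative, not kernel types.

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Basic (non-FP) exception frame pushed by an ARMv7-M core on exception
	 * entry, lowest address first. v7m_exception_entry reloads r0-r3 and r12
	 * from here because tail-chaining may have clobbered the live registers,
	 * and uses the stacked xPSR bit 9 to undo the optional 8-byte realignment.
	 */
	struct v7m_hw_frame {
		uint32_t r0;
		uint32_t r1;
		uint32_t r2;
		uint32_t r3;
		uint32_t r12;
		uint32_t lr;		/* r14 at the point of the exception */
		uint32_t ret_addr;	/* ReturnAddress(), the interrupted pc */
		uint32_t xpsr;		/* bit 9 set if the core inserted a pad word */
	};

	/* Original stack pointer once the frame (and optional pad word) is popped. */
	static uint32_t v7m_orig_sp(uint32_t sp_at_entry, const struct v7m_hw_frame *f)
	{
		uint32_t sp = sp_at_entry + sizeof(*f);

		if (f->xpsr & 0x200)	/* V7M_xPSR_FRAMEPTRALIGN */
			sp += 4;
		return sp;
	}

	int main(void)
	{
		struct v7m_hw_frame f = { .xpsr = 0x01000200 };	/* pad bit set */

		printf("frame size = %zu bytes\n", sizeof(f));
		printf("orig sp    = 0x%08x\n", (unsigned)v7m_orig_sp(0x20001000, &f));
		return 0;
	}
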
diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S
new file mode 100644
index 000000000000..e00621f1403f
--- /dev/null
+++ b/arch/arm/kernel/entry-v7m.S
@@ -0,0 +1,143 @@
+/*
+ * linux/arch/arm/kernel/entry-v7m.S
+ *
+ * Copyright (C) 2008 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Low-level vector interface routines for the ARMv7-M architecture
+ */
+#include <asm/memory.h>
+#include <asm/glue.h>
+#include <asm/thread_notify.h>
+#include <asm/v7m.h>
+
+#include <mach/entry-macro.S>
+
+#include "entry-header.S"
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+#error "CONFIG_TRACE_IRQFLAGS not supported on the current ARMv7M implementation"
+#endif
+
+__invalid_entry:
+	v7m_exception_entry
+	adr	r0, strerr
+	mrs	r1, ipsr
+	mov	r2, lr
+	bl	printk
+	mov	r0, sp
+	bl	show_regs
+1:	b	1b
+ENDPROC(__invalid_entry)
+
+strerr:	.asciz	"\nUnhandled exception: IPSR = %08lx LR = %08lx\n"
+
+	.align	2
+__irq_entry:
+	v7m_exception_entry
+
+	@
+	@ Invoke the IRQ handler
+	@
+	mrs	r0, ipsr
+	ldr	r1, =V7M_xPSR_EXCEPTIONNO
+	and	r0, r1
+	sub	r0, #16
+	mov	r1, sp
+	stmdb	sp!, {lr}
+	@ routine called with r0 = irq number, r1 = struct pt_regs *
+	bl	nvic_do_IRQ
+
+	pop	{lr}
+	@
+	@ Check for any pending work if returning to user
+	@
+	ldr	r1, =BASEADDR_V7M_SCB
+	ldr	r0, [r1, V7M_SCB_ICSR]
+	tst	r0, V7M_SCB_ICSR_RETTOBASE
+	beq	2f
+
+	get_thread_info tsk
+	ldr	r2, [tsk, #TI_FLAGS]
+	tst	r2, #_TIF_WORK_MASK
+	beq	2f			@ no work pending
+	mov	r0, #V7M_SCB_ICSR_PENDSVSET
+	str	r0, [r1, V7M_SCB_ICSR]	@ raise PendSV
+
+2:
+	@ registers r0-r3 and r12 are automatically restored on exception
+	@ return. r4-r7 were not clobbered in v7m_exception_entry so for
+	@ correctness they don't need to be restored. So only r8-r11 must be
+	@ restored here. The easiest way to do so is to restore r0-r7, too.
+	ldmia	sp!, {r0-r11}
+	add	sp, #S_FRAME_SIZE-S_IP
+	cpsie	i
+	bx	lr
+ENDPROC(__irq_entry)
+
+__pendsv_entry:
+	v7m_exception_entry
+
+	ldr	r1, =BASEADDR_V7M_SCB
+	mov	r0, #V7M_SCB_ICSR_PENDSVCLR
+	str	r0, [r1, V7M_SCB_ICSR]	@ clear PendSV
+
+	@ execute the pending work, including reschedule
+	get_thread_info tsk
+	mov	why, #0
+	b	ret_to_user
+ENDPROC(__pendsv_entry)
+
+/*
+ * Register switch for ARMv7-M processors.
+ * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
+ * previous and next are guaranteed not to be the same.
+ */
+ENTRY(__switch_to)
+	.fnstart
+	.cantunwind
+	add	ip, r1, #TI_CPU_SAVE
+	stmia	ip!, {r4 - r11}		@ Store most regs on stack
+	str	sp, [ip], #4
+	str	lr, [ip], #4
+	mov	r5, r0
+	add	r4, r2, #TI_CPU_SAVE
+	ldr	r0, =thread_notify_head
+	mov	r1, #THREAD_NOTIFY_SWITCH
+	bl	atomic_notifier_call_chain
+	mov	ip, r4
+	mov	r0, r5
+	ldmia	ip!, {r4 - r11}		@ Load all regs saved previously
+	ldr	sp, [ip]
+	ldr	pc, [ip, #4]!
+	.fnend
+ENDPROC(__switch_to)
+
+	.data
+	.align	8
+/*
+ * Vector table (64 words => 256 bytes natural alignment)
+ */
+ENTRY(vector_table)
+	.long	0			@ 0 - Reset stack pointer
+	.long	__invalid_entry		@ 1 - Reset
+	.long	__invalid_entry		@ 2 - NMI
+	.long	__invalid_entry		@ 3 - HardFault
+	.long	__invalid_entry		@ 4 - MemManage
+	.long	__invalid_entry		@ 5 - BusFault
+	.long	__invalid_entry		@ 6 - UsageFault
+	.long	__invalid_entry		@ 7 - Reserved
+	.long	__invalid_entry		@ 8 - Reserved
+	.long	__invalid_entry		@ 9 - Reserved
+	.long	__invalid_entry		@ 10 - Reserved
+	.long	vector_swi		@ 11 - SVCall
+	.long	__invalid_entry		@ 12 - Debug Monitor
+	.long	__invalid_entry		@ 13 - Reserved
+	.long	__pendsv_entry		@ 14 - PendSV
+	.long	__invalid_entry		@ 15 - SysTick
+	.rept	64 - 16
+	.long	__irq_entry		@ 16..64 - External Interrupts
+	.endr
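
__irq_entry derives the Linux IRQ number from IPSR: the low nine bits of xPSR/IPSR hold the active exception number, and external NVIC interrupts start at exception 16, hence the and/sub pair before calling nvic_do_IRQ. The same mapping in C, with an illustrative function name:

	#include <stdio.h>

	#define V7M_xPSR_EXCEPTIONNO	0x000001ff

	/* Exception numbers 0-15 are system exceptions; 16 and up are NVIC lines. */
	static int v7m_exception_to_irq(unsigned int ipsr)
	{
		return (int)(ipsr & V7M_xPSR_EXCEPTIONNO) - 16;
	}

	int main(void)
	{
		printf("exception 16 -> irq %d\n", v7m_exception_to_irq(16));	/* first NVIC line */
		printf("exception 37 -> irq %d\n", v7m_exception_to_irq(37));	/* NVIC line 21 */
		return 0;
	}
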
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 6a2e09c952c7..8812ce88f7a1 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -19,6 +19,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/cp15.h>
 #include <asm/thread_info.h>
+#include <asm/v7m.h>
 
 /*
  * Kernel startup entry point.
@@ -50,10 +51,13 @@ ENTRY(stext)
 
 	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
 						@ and irqs disabled
-#ifndef CONFIG_CPU_CP15
-	ldr	r9, =CONFIG_PROCESSOR_ID
-#else
+#if defined(CONFIG_CPU_CP15)
 	mrc	p15, 0, r9, c0, c0		@ get processor id
+#elif defined(CONFIG_CPU_V7M)
+	ldr	r9, =BASEADDR_V7M_SCB
+	ldr	r9, [r9, V7M_SCB_CPUID]
+#else
+	ldr	r9, =CONFIG_PROCESSOR_ID
 #endif
 	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
 	movs	r10, r5				@ invalid processor (r5=0)?
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 1522c7ae31b0..ea5cd6e3beb6 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -128,7 +128,9 @@ struct stack {
 	u32 und[3];
 } ____cacheline_aligned;
 
+#ifndef CONFIG_CPU_V7M
 static struct stack stacks[NR_CPUS];
+#endif
 
 char elf_platform[ELF_PLATFORM_SIZE];
 EXPORT_SYMBOL(elf_platform);
@@ -207,7 +209,7 @@ static const char *proc_arch[] = {
 	"5TEJ",
 	"6TEJ",
 	"7",
-	"?(11)",
+	"7M",
 	"?(12)",
 	"?(13)",
 	"?(14)",
@@ -216,6 +218,12 @@ static const char *proc_arch[] = {
 	"?(17)",
 };
 
+#ifdef CONFIG_CPU_V7M
+static int __get_cpu_architecture(void)
+{
+	return CPU_ARCH_ARMv7M;
+}
+#else
 static int __get_cpu_architecture(void)
 {
 	int cpu_arch;
@@ -248,6 +256,7 @@ static int __get_cpu_architecture(void)
 
 	return cpu_arch;
 }
+#endif
 
 int __pure cpu_architecture(void)
 {
@@ -293,7 +302,9 @@ static void __init cacheid_init(void)
 {
 	unsigned int arch = cpu_architecture();
 
-	if (arch >= CPU_ARCH_ARMv6) {
+	if (arch == CPU_ARCH_ARMv7M) {
+		cacheid = 0;
+	} else if (arch >= CPU_ARCH_ARMv6) {
 		unsigned int cachetype = read_cpuid_cachetype();
 		if ((cachetype & (7 << 29)) == 4 << 29) {
 			/* ARMv7 register format */
@@ -392,6 +403,7 @@ static void __init feat_v6_fixup(void)
  */
 void notrace cpu_init(void)
 {
+#ifndef CONFIG_CPU_V7M
 	unsigned int cpu = smp_processor_id();
 	struct stack *stk = &stacks[cpu];
 
@@ -442,6 +454,7 @@ void notrace cpu_init(void)
 	      "I" (offsetof(struct stack, und[0])),
 	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
 	    : "r14");
+#endif
 }
 
 int __cpu_logical_map[NR_CPUS];
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 18b32e8e4497..486e12a0f26a 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -812,6 +812,7 @@ static void __init kuser_get_tls_init(unsigned long vectors)
 
 void __init early_trap_init(void *vectors_base)
 {
+#ifndef CONFIG_CPU_V7M
 	unsigned long vectors = (unsigned long)vectors_base;
 	extern char __stubs_start[], __stubs_end[];
 	extern char __vectors_start[], __vectors_end[];
@@ -843,4 +844,11 @@ void __init early_trap_init(void *vectors_base)
 
 	flush_icache_range(vectors, vectors + PAGE_SIZE);
 	modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
+#else /* ifndef CONFIG_CPU_V7M */
+	/*
+	 * on V7-M there is no need to copy the vector table to a dedicated
+	 * memory area. The address is configurable and so a table in the kernel
+	 * image can be used.
+	 */
+#endif
 }
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 35955b54944c..9e8101ecd63e 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -397,6 +397,15 @@ config CPU_V7
 	select CPU_PABRT_V7
 	select CPU_TLB_V7 if MMU
 
+# ARMv7M
+config CPU_V7M
+	bool
+	select CPU_32v7M
+	select CPU_ABRT_NOMMU
+	select CPU_CACHE_NOP
+	select CPU_PABRT_LEGACY
+	select CPU_THUMBONLY
+
 config CPU_THUMBONLY
 	bool
 	# There are no CPUs available with MMU that don't implement an ARM ISA:
@@ -441,6 +450,9 @@ config CPU_32v6K
 config CPU_32v7
 	bool
 
+config CPU_32v7M
+	bool
+
 # The abort model
 config CPU_ABRT_NOMMU
 	bool
@@ -491,6 +503,9 @@ config CPU_CACHE_V6
 config CPU_CACHE_V7
 	bool
 
+config CPU_CACHE_NOP
+	bool
+
 config CPU_CACHE_VIVT
 	bool
 
@@ -613,7 +628,11 @@ config ARCH_DMA_ADDR_T_64BIT
 
 config ARM_THUMB
 	bool "Support Thumb user binaries" if !CPU_THUMBONLY
-	depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || CPU_V7 || CPU_FEROCEON
+	depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || \
+		CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || \
+		CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || \
+		CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || \
+		CPU_V7 || CPU_FEROCEON || CPU_V7M
 	default y
 	help
 	  Say Y if you want to include kernel support for running user space
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 9e51be96f635..ee558a01f390 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_CPU_CACHE_V4WB)	+= cache-v4wb.o
 obj-$(CONFIG_CPU_CACHE_V6)	+= cache-v6.o
 obj-$(CONFIG_CPU_CACHE_V7)	+= cache-v7.o
 obj-$(CONFIG_CPU_CACHE_FA)	+= cache-fa.o
+obj-$(CONFIG_CPU_CACHE_NOP)	+= cache-nop.o
 
 AFLAGS_cache-v6.o	:=-Wa,-march=armv6
 AFLAGS_cache-v7.o	:=-Wa,-march=armv7-a
@@ -87,6 +88,7 @@ obj-$(CONFIG_CPU_FEROCEON)	+= proc-feroceon.o
 obj-$(CONFIG_CPU_V6)		+= proc-v6.o
 obj-$(CONFIG_CPU_V6K)		+= proc-v6.o
 obj-$(CONFIG_CPU_V7)		+= proc-v7.o
+obj-$(CONFIG_CPU_V7M)		+= proc-v7m.o
 
 AFLAGS_proc-v6.o	:=-Wa,-march=armv6
 AFLAGS_proc-v7.o	:=-Wa,-march=armv7-a
diff --git a/arch/arm/mm/cache-nop.S b/arch/arm/mm/cache-nop.S
new file mode 100644
index 000000000000..8e12ddca0031
--- /dev/null
+++ b/arch/arm/mm/cache-nop.S
@@ -0,0 +1,50 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+#include "proc-macros.S"
+
+ENTRY(nop_flush_icache_all)
+	mov	pc, lr
+ENDPROC(nop_flush_icache_all)
+
+	.globl nop_flush_kern_cache_all
+	.equ nop_flush_kern_cache_all, nop_flush_icache_all
+
+	.globl nop_flush_kern_cache_louis
+	.equ nop_flush_kern_cache_louis, nop_flush_icache_all
+
+	.globl nop_flush_user_cache_all
+	.equ nop_flush_user_cache_all, nop_flush_icache_all
+
+	.globl nop_flush_user_cache_range
+	.equ nop_flush_user_cache_range, nop_flush_icache_all
+
+	.globl nop_coherent_kern_range
+	.equ nop_coherent_kern_range, nop_flush_icache_all
+
+ENTRY(nop_coherent_user_range)
+	mov	r0, 0
+	mov	pc, lr
+ENDPROC(nop_coherent_user_range)
+
+	.globl nop_flush_kern_dcache_area
+	.equ nop_flush_kern_dcache_area, nop_flush_icache_all
+
+	.globl nop_dma_flush_range
+	.equ nop_dma_flush_range, nop_flush_icache_all
+
+	.globl nop_dma_map_area
+	.equ nop_dma_map_area, nop_flush_icache_all
+
+	.globl nop_dma_unmap_area
+	.equ nop_dma_unmap_area, nop_flush_icache_all
+
+	__INITDATA
+
+	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
+	define_cache_functions nop
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index d51225f90ae2..dd3a6c670f08 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -20,12 +20,19 @@
 
 void __init arm_mm_memblock_reserve(void)
 {
+#ifndef CONFIG_CPU_V7M
 	/*
 	 * Register the exception vector page.
 	 * some architectures which the DRAM is the exception vector to trap,
 	 * alloc_page breaks with error, although it is not NULL, but "0."
 	 */
 	memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE);
+#else /* ifndef CONFIG_CPU_V7M */
+	/*
+	 * There is no dedicated vector page on V7-M. So nothing needs to be
+	 * reserved here.
+	 */
+#endif
 }
 
 void __init sanity_check_meminfo(void)
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
new file mode 100644
index 000000000000..0c93588fcb91
--- /dev/null
+++ b/arch/arm/mm/proc-v7m.S
@@ -0,0 +1,157 @@
+/*
+ * linux/arch/arm/mm/proc-v7m.S
+ *
+ * Copyright (C) 2008 ARM Ltd.
+ * Copyright (C) 2001 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This is the "shell" of the ARMv7-M processor support.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/v7m.h>
+#include "proc-macros.S"
+
+ENTRY(cpu_v7m_proc_init)
+	mov	pc, lr
+ENDPROC(cpu_v7m_proc_init)
+
+ENTRY(cpu_v7m_proc_fin)
+	mov	pc, lr
+ENDPROC(cpu_v7m_proc_fin)
+
+/*
+ *	cpu_v7m_reset(loc)
+ *
+ *	Perform a soft reset of the system.  Put the CPU into the
+ *	same state as it would be if it had been reset, and branch
+ *	to what would be the reset vector.
+ *
+ *	- loc   - location to jump to for soft reset
+ */
+	.align	5
+ENTRY(cpu_v7m_reset)
+	mov	pc, r0
+ENDPROC(cpu_v7m_reset)
+
+/*
+ *	cpu_v7m_do_idle()
+ *
+ *	Idle the processor (eg, wait for interrupt).
+ *
+ *	IRQs are already disabled.
+ */
+ENTRY(cpu_v7m_do_idle)
+	wfi
+	mov	pc, lr
+ENDPROC(cpu_v7m_do_idle)
+
+ENTRY(cpu_v7m_dcache_clean_area)
+	mov	pc, lr
+ENDPROC(cpu_v7m_dcache_clean_area)
+
+/*
+ * There is no MMU, so there is nothing to do here.
+ */
+ENTRY(cpu_v7m_switch_mm)
+	mov	pc, lr
+ENDPROC(cpu_v7m_switch_mm)
+
+.globl	cpu_v7m_suspend_size
+.equ	cpu_v7m_suspend_size, 0
+
+#ifdef CONFIG_ARM_CPU_SUSPEND
+ENTRY(cpu_v7m_do_suspend)
+	mov	pc, lr
+ENDPROC(cpu_v7m_do_suspend)
+
+ENTRY(cpu_v7m_do_resume)
+	mov	pc, lr
+ENDPROC(cpu_v7m_do_resume)
+#endif
+
+	.section ".text.init", #alloc, #execinstr
+
+/*
+ *	__v7m_setup
+ *
+ *	This should be able to cover all ARMv7-M cores.
+ */
+__v7m_setup:
+	@ Configure the vector table base address
+	ldr	r0, =BASEADDR_V7M_SCB
+	ldr	r12, =vector_table
+	str	r12, [r0, V7M_SCB_VTOR]
+
+	@ enable UsageFault, BusFault and MemManage fault.
+	ldr	r5, [r0, #V7M_SCB_SHCSR]
+	orr	r5, #(V7M_SCB_SHCSR_USGFAULTENA | V7M_SCB_SHCSR_BUSFAULTENA | V7M_SCB_SHCSR_MEMFAULTENA)
+	str	r5, [r0, #V7M_SCB_SHCSR]
+
+	@ Lower the priority of the SVC and PendSV exceptions
+	mov	r5, #0x80000000
+	str	r5, [r0, V7M_SCB_SHPR2]	@ set SVC priority
+	mov	r5, #0x00800000
+	str	r5, [r0, V7M_SCB_SHPR3]	@ set PendSV priority
+
+	@ SVC to run the kernel in this mode
+	adr	r1, BSYM(1f)
+	ldr	r5, [r12, #11 * 4]	@ read the SVC vector entry
+	str	r1, [r12, #11 * 4]	@ write the temporary SVC vector entry
+	mov	r6, lr			@ save LR
+	mov	r7, sp			@ save SP
+	ldr	sp, =__v7m_setup_stack_top
+	cpsie	i
+	svc	#0
+1:	cpsid	i
+	str	r5, [r12, #11 * 4]	@ restore the original SVC vector entry
+	mov	lr, r6			@ restore LR
+	mov	sp, r7			@ restore SP
+
+	@ Special-purpose control register
+	mov	r1, #1
+	msr	control, r1		@ Thread mode has unprivileged access
+
+	@ Configure the System Control Register to ensure 8-byte stack alignment
+	@ Note the STKALIGN bit is either RW or RAO.
+	ldr	r12, [r0, V7M_SCB_CCR]	@ system control register
+	orr	r12, #V7M_SCB_CCR_STKALIGN
+	str	r12, [r0, V7M_SCB_CCR]
+	mov	pc, lr
+ENDPROC(__v7m_setup)
+
+	define_processor_functions v7m, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1
+
+	.section ".rodata"
+	string cpu_arch_name, "armv7m"
+	string cpu_elf_name "v7m"
+	string cpu_v7m_name "ARMv7-M"
+
+	.section ".proc.info.init", #alloc, #execinstr
+
+	/*
+	 * Match any ARMv7-M processor core.
+	 */
+	.type	__v7m_proc_info, #object
+__v7m_proc_info:
+	.long	0x000f0000		@ Required ID value
+	.long	0x000f0000		@ Mask for ID
+	.long	0			@ proc_info_list.__cpu_mm_mmu_flags
+	.long	0			@ proc_info_list.__cpu_io_mmu_flags
+	b	__v7m_setup		@ proc_info_list.__cpu_flush
+	.long	cpu_arch_name
+	.long	cpu_elf_name
+	.long	HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT
+	.long	cpu_v7m_name
+	.long	v7m_processor_functions	@ proc_info_list.proc
+	.long	0			@ proc_info_list.tlb
+	.long	0			@ proc_info_list.user
+	.long	nop_cache_fns		@ proc_info_list.cache
+	.size	__v7m_proc_info, . - __v7m_proc_info
+
+__v7m_setup_stack:
+	.space	4 * 8			@ 8 registers
+__v7m_setup_stack_top:
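
__v7m_setup writes 0x80000000 to SHPR2 and 0x00800000 to SHPR3. In the ARMv7-M SCB layout, SHPR2 bits [31:24] hold the SVCall priority and SHPR3 bits [23:16] hold the PendSV priority, so both handlers end up at priority 0x80, which is the equal-priority arrangement the entry-header.S comment relies on. A small check of that byte extraction (an illustration based on the architectural register layout, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	/* Values written by __v7m_setup. */
	#define SHPR2_VAL	0x80000000u	/* PRI_11 (SVCall) lives in bits [31:24] */
	#define SHPR3_VAL	0x00800000u	/* PRI_14 (PendSV) lives in bits [23:16] */

	int main(void)
	{
		uint8_t svc_prio    = (SHPR2_VAL >> 24) & 0xff;
		uint8_t pendsv_prio = (SHPR3_VAL >> 16) & 0xff;

		printf("SVCall priority = 0x%02x\n", svc_prio);
		printf("PendSV priority = 0x%02x\n", pendsv_prio);
		printf("equal priority? %s\n", svc_prio == pendsv_prio ? "yes" : "no");
		return 0;
	}
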