diff options
Diffstat (limited to 'arch/arm/boot/compressed')
-rw-r--r-- | arch/arm/boot/compressed/head.S | 52 |
1 file changed, 45 insertions, 7 deletions
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index c41a793b519c..2c45b5709fa4 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S | |||
@@ -10,8 +10,11 @@ | |||
10 | */ | 10 | */ |
11 | #include <linux/linkage.h> | 11 | #include <linux/linkage.h> |
12 | #include <asm/assembler.h> | 12 | #include <asm/assembler.h> |
13 | #include <asm/v7m.h> | ||
14 | |||
15 | AR_CLASS( .arch armv7-a ) | ||
16 | M_CLASS( .arch armv7-m ) | ||
13 | 17 | ||
14 | .arch armv7-a | ||
15 | /* | 18 | /* |
16 | * Debugging stuff | 19 | * Debugging stuff |
17 | * | 20 | * |
@@ -114,7 +117,12 @@ | |||
114 | * sort out different calling conventions | 117 | * sort out different calling conventions |
115 | */ | 118 | */ |
116 | .align | 119 | .align |
117 | .arm @ Always enter in ARM state | 120 | /* |
121 | * Always enter in ARM state for CPUs that support the ARM ISA. | ||
122 | * As of today (2014) that's exactly the members of the A and R | ||
123 | * classes. | ||
124 | */ | ||
125 | AR_CLASS( .arm ) | ||
118 | start: | 126 | start: |
119 | .type start,#function | 127 | .type start,#function |
120 | .rept 7 | 128 | .rept 7 |
@@ -132,14 +140,15 @@ start: | |||
132 | 140 | ||
133 | THUMB( .thumb ) | 141 | THUMB( .thumb ) |
134 | 1: | 142 | 1: |
135 | ARM_BE8( setend be ) @ go BE8 if compiled for BE8 | 143 | ARM_BE8( setend be ) @ go BE8 if compiled for BE8 |
136 | mrs r9, cpsr | 144 | AR_CLASS( mrs r9, cpsr ) |
137 | #ifdef CONFIG_ARM_VIRT_EXT | 145 | #ifdef CONFIG_ARM_VIRT_EXT |
138 | bl __hyp_stub_install @ get into SVC mode, reversibly | 146 | bl __hyp_stub_install @ get into SVC mode, reversibly |
139 | #endif | 147 | #endif |
140 | mov r7, r1 @ save architecture ID | 148 | mov r7, r1 @ save architecture ID |
141 | mov r8, r2 @ save atags pointer | 149 | mov r8, r2 @ save atags pointer |
142 | 150 | ||
151 | #ifndef CONFIG_CPU_V7M | ||
143 | /* | 152 | /* |
144 | * Booting from Angel - need to enter SVC mode and disable | 153 | * Booting from Angel - need to enter SVC mode and disable |
145 | * FIQs/IRQs (numeric definitions from angel arm.h source). | 154 | * FIQs/IRQs (numeric definitions from angel arm.h source). |
@@ -155,6 +164,7 @@ not_angel: | |||
155 | safe_svcmode_maskall r0 | 164 | safe_svcmode_maskall r0 |
156 | msr spsr_cxsf, r9 @ Save the CPU boot mode in | 165 | msr spsr_cxsf, r9 @ Save the CPU boot mode in |
157 | @ SPSR | 166 | @ SPSR |
167 | #endif | ||
158 | /* | 168 | /* |
159 | * Note that some cache flushing and other stuff may | 169 | * Note that some cache flushing and other stuff may |
160 | * be needed here - is there an Angel SWI call for this? | 170 | * be needed here - is there an Angel SWI call for this? |
@@ -168,9 +178,26 @@ not_angel: | |||
168 | .text | 178 | .text |
169 | 179 | ||
170 | #ifdef CONFIG_AUTO_ZRELADDR | 180 | #ifdef CONFIG_AUTO_ZRELADDR |
171 | @ determine final kernel image address | 181 | /* |
182 | * Find the start of physical memory. As we are executing | ||
183 | * without the MMU on, we are in the physical address space. | ||
184 | * We just need to get rid of any offset by aligning the | ||
185 | * address. | ||
186 | * | ||
187 | * This alignment is a balance between the requirements of | ||
188 | * different platforms - we have chosen 128MB to allow | ||
189 | * platforms which align the start of their physical memory | ||
190 | * to 128MB to use this feature, while allowing the zImage | ||
191 | * to be placed within the first 128MB of memory on other | ||
192 | * platforms. Increasing the alignment means we place | ||
193 | * stricter alignment requirements on the start of physical | ||
194 | * memory, but relaxing it means that we break people who | ||
195 | * are already placing their zImage in (eg) the top 64MB | ||
196 | * of this range. | ||
197 | */ | ||
172 | mov r4, pc | 198 | mov r4, pc |
173 | and r4, r4, #0xf8000000 | 199 | and r4, r4, #0xf8000000 |
200 | /* Determine final kernel image address. */ | ||
174 | add r4, r4, #TEXT_OFFSET | 201 | add r4, r4, #TEXT_OFFSET |
175 | #else | 202 | #else |
176 | ldr r4, =zreladdr | 203 | ldr r4, =zreladdr |
@@ -810,6 +837,16 @@ __common_mmu_cache_on: | |||
810 | call_cache_fn: adr r12, proc_types | 837 | call_cache_fn: adr r12, proc_types |
811 | #ifdef CONFIG_CPU_CP15 | 838 | #ifdef CONFIG_CPU_CP15 |
812 | mrc p15, 0, r9, c0, c0 @ get processor ID | 839 | mrc p15, 0, r9, c0, c0 @ get processor ID |
840 | #elif defined(CONFIG_CPU_V7M) | ||
841 | /* | ||
842 | * On v7-M the processor id is located in the V7M_SCB_CPUID | ||
843 | * register, but as cache handling is IMPLEMENTATION DEFINED on | ||
844 | * v7-M (if existent at all) we just return early here. | ||
845 | * If V7M_SCB_CPUID were used the cpu ID functions (i.e. | ||
846 | * __armv7_mmu_cache_{on,off,flush}) would be selected which | ||
847 | * use cp15 registers that are not implemented on v7-M. | ||
848 | */ | ||
849 | bx lr | ||
813 | #else | 850 | #else |
814 | ldr r9, =CONFIG_PROCESSOR_ID | 851 | ldr r9, =CONFIG_PROCESSOR_ID |
815 | #endif | 852 | #endif |
@@ -1310,8 +1347,9 @@ __hyp_reentry_vectors: | |||
1310 | 1347 | ||
1311 | __enter_kernel: | 1348 | __enter_kernel: |
1312 | mov r0, #0 @ must be 0 | 1349 | mov r0, #0 @ must be 0 |
1313 | ARM( mov pc, r4 ) @ call kernel | 1350 | ARM( mov pc, r4 ) @ call kernel |
1314 | THUMB( bx r4 ) @ entry point is always ARM | 1351 | M_CLASS( add r4, r4, #1 ) @ enter in Thumb mode for M class |
1352 | THUMB( bx r4 ) @ entry point is always ARM for A/R classes | ||
1315 | 1353 | ||
1316 | reloc_code_end: | 1354 | reloc_code_end: |
1317 | 1355 | ||