author     Nicolas Pitre <nicolas.pitre@linaro.org>    2013-10-18 17:06:03 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>  2013-10-29 07:06:23 -0400
commit     39792c7cf3111d69dc4aa0923859d8b929e9039f (patch)
tree       97e96f823d34ba2324efeafa5541893b062f112c
parent     3c8828f6a0cb3bf1bae04a98135da3c53e20c217 (diff)
ARM: 7861/1: cacheflush: consolidate single-CPU ARMv7 cache disabling code
This code is becoming duplicated in many places. So let's consolidate
it into a handy macro that is known to be right and available for reuse.
Signed-off-by: Nicolas Pitre <nico@linaro.org>
Acked-by: Dave Martin <Dave.Martin@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
 arch/arm/include/asm/cacheflush.h | 46
 arch/arm/mach-vexpress/dcscb.c    | 56
 arch/arm/mach-vexpress/tc2_pm.c   | 48
 3 files changed, 52 insertions(+), 98 deletions(-)
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 15f2d5bf8875..ee753f1749cd 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -435,4 +435,50 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
 #define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
 #define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
 
+/*
+ * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
+ * To do so we must:
+ *
+ * - Clear the SCTLR.C bit to prevent further cache allocations
+ * - Flush the desired level of cache
+ * - Clear the ACTLR "SMP" bit to disable local coherency
+ *
+ * ... and we must do so without any intervening memory access between
+ * those steps, not even to the stack.
+ *
+ * WARNING -- After this has been called:
+ *
+ * - ldrex/strex (and similar) instructions must not be used.
+ * - The CPU is obviously no longer coherent with the other CPUs.
+ * - This is unlikely to work as expected if Linux is running non-secure.
+ *
+ * Note:
+ *
+ * - This is known to apply to several ARMv7 processor implementations,
+ *   however some exceptions may exist. Caveat emptor.
+ *
+ * - The clobber list is dictated by the call to v7_flush_dcache_*.
+ *   fp is preserved to the stack explicitly prior to disabling the cache,
+ *   since adding it to the clobber list is incompatible with having
+ *   CONFIG_FRAME_POINTER=y. ip is saved as well, in case r12-clobbering
+ *   trampolines are inserted by the linker, and to keep sp 64-bit aligned.
+ */
+#define v7_exit_coherency_flush(level) \
+        asm volatile( \
+        "stmfd sp!, {fp, ip} \n\t" \
+        "mrc p15, 0, r0, c1, c0, 0 @ get SCTLR \n\t" \
+        "bic r0, r0, #"__stringify(CR_C)" \n\t" \
+        "mcr p15, 0, r0, c1, c0, 0 @ set SCTLR \n\t" \
+        "isb \n\t" \
+        "bl v7_flush_dcache_"__stringify(level)" \n\t" \
+        "clrex \n\t" \
+        "mrc p15, 0, r0, c1, c0, 1 @ get ACTLR \n\t" \
+        "bic r0, r0, #(1 << 6) @ disable local coherency \n\t" \
+        "mcr p15, 0, r0, c1, c0, 1 @ set ACTLR \n\t" \
+        "isb \n\t" \
+        "dsb \n\t" \
+        "ldmfd sp!, {fp, ip}" \
+        : : : "r0","r1","r2","r3","r4","r5","r6","r7", \
+              "r9","r10","lr","memory" )
+
 #endif
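
The two call-site patterns this patch introduces can be summarized in a short
sketch (illustrative only: example_power_down and its last_man flag are
hypothetical names, not part of this patch; v7_exit_coherency_flush() and the
all/louis levels are the ones defined above). The louis variant flushes the
data cache only up to the Level of Unification Inner Shareable, i.e. the local
CPU's caches, while all flushes every cache level, which is what the last CPU
leaving a cluster must do:

#include <linux/types.h>
#include <asm/cacheflush.h>     /* v7_exit_coherency_flush() */

/* Hypothetical power-down path, for illustration only. */
static void example_power_down(bool last_man)
{
        if (last_man) {
                /* Last CPU leaving the cluster: flush all cache levels. */
                v7_exit_coherency_flush(all);
        } else {
                /* Any other CPU: flush only its local caches (to LoUIS). */
                v7_exit_coherency_flush(louis);
        }

        /*
         * From here on this CPU is non-coherent: no ldrex/strex, and no
         * assumption that its stores are visible to the other CPUs.
         */
}

This is exactly how the two call sites below use the macro.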
diff --git a/arch/arm/mach-vexpress/dcscb.c b/arch/arm/mach-vexpress/dcscb.c
index 3a6384c6c435..14d499688736 100644
--- a/arch/arm/mach-vexpress/dcscb.c
+++ b/arch/arm/mach-vexpress/dcscb.c
@@ -133,38 +133,8 @@ static void dcscb_power_down(void)
         if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
                 arch_spin_unlock(&dcscb_lock);
 
-                /*
-                 * Flush all cache levels for this cluster.
-                 *
-                 * To do so we do:
-                 * - Clear the SCTLR.C bit to prevent further cache allocations
-                 * - Flush the whole cache
-                 * - Clear the ACTLR "SMP" bit to disable local coherency
-                 *
-                 * Let's do it in the safest possible way i.e. with
-                 * no memory access within the following sequence
-                 * including to the stack.
-                 *
-                 * Note: fp is preserved to the stack explicitly prior doing
-                 * this since adding it to the clobber list is incompatible
-                 * with having CONFIG_FRAME_POINTER=y.
-                 */
-                asm volatile(
-                "str fp, [sp, #-4]! \n\t"
-                "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t"
-                "bic r0, r0, #"__stringify(CR_C)" \n\t"
-                "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t"
-                "isb \n\t"
-                "bl v7_flush_dcache_all \n\t"
-                "clrex \n\t"
-                "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t"
-                "bic r0, r0, #(1 << 6) @ disable local coherency \n\t"
-                "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t"
-                "isb \n\t"
-                "dsb \n\t"
-                "ldr fp, [sp], #4"
-                : : : "r0","r1","r2","r3","r4","r5","r6","r7",
-                      "r9","r10","lr","memory");
+                /* Flush all cache levels for this cluster. */
+                v7_exit_coherency_flush(all);
 
                 /*
                  * This is a harmless no-op. On platforms with a real
@@ -183,26 +153,8 @@ static void dcscb_power_down(void)
         } else {
                 arch_spin_unlock(&dcscb_lock);
 
-                /*
-                 * Flush the local CPU cache.
-                 * Let's do it in the safest possible way as above.
-                 */
-                asm volatile(
-                "str fp, [sp, #-4]! \n\t"
-                "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t"
-                "bic r0, r0, #"__stringify(CR_C)" \n\t"
-                "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t"
-                "isb \n\t"
-                "bl v7_flush_dcache_louis \n\t"
-                "clrex \n\t"
-                "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t"
-                "bic r0, r0, #(1 << 6) @ disable local coherency \n\t"
-                "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t"
-                "isb \n\t"
-                "dsb \n\t"
-                "ldr fp, [sp], #4"
-                : : : "r0","r1","r2","r3","r4","r5","r6","r7",
-                      "r9","r10","lr","memory");
+                /* Disable and flush the local CPU cache. */
+                v7_exit_coherency_flush(louis);
         }
 
         __mcpm_cpu_down(cpu, cluster);
diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c
index e6eb48192912..4eb92ebfd953 100644
--- a/arch/arm/mach-vexpress/tc2_pm.c
+++ b/arch/arm/mach-vexpress/tc2_pm.c
@@ -156,32 +156,7 @@ static void tc2_pm_down(u64 residency)
                         : : "r" (0x400) );
                 }
 
-                /*
-                 * We need to disable and flush the whole (L1 and L2) cache.
-                 * Let's do it in the safest possible way i.e. with
-                 * no memory access within the following sequence
-                 * including the stack.
-                 *
-                 * Note: fp is preserved to the stack explicitly prior doing
-                 * this since adding it to the clobber list is incompatible
-                 * with having CONFIG_FRAME_POINTER=y.
-                 */
-                asm volatile(
-                "str fp, [sp, #-4]! \n\t"
-                "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t"
-                "bic r0, r0, #"__stringify(CR_C)" \n\t"
-                "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t"
-                "isb \n\t"
-                "bl v7_flush_dcache_all \n\t"
-                "clrex \n\t"
-                "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t"
-                "bic r0, r0, #(1 << 6) @ disable local coherency \n\t"
-                "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t"
-                "isb \n\t"
-                "dsb \n\t"
-                "ldr fp, [sp], #4"
-                : : : "r0","r1","r2","r3","r4","r5","r6","r7",
-                      "r9","r10","lr","memory");
+                v7_exit_coherency_flush(all);
 
                 cci_disable_port_by_cpu(mpidr);
 
@@ -197,26 +172,7 @@ static void tc2_pm_down(u64 residency)
 
                 arch_spin_unlock(&tc2_pm_lock);
 
-                /*
-                 * We need to disable and flush only the L1 cache.
-                 * Let's do it in the safest possible way as above.
-                 */
-                asm volatile(
-                "str fp, [sp, #-4]! \n\t"
-                "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t"
-                "bic r0, r0, #"__stringify(CR_C)" \n\t"
-                "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t"
-                "isb \n\t"
-                "bl v7_flush_dcache_louis \n\t"
-                "clrex \n\t"
-                "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t"
-                "bic r0, r0, #(1 << 6) @ disable local coherency \n\t"
-                "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t"
-                "isb \n\t"
-                "dsb \n\t"
-                "ldr fp, [sp], #4"
-                : : : "r0","r1","r2","r3","r4","r5","r6","r7",
-                      "r9","r10","lr","memory");
+                v7_exit_coherency_flush(louis);
         }
 
         __mcpm_cpu_down(cpu, cluster);