author		Russell King <rmk+kernel@arm.linux.org.uk>	2011-01-15 11:22:12 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2011-02-02 16:23:25 -0500
commit		000d9c78eb5cd7f18e3d6a381d66e606bc9b8196 (patch)
tree		4a142d5c4d4b589ac696f3a6a932ba1965d29ed1	/arch/arm/include/asm/spinlock.h
parent		6323f0ccedf756dfe5f46549cec69a2d6d97937b (diff)
ARM: v6k: remove CPU_32v6K dependencies in asm/spinlock.h
SMP requires at least the ARMv6K extensions to be present, so if we're
running on SMP, the WFE and SEV instructions must be available.

However, when we run on UP, the v6K extensions may not be available, and
so we don't want WFE/SEV to be in the instruction stream.  Use the SMP
alternatives infrastructure to replace these instructions with NOPs if
we build for SMP but run on UP.

Tested-by: Tony Lindgren <tony@atomide.com>
Tested-by: Sourav Poddar <sourav.poddar@ti.com>
Tested-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
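For context: the "SMP alternatives" mechanism works by recording, in a
dedicated .alt.smp.init section, the address of each WFE/SEV site together
with the instruction that should replace it on UP; early boot code then
rewrites those sites when the kernel finds itself on a uniprocessor.  The
real fixup is done in assembly during early boot (arch/arm/kernel/head.S
in this era).  The following is only a minimal C sketch of the idea; the
struct layout and the names alt_smp_entry, __alt_smp_begin, __alt_smp_end
and fixup_smp_on_up are illustrative assumptions, not the kernel's API.

	/*
	 * Minimal C sketch of the boot-time "SMP on UP" fixup implied by
	 * the ALT_SMP() macro in the patch below.  Names and entry layout
	 * are illustrative only.
	 */
	struct alt_smp_entry {
		unsigned long insn_addr;	/* address saved by ".long 9998b" */
		unsigned long up_insn;		/* encoding of the UP replacement */
	};

	/* section bounds as a linker script would typically provide them */
	extern struct alt_smp_entry __alt_smp_begin[], __alt_smp_end[];

	static void fixup_smp_on_up(void)
	{
		struct alt_smp_entry *e;

		/* overwrite each recorded SMP instruction with its UP variant */
		for (e = __alt_smp_begin; e < __alt_smp_end; e++)
			*(unsigned long *)e->insn_addr = e->up_insn;

		/* a real implementation must also flush the I-cache here so
		   the rewritten instructions are fetched, not stale ones */
	}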
Diffstat (limited to 'arch/arm/include/asm/spinlock.h')
-rw-r--r--	arch/arm/include/asm/spinlock.h | 37 +++++++++++++++++++++++++------------
1 file changed, 25 insertions(+), 12 deletions(-)
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 17eb355707dd..da1af5240159 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -5,17 +5,36 @@
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
 
+/*
+ * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
+ * extensions, so when running on UP, we have to patch these instructions away.
+ */
+#define ALT_SMP(smp, up)					\
+	"9998:	" smp "\n"					\
+	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
+	"	.long	9998b\n"				\
+	"	" up "\n"					\
+	"	.popsection\n"
+
+#ifdef CONFIG_THUMB2_KERNEL
+#define SEV		ALT_SMP("sev.w", "nop.w")
+#define WFE(cond)	ALT_SMP("wfe" cond ".w", "nop.w")
+#else
+#define SEV		ALT_SMP("sev", "nop")
+#define WFE(cond)	ALT_SMP("wfe" cond, "nop")
+#endif
+
 static inline void dsb_sev(void)
 {
 #if __LINUX_ARM_ARCH__ >= 7
 	__asm__ __volatile__ (
 		"dsb\n"
-		"sev"
+		SEV
 	);
-#elif defined(CONFIG_CPU_32v6K)
+#else
 	__asm__ __volatile__ (
 		"mcr	p15, 0, %0, c7, c10, 4\n"
-		"sev"
+		SEV
 		: : "r" (0)
 	);
 #endif
@@ -46,9 +65,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%1]\n"
 "	teq	%0, #0\n"
-#ifdef CONFIG_CPU_32v6K
-"	wfene\n"
-#endif
+	WFE("ne")
 "	strexeq	%0, %2, [%1]\n"
 "	teqeq	%0, #0\n"
 "	bne	1b"
@@ -107,9 +124,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%1]\n"
 "	teq	%0, #0\n"
-#ifdef CONFIG_CPU_32v6K
-"	wfene\n"
-#endif
+	WFE("ne")
 "	strexeq	%0, %2, [%1]\n"
 "	teq	%0, #0\n"
 "	bne	1b"
@@ -176,9 +191,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
176"1: ldrex %0, [%2]\n" 191"1: ldrex %0, [%2]\n"
177" adds %0, %0, #1\n" 192" adds %0, %0, #1\n"
178" strexpl %1, %0, [%2]\n" 193" strexpl %1, %0, [%2]\n"
179#ifdef CONFIG_CPU_32v6K 194 WFE("mi")
180" wfemi\n"
181#endif
182" rsbpls %0, %1, #0\n" 195" rsbpls %0, %1, #0\n"
183" bmi 1b" 196" bmi 1b"
184 : "=&r" (tmp), "=&r" (tmp2) 197 : "=&r" (tmp), "=&r" (tmp2)