Diffstat (limited to 'arch/arm/include/asm/spinlock.h')
-rw-r--r--  arch/arm/include/asm/spinlock.h | 53 +++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 41 insertions(+), 12 deletions(-)
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 17eb355707dd..fdd3820edff8 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -5,17 +5,52 @@
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
 
+/*
+ * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
+ * extensions, so when running on UP, we have to patch these instructions away.
+ */
+#define ALT_SMP(smp, up)					\
+	"9998:	" smp "\n"					\
+	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
+	"	.long	9998b\n"				\
+	"	" up "\n"					\
+	"	.popsection\n"
+
+#ifdef CONFIG_THUMB2_KERNEL
+#define SEV		ALT_SMP("sev.w", "nop.w")
+/*
+ * For Thumb-2, special care is needed to ensure that the conditional WFE
+ * instruction really does assemble to exactly 4 bytes (as required by
+ * the SMP_ON_UP fixup code).  By itself "wfene" might cause the
+ * assembler to insert an extra (16-bit) IT instruction, depending on the
+ * presence or absence of neighbouring conditional instructions.
+ *
+ * To avoid this unpredictability, an appropriate IT instruction is
+ * inserted explicitly: the assembler won't change IT instructions which
+ * are explicitly present in the input.
+ */
+#define WFE(cond)	ALT_SMP(		\
+	"it " cond "\n\t"			\
+	"wfe" cond ".n",			\
+						\
+	"nop.w"					\
+)
+#else
+#define SEV		ALT_SMP("sev", "nop")
+#define WFE(cond)	ALT_SMP("wfe" cond, "nop")
+#endif
+
 static inline void dsb_sev(void)
 {
 #if __LINUX_ARM_ARCH__ >= 7
 	__asm__ __volatile__ (
 		"dsb\n"
-		"sev"
+		SEV
 	);
-#elif defined(CONFIG_CPU_32v6K)
+#else
 	__asm__ __volatile__ (
 		"mcr	p15, 0, %0, c7, c10, 4\n"
-		"sev"
+		SEV
 		: : "r" (0)
 	);
 #endif
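The ".alt.smp.init" section that ALT_SMP populates is consumed once at boot: if an SMP_ON_UP kernel finds itself on a uniprocessor, it walks the recorded addresses and overwrites each sev/wfe site with its UP replacement. The real pass is assembly in arch/arm/kernel/head.S; the C sketch below only illustrates the entry layout implied by the macro, and the section-boundary symbols (__smpalt_begin/__smpalt_end) are made-up names for illustration, not the kernel's.

struct alt_smp_entry {
	unsigned long insn_addr;	/* from ".long 9998b": address of the SMP instruction */
	unsigned long up_insn;		/* UP replacement, assembled in place in the section */
};

/* Hypothetical linker-provided bounds of ".alt.smp.init". */
extern struct alt_smp_entry __smpalt_begin[], __smpalt_end[];

static void fixup_smp_on_up(void)
{
	struct alt_smp_entry *e;

	/* Running on UP: patch every recorded sev/wfe with its UP twin. */
	for (e = __smpalt_begin; e < __smpalt_end; e++)
		*(unsigned long *)e->insn_addr = e->up_insn;

	/*
	 * A real implementation must also clean the D-cache and invalidate
	 * the I-cache over the patched range before it is next executed.
	 */
}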
@@ -46,9 +81,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%1]\n"
 "	teq	%0, #0\n"
-#ifdef CONFIG_CPU_32v6K
-"	wfene\n"
-#endif
+	WFE("ne")
 "	strexeq	%0, %2, [%1]\n"
 "	teqeq	%0, #0\n"
 "	bne	1b"
@@ -107,9 +140,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%1]\n"
 "	teq	%0, #0\n"
-#ifdef CONFIG_CPU_32v6K
-"	wfene\n"
-#endif
+	WFE("ne")
 "	strexeq	%0, %2, [%1]\n"
 "	teq	%0, #0\n"
 "	bne	1b"
@@ -176,9 +207,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
176"1: ldrex %0, [%2]\n" 207"1: ldrex %0, [%2]\n"
177" adds %0, %0, #1\n" 208" adds %0, %0, #1\n"
178" strexpl %1, %0, [%2]\n" 209" strexpl %1, %0, [%2]\n"
179#ifdef CONFIG_CPU_32v6K 210 WFE("mi")
180" wfemi\n"
181#endif
182" rsbpls %0, %1, #0\n" 211" rsbpls %0, %1, #0\n"
183" bmi 1b" 212" bmi 1b"
184 : "=&r" (tmp), "=&r" (tmp2) 213 : "=&r" (tmp), "=&r" (tmp2)