Diffstat (limited to 'arch/arm/include/asm/spinlock.h')
-rw-r--r--  arch/arm/include/asm/spinlock.h  28
1 file changed, 14 insertions(+), 14 deletions(-)
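
This patch makes two changes to the ARM spinlock and rwlock fast paths. First, the locally defined ALT_SMP() assembly macro and the separate Thumb-2/ARM SEV definitions are dropped in favour of the shared __ALT_SMP_ASM() and WASM() helpers, so the WFE and SEV instructions go through one common SMP/UP patching mechanism (when an SMP kernel boots on a uniprocessor system, they are rewritten to NOPs, as the comment in the diff explains; WASM() stringifies the mnemonic with the correct width for ARM vs Thumb-2 builds). Second, prefetchw() is issued on the lock word before every ldrex/strex loop, and arch_write_can_lock()/arch_read_can_lock() now read the lock word through ACCESS_ONCE() so the compiler performs exactly one load rather than caching or re-reading the value.

Prefetching the lock word for write pulls the cache line into the local cache in a unique (writable) state before the exclusive access sequence begins, so the strex does not stall on a shared-to-exclusive upgrade under contention. Below is a minimal user-space sketch of that pattern (a hypothetical toy_lock built on C11 atomics and __builtin_prefetch, not the kernel implementation):

#include <stdatomic.h>

/* Hypothetical user-space analogue of the pattern this patch adds: a
 * test-and-set spin lock that prefetches the lock word for write before
 * entering the atomic loop, as prefetchw(&lock->slock) does in the kernel.
 * With rw = 1, __builtin_prefetch() may emit PLDW on ARMv7 CPUs that have
 * the multiprocessing extensions. */
struct toy_lock { atomic_uint v; };

static void toy_lock_acquire(struct toy_lock *l)
{
	__builtin_prefetch(&l->v, 1);	/* request the line in a writable state */
	while (atomic_exchange_explicit(&l->v, 1, memory_order_acquire))
		;	/* spin until the previous value was 0 (unlocked) */
}

static void toy_lock_release(struct toy_lock *l)
{
	atomic_store_explicit(&l->v, 0, memory_order_release);
}
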
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 4f2c28060c9a..499900781d59 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -5,21 +5,13 @@
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
 
-#include <asm/processor.h>
+#include <linux/prefetch.h>
 
 /*
  * sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K
  * extensions, so when running on UP, we have to patch these instructions away.
  */
-#define ALT_SMP(smp, up)					\
-	"9998:	" smp "\n"					\
-	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
-	"	.long	9998b\n"				\
-	"	" up "\n"					\
-	"	.popsection\n"
-
 #ifdef CONFIG_THUMB2_KERNEL
-#define SEV		ALT_SMP("sev.w", "nop.w")
 /*
  * For Thumb-2, special care is needed to ensure that the conditional WFE
  * instruction really does assemble to exactly 4 bytes (as required by
@@ -31,17 +23,18 @@
  * the assembler won't change IT instructions which are explicitly present
  * in the input.
  */
-#define WFE(cond)	ALT_SMP(		\
+#define WFE(cond)	__ALT_SMP_ASM(		\
 	"it " cond "\n\t"			\
 	"wfe" cond ".n",			\
 						\
 	"nop.w"					\
 )
 #else
-#define SEV		ALT_SMP("sev", "nop")
-#define WFE(cond)	ALT_SMP("wfe" cond, "nop")
+#define WFE(cond)	__ALT_SMP_ASM("wfe" cond, "nop")
 #endif
 
+#define SEV		__ALT_SMP_ASM(WASM(sev), WASM(nop))
+
 static inline void dsb_sev(void)
 {
 #if __LINUX_ARM_ARCH__ >= 7
@@ -77,6 +70,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 	u32 newval;
 	arch_spinlock_t lockval;
 
+	prefetchw(&lock->slock);
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%3]\n"
 "	add	%1, %0, %4\n"
@@ -100,6 +94,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 	unsigned long contended, res;
 	u32 slock;
 
+	prefetchw(&lock->slock);
 	do {
 		__asm__ __volatile__(
 		"	ldrex	%0, [%3]\n"
@@ -152,6 +147,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
 
+	prefetchw(&rw->lock);
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%1]\n"
 "	teq	%0, #0\n"
@@ -170,6 +166,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	unsigned long contended, res;
 
+	prefetchw(&rw->lock);
 	do {
 		__asm__ __volatile__(
 		"	ldrex	%0, [%2]\n"
@@ -203,7 +200,7 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 }
 
 /* write_can_lock - would write_trylock() succeed? */
-#define arch_write_can_lock(x)		((x)->lock == 0)
+#define arch_write_can_lock(x)		(ACCESS_ONCE((x)->lock) == 0)
 
 /*
  * Read locks are a bit more hairy:
@@ -221,6 +218,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp, tmp2;
 
+	prefetchw(&rw->lock);
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%2]\n"
 "	adds	%0, %0, #1\n"
@@ -241,6 +239,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 
 	smp_mb();
 
+	prefetchw(&rw->lock);
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%2]\n"
 "	sub	%0, %0, #1\n"
@@ -259,6 +258,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	unsigned long contended, res;
 
+	prefetchw(&rw->lock);
 	do {
 		__asm__ __volatile__(
 		"	ldrex	%0, [%2]\n"
@@ -280,7 +280,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 }
 
 /* read_can_lock - would read_trylock() succeed? */
-#define arch_read_can_lock(x)		((x)->lock < 0x80000000)
+#define arch_read_can_lock(x)		(ACCESS_ONCE((x)->lock) < 0x80000000)
 
 #define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
 #define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)
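
For context on the asm bodies left unchanged above: arch_spin_lock() implements a ticket lock. The slock word carries a "next" ticket in its upper half and an "owner" ticket in its lower half; the "add %1, %0, %4" inside the ldrex/strex loop takes a ticket, the locker then waits in WFE until owner reaches its ticket, and the unlocker bumps owner and runs dsb_sev() to wake the waiters. A rough user-space analogue with C11 atomics follows, assuming a 16-bit next/owner split like the kernel's; the type and function names are illustrative, not the kernel's definitions:

#include <stdatomic.h>
#include <stdint.h>

/* Sketch of the ticket-lock scheme; the halves are separate fields here
 * instead of one 32-bit slock word manipulated with ldrex/strex. */
struct ticket_lock {
	_Atomic uint16_t next;	/* next ticket to hand out */
	_Atomic uint16_t owner;	/* ticket currently being served */
};

static void ticket_lock_acquire(struct ticket_lock *l)
{
	/* Take a ticket; the kernel does this with "add %1, %0, %4" + strex. */
	uint16_t me = atomic_fetch_add_explicit(&l->next, 1, memory_order_relaxed);
	/* Wait our turn; the kernel parks in WFE instead of busy-waiting and
	 * is woken by the unlocker's dsb_sev(). */
	while (atomic_load_explicit(&l->owner, memory_order_acquire) != me)
		;
}

static void ticket_lock_release(struct ticket_lock *l)
{
	/* Serve the next ticket; the release store pairs with the acquire
	 * load in ticket_lock_acquire(). */
	atomic_fetch_add_explicit(&l->owner, 1, memory_order_release);
}

The ticket scheme hands the lock out in FIFO order, which is also why the unlock path must broadcast SEV: every core parked in WFE wakes, re-reads owner, and only the one holding the matching ticket proceeds.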