path: root/include/asm-arm/spinlock.h
Diffstat (limited to 'include/asm-arm/spinlock.h')
-rw-r--r--  include/asm-arm/spinlock.h  82
1 file changed, 54 insertions, 28 deletions
diff --git a/include/asm-arm/spinlock.h b/include/asm-arm/spinlock.h
index 182323619caa..1f906d09b688 100644
--- a/include/asm-arm/spinlock.h
+++ b/include/asm-arm/spinlock.h
@@ -8,9 +8,10 @@
 /*
  * ARMv6 Spin-locking.
  *
- * We (exclusively) read the old value, and decrement it. If it
- * hits zero, we may have won the lock, so we try (exclusively)
- * storing it.
+ * We exclusively read the old value. If it is zero, we may have
+ * won the lock, so we try exclusively storing it. A memory barrier
+ * is required after we get a lock, and before we release it, because
+ * V6 CPUs are assumed to have weakly ordered memory.
  *
  * Unlocked value: 0
  * Locked value: 1
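The rewritten comment is the heart of this patch: on a weakly ordered ARMv6 core, the ldrex/strex sequence alone does not order the critical section against lock acquisition or release, so an explicit barrier is needed on both sides. A minimal sketch of the usage this protects, with hypothetical names (example_lock, shared_data and writer are illustrative, not from the patch, and real callers go through the spin_lock()/spin_unlock() wrappers; SPIN_LOCK_UNLOCKED is assumed to be the usual zero initialiser from this header):

    static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;	/* unlocked value: 0 */
    static int shared_data;		/* protected by example_lock */

    static void writer(void)
    {
    	_raw_spin_lock(&example_lock);	/* ldrex/strex loop, then smp_mb() */
    	shared_data = 42;		/* confined to the critical section */
    	_raw_spin_unlock(&example_lock);/* smp_mb(), then a plain store of 0 */
    }

Without the smp_mb() before the unlocking store, the CPU could make the lock-releasing store visible to other processors before the store to shared_data, and the next lock holder could observe stale data.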
@@ -41,7 +42,9 @@ static inline void _raw_spin_lock(spinlock_t *lock)
41" bne 1b" 42" bne 1b"
42 : "=&r" (tmp) 43 : "=&r" (tmp)
43 : "r" (&lock->lock), "r" (1) 44 : "r" (&lock->lock), "r" (1)
44 : "cc", "memory"); 45 : "cc");
46
47 smp_mb();
45} 48}
46 49
47static inline int _raw_spin_trylock(spinlock_t *lock) 50static inline int _raw_spin_trylock(spinlock_t *lock)
@@ -54,18 +57,25 @@ static inline int _raw_spin_trylock(spinlock_t *lock)
54" strexeq %0, %2, [%1]" 57" strexeq %0, %2, [%1]"
55 : "=&r" (tmp) 58 : "=&r" (tmp)
56 : "r" (&lock->lock), "r" (1) 59 : "r" (&lock->lock), "r" (1)
57 : "cc", "memory"); 60 : "cc");
58 61
59 return tmp == 0; 62 if (tmp == 0) {
63 smp_mb();
64 return 1;
65 } else {
66 return 0;
67 }
60} 68}
61 69
62static inline void _raw_spin_unlock(spinlock_t *lock) 70static inline void _raw_spin_unlock(spinlock_t *lock)
63{ 71{
72 smp_mb();
73
64 __asm__ __volatile__( 74 __asm__ __volatile__(
65" str %1, [%0]" 75" str %1, [%0]"
66 : 76 :
67 : "r" (&lock->lock), "r" (0) 77 : "r" (&lock->lock), "r" (0)
68 : "cc", "memory"); 78 : "cc");
69} 79}
70 80
71/* 81/*
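Two details in this hunk are easy to miss. The "memory" clobber is dropped from the asm, presumably because the new smp_mb() already provides both a compiler and (on SMP) a CPU barrier, so the ordering now lives in exactly one place. And _raw_spin_trylock() issues the barrier only on the success path: a failed trylock acquires nothing, so the caller gets no ordering guarantee and must not touch the protected data. A sketch of the intended calling pattern, reusing the hypothetical example_lock and shared_data from above:

    static int try_update(void)
    {
    	if (!_raw_spin_trylock(&example_lock))
    		return 0;	/* no barrier ran; leave shared_data alone */
    	shared_data++;		/* smp_mb() already ordered us after the strex */
    	_raw_spin_unlock(&example_lock);
    	return 1;
    }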
@@ -79,7 +89,8 @@ typedef struct {
 } rwlock_t;
 
 #define RW_LOCK_UNLOCKED	(rwlock_t) { 0 }
 #define rwlock_init(x)		do { *(x) = RW_LOCK_UNLOCKED; } while (0)
+#define rwlock_is_locked(x)	(*((volatile unsigned int *)(x)) != 0)
 
 /*
  * Write locks are easy - we just set bit 31. When unlocking, we can
@@ -97,16 +108,40 @@ static inline void _raw_write_lock(rwlock_t *rw)
97" bne 1b" 108" bne 1b"
98 : "=&r" (tmp) 109 : "=&r" (tmp)
99 : "r" (&rw->lock), "r" (0x80000000) 110 : "r" (&rw->lock), "r" (0x80000000)
100 : "cc", "memory"); 111 : "cc");
112
113 smp_mb();
114}
115
116static inline int _raw_write_trylock(rwlock_t *rw)
117{
118 unsigned long tmp;
119
120 __asm__ __volatile__(
121"1: ldrex %0, [%1]\n"
122" teq %0, #0\n"
123" strexeq %0, %2, [%1]"
124 : "=&r" (tmp)
125 : "r" (&rw->lock), "r" (0x80000000)
126 : "cc");
127
128 if (tmp == 0) {
129 smp_mb();
130 return 1;
131 } else {
132 return 0;
133 }
101} 134}
102 135
103static inline void _raw_write_unlock(rwlock_t *rw) 136static inline void _raw_write_unlock(rwlock_t *rw)
104{ 137{
138 smp_mb();
139
105 __asm__ __volatile__( 140 __asm__ __volatile__(
106 "str %1, [%0]" 141 "str %1, [%0]"
107 : 142 :
108 : "r" (&rw->lock), "r" (0) 143 : "r" (&rw->lock), "r" (0)
109 : "cc", "memory"); 144 : "cc");
110} 145}
111 146
112/* 147/*
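_raw_write_trylock() is relocated rather than new: the copy at the bottom of the file (removed in the final hunk) returned tmp == 0 without any barrier, while this one applies the same barrier-on-success pattern as _raw_spin_trylock() and sits alongside the other write-lock operations. Writer-side usage mirrors the spinlock case; a sketch with a hypothetical example_rwlock, using the RW_LOCK_UNLOCKED initialiser defined above:

    static rwlock_t example_rwlock = RW_LOCK_UNLOCKED;

    static void publish(int v)
    {
    	_raw_write_lock(&example_rwlock);	/* spin until we own bit 31 */
    	shared_data = v;			/* ordered after the lock by smp_mb() */
    	_raw_write_unlock(&example_rwlock);	/* smp_mb(), then store 0 */
    }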
@@ -133,11 +168,17 @@ static inline void _raw_read_lock(rwlock_t *rw)
133" bmi 1b" 168" bmi 1b"
134 : "=&r" (tmp), "=&r" (tmp2) 169 : "=&r" (tmp), "=&r" (tmp2)
135 : "r" (&rw->lock) 170 : "r" (&rw->lock)
136 : "cc", "memory"); 171 : "cc");
172
173 smp_mb();
137} 174}
138 175
139static inline void _raw_read_unlock(rwlock_t *rw) 176static inline void _raw_read_unlock(rwlock_t *rw)
140{ 177{
178 unsigned long tmp, tmp2;
179
180 smp_mb();
181
141 __asm__ __volatile__( 182 __asm__ __volatile__(
142"1: ldrex %0, [%2]\n" 183"1: ldrex %0, [%2]\n"
143" sub %0, %0, #1\n" 184" sub %0, %0, #1\n"
@@ -146,24 +187,9 @@ static inline void _raw_read_unlock(rwlock_t *rw)
146" bne 1b" 187" bne 1b"
147 : "=&r" (tmp), "=&r" (tmp2) 188 : "=&r" (tmp), "=&r" (tmp2)
148 : "r" (&rw->lock) 189 : "r" (&rw->lock)
149 : "cc", "memory"); 190 : "cc");
150} 191}
151 192
152#define _raw_read_trylock(lock) generic_raw_read_trylock(lock) 193#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
153 194
154static inline int _raw_write_trylock(rwlock_t *rw)
155{
156 unsigned long tmp;
157
158 __asm__ __volatile__(
159"1: ldrex %0, [%1]\n"
160" teq %0, #0\n"
161" strexeq %0, %2, [%1]"
162 : "=&r" (tmp)
163 : "r" (&rw->lock), "r" (0x80000000)
164 : "cc", "memory");
165
166 return tmp == 0;
167}
168
169#endif /* __ASM_SPINLOCK_H */ 195#endif /* __ASM_SPINLOCK_H */