Diffstat (limited to 'include/asm-arm')
-rw-r--r--  include/asm-arm/bitops.h   |  4
-rw-r--r--  include/asm-arm/locks.h    | 36
-rw-r--r--  include/asm-arm/spinlock.h | 53
-rw-r--r--  include/asm-arm/system.h   |  5
4 files changed, 69 insertions(+), 29 deletions(-)
diff --git a/include/asm-arm/bitops.h b/include/asm-arm/bitops.h
index 4edd4dc40c5b..c1adc6b3e86d 100644
--- a/include/asm-arm/bitops.h
+++ b/include/asm-arm/bitops.h
@@ -21,8 +21,8 @@
 
 #include <asm/system.h>
 
-#define smp_mb__before_clear_bit()	do { } while (0)
-#define smp_mb__after_clear_bit()	do { } while (0)
+#define smp_mb__before_clear_bit()	mb()
+#define smp_mb__after_clear_bit()	mb()
 
 /*
  * These functions are the basis of our bit ops.
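Note on the bitops.h hunk: the empty do { } while (0) definitions were only correct for strongly ordered CPUs. On a weakly ordered ARMv6 SMP system, a plain store could be reordered past a clear_bit() that is being used as a release operation, so both macros must now be real barriers. A minimal sketch of the pattern they exist for (producer, flags and buffer_ready are illustrative names, not from this patch):

	#include <linux/bitops.h>

	static unsigned long flags;
	static int buffer_ready;

	static void producer(void)
	{
		buffer_ready = 1;		/* publish the data...              */
		smp_mb__before_clear_bit();	/* ...before the bit clear that     */
		clear_bit(0, &flags);		/* acts as the release of ownership */
	}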
diff --git a/include/asm-arm/locks.h b/include/asm-arm/locks.h
index 9cb33fcc06c1..f08dc8447913 100644
--- a/include/asm-arm/locks.h
+++ b/include/asm-arm/locks.h
@@ -28,7 +28,8 @@
 "	blmi	" #fail				\
 	:					\
 	: "r" (ptr), "I" (1)			\
-	: "ip", "lr", "cc", "memory");		\
+	: "ip", "lr", "cc");			\
+	smp_mb();				\
 	})
 
 #define __down_op_ret(ptr,fail)			\
@@ -48,12 +49,14 @@
 "	mov	%0, ip"				\
 	: "=&r" (ret)				\
 	: "r" (ptr), "I" (1)			\
-	: "ip", "lr", "cc", "memory");		\
+	: "ip", "lr", "cc");			\
+	smp_mb();				\
 	ret;					\
 	})
 
 #define __up_op(ptr,wake)			\
 	({					\
+	smp_mb();				\
 	__asm__ __volatile__(			\
 	"@ up_op\n"				\
 "1:	ldrex	lr, [%0]\n"			\
@@ -66,7 +69,7 @@
 "	blle	" #wake				\
 	:					\
 	: "r" (ptr), "I" (1)			\
-	: "ip", "lr", "cc", "memory");		\
+	: "ip", "lr", "cc");			\
 	})
 
 /*
@@ -92,11 +95,13 @@
 "	blne	" #fail				\
 	:					\
 	: "r" (ptr), "I" (RW_LOCK_BIAS)		\
-	: "ip", "lr", "cc", "memory");		\
+	: "ip", "lr", "cc");			\
+	smp_mb();				\
 	})
 
 #define __up_op_write(ptr,wake)			\
 	({					\
+	smp_mb();				\
 	__asm__ __volatile__(			\
 	"@ up_op_read\n"			\
 "1:	ldrex	lr, [%0]\n"			\
@@ -108,7 +113,7 @@
 "	blcs	" #wake				\
 	:					\
 	: "r" (ptr), "I" (RW_LOCK_BIAS)		\
-	: "ip", "lr", "cc", "memory");		\
+	: "ip", "lr", "cc");			\
 	})
 
 #define __down_op_read(ptr,fail)		\
@@ -116,6 +121,7 @@
 
 #define __up_op_read(ptr,wake)			\
 	({					\
+	smp_mb();				\
 	__asm__ __volatile__(			\
 	"@ up_op_read\n"			\
 "1:	ldrex	lr, [%0]\n"			\
@@ -128,7 +134,7 @@
 "	bleq	" #wake				\
 	:					\
 	: "r" (ptr), "I" (1)			\
-	: "ip", "lr", "cc", "memory");		\
+	: "ip", "lr", "cc");			\
 	})
 
 #else
@@ -148,7 +154,8 @@
 "	blmi	" #fail				\
 	:					\
 	: "r" (ptr), "I" (1)			\
-	: "ip", "lr", "cc", "memory");		\
+	: "ip", "lr", "cc");			\
+	smp_mb();				\
 	})
 
 #define __down_op_ret(ptr,fail)			\
@@ -169,12 +176,14 @@
 "	mov	%0, ip"				\
 	: "=&r" (ret)				\
 	: "r" (ptr), "I" (1)			\
-	: "ip", "lr", "cc", "memory");		\
+	: "ip", "lr", "cc");			\
+	smp_mb();				\
 	ret;					\
 	})
 
 #define __up_op(ptr,wake)			\
 	({					\
+	smp_mb();				\
 	__asm__ __volatile__(			\
 	"@ up_op\n"				\
 "	mrs	ip, cpsr\n"			\
@@ -188,7 +197,7 @@
 "	blle	" #wake				\
 	:					\
 	: "r" (ptr), "I" (1)			\
-	: "ip", "lr", "cc", "memory");		\
+	: "ip", "lr", "cc");			\
 	})
 
 /*
@@ -215,7 +224,8 @@
 "	blne	" #fail				\
 	:					\
 	: "r" (ptr), "I" (RW_LOCK_BIAS)		\
-	: "ip", "lr", "cc", "memory");		\
+	: "ip", "lr", "cc");			\
+	smp_mb();				\
 	})
 
 #define __up_op_write(ptr,wake)			\
@@ -233,7 +243,8 @@
 "	blcs	" #wake				\
 	:					\
 	: "r" (ptr), "I" (RW_LOCK_BIAS)		\
-	: "ip", "lr", "cc", "memory");		\
+	: "ip", "lr", "cc");			\
+	smp_mb();				\
 	})
 
 #define __down_op_read(ptr,fail)		\
@@ -241,6 +252,7 @@
 
 #define __up_op_read(ptr,wake)			\
 	({					\
+	smp_mb();				\
 	__asm__ __volatile__(			\
 	"@ up_op_read\n"			\
 "	mrs	ip, cpsr\n"			\
@@ -254,7 +266,7 @@
 "	bleq	" #wake				\
 	:					\
 	: "r" (ptr), "I" (1)			\
-	: "ip", "lr", "cc", "memory");		\
+	: "ip", "lr", "cc");			\
 	})
 
 #endif
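The locks.h changes all follow one placement rule: down-type (acquire) paths issue smp_mb() after the atomic operation, up-type (release) paths issue it before, and the "memory" clobber moves out of the asm into the explicit barrier. The effect, sketched with a hypothetical caller (sem and shared are illustrative; down()/up() reach these macros through the arch semaphore code):

	#include <asm/semaphore.h>

	static void example(struct semaphore *sem, int *shared)
	{
		down(sem);	/* __down_op: atomic decrement, then smp_mb() */
		(*shared)++;	/* cannot float above the acquire barrier...  */
		up(sem);	/* __up_op: smp_mb(), then atomic increment,  */
	}			/* ...nor sink below the release barrier      */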
diff --git a/include/asm-arm/spinlock.h b/include/asm-arm/spinlock.h
index 9705d5eec94c..1f906d09b688 100644
--- a/include/asm-arm/spinlock.h
+++ b/include/asm-arm/spinlock.h
@@ -8,9 +8,10 @@
 /*
  * ARMv6 Spin-locking.
  *
- * We (exclusively) read the old value, and decrement it.  If it
- * hits zero, we may have won the lock, so we try (exclusively)
- * storing it.
+ * We exclusively read the old value.  If it is zero, we may have
+ * won the lock, so we try exclusively storing it.  A memory barrier
+ * is required after we get a lock, and before we release it, because
+ * V6 CPUs are assumed to have weakly ordered memory.
 *
 * Unlocked value: 0
 * Locked value: 1
@@ -41,7 +42,9 @@ static inline void _raw_spin_lock(spinlock_t *lock)
 "	bne	1b"
 	: "=&r" (tmp)
 	: "r" (&lock->lock), "r" (1)
-	: "cc", "memory");
+	: "cc");
+
+	smp_mb();
 }
 
 static inline int _raw_spin_trylock(spinlock_t *lock)
@@ -54,18 +57,25 @@ static inline int _raw_spin_trylock(spinlock_t *lock)
 "	strexeq	%0, %2, [%1]"
 	: "=&r" (tmp)
 	: "r" (&lock->lock), "r" (1)
-	: "cc", "memory");
+	: "cc");
 
-	return tmp == 0;
+	if (tmp == 0) {
+		smp_mb();
+		return 1;
+	} else {
+		return 0;
+	}
 }
 
 static inline void _raw_spin_unlock(spinlock_t *lock)
 {
+	smp_mb();
+
 	__asm__ __volatile__(
 "	str	%1, [%0]"
 	:
 	: "r" (&lock->lock), "r" (0)
-	: "cc", "memory");
+	: "cc");
 }
 
 /*
@@ -98,7 +108,9 @@ static inline void _raw_write_lock(rwlock_t *rw)
 "	bne	1b"
 	: "=&r" (tmp)
 	: "r" (&rw->lock), "r" (0x80000000)
-	: "cc", "memory");
+	: "cc");
+
+	smp_mb();
 }
 
 static inline int _raw_write_trylock(rwlock_t *rw)
@@ -111,18 +123,25 @@ static inline int _raw_write_trylock(rwlock_t *rw)
 "	strexeq	%0, %2, [%1]"
 	: "=&r" (tmp)
 	: "r" (&rw->lock), "r" (0x80000000)
-	: "cc", "memory");
+	: "cc");
 
-	return tmp == 0;
+	if (tmp == 0) {
+		smp_mb();
+		return 1;
+	} else {
+		return 0;
+	}
 }
 
 static inline void _raw_write_unlock(rwlock_t *rw)
 {
+	smp_mb();
+
 	__asm__ __volatile__(
 	"str	%1, [%0]"
 	:
 	: "r" (&rw->lock), "r" (0)
-	: "cc", "memory");
+	: "cc");
 }
 
 /*
@@ -149,13 +168,17 @@ static inline void _raw_read_lock(rwlock_t *rw)
 "	bmi	1b"
 	: "=&r" (tmp), "=&r" (tmp2)
 	: "r" (&rw->lock)
-	: "cc", "memory");
+	: "cc");
+
+	smp_mb();
 }
 
 static inline void _raw_read_unlock(rwlock_t *rw)
 {
 	unsigned long tmp, tmp2;
 
+	smp_mb();
+
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%2]\n"
 "	sub	%0, %0, #1\n"
@@ -164,7 +187,7 @@ static inline void _raw_read_unlock(rwlock_t *rw)
 "	bne	1b"
 	: "=&r" (tmp), "=&r" (tmp2)
 	: "r" (&rw->lock)
-	: "cc", "memory");
+	: "cc");
 }
 
 #define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
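Note on the trylock variants: the barrier is issued only on the success path, because a failed strex means the lock was never acquired and the caller must not touch the protected data, so there is nothing to order. An illustrative caller (try_stat and hits are hypothetical names; real code goes through spin_trylock()):

	static int try_stat(spinlock_t *lock, unsigned long *hits)
	{
		if (!_raw_spin_trylock(lock))
			return 0;	/* failed: no access, no barrier needed */
		(*hits)++;		/* smp_mb() in trylock orders this      */
					/* after the strex that took the lock   */
		_raw_spin_unlock(lock);	/* its smp_mb() orders the increment    */
					/* before the releasing str             */
		return 1;
	}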
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index 3f9c3626a73c..8efa4ebdcacb 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -139,7 +139,12 @@ extern unsigned int user_debug;
 #define vectors_high()	(0)
 #endif
 
+#if __LINUX_ARM_ARCH__ >= 6
+#define mb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
+                                   : : "r" (0) : "memory")
+#else
 #define mb() __asm__ __volatile__ ("" : : : "memory")
+#endif
 #define rmb() mb()
 #define wmb() mb()
 #define read_barrier_depends() do { } while(0)
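On ARMv6, CP15 c7, c10, 5 is the Data Memory Barrier operation (the register value written, here zero, should be zero and is otherwise ignored); pre-v6 parts keep the compiler-only barrier, as they have no weakly ordered SMP configurations to worry about. A hedged sketch of the kind of code this serves (post_buffer, struct desc and the doorbell pointer are illustrative, not from this patch):

	struct desc {
		int ready;
		/* ... payload ... */
	};

	static void post_buffer(struct desc *d, volatile unsigned int *doorbell)
	{
		d->ready = 1;	/* make the descriptor visible...          */
		wmb();		/* ...wmb() is mb(), the DMB mcr, on v6... */
		*doorbell = 1;	/* ...before telling the device to look    */
	}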