 arch/alpha/include/asm/cmpxchg.h |  6 ------
 arch/alpha/include/asm/xchg.h    | 38 ++++++++++++++++++++++++++++++------------
 include/linux/mutex.h            |  5 +++++
 3 files changed, 31 insertions(+), 18 deletions(-)
diff --git a/arch/alpha/include/asm/cmpxchg.h b/arch/alpha/include/asm/cmpxchg.h
index 46ebf14aed4e..8a2b331e43fe 100644
--- a/arch/alpha/include/asm/cmpxchg.h
+++ b/arch/alpha/include/asm/cmpxchg.h
@@ -6,7 +6,6 @@
  * Atomic exchange routines.
  */
 
-#define __ASM__MB
 #define ____xchg(type, args...)	__xchg ## type ## _local(args)
 #define ____cmpxchg(type, args...)	__cmpxchg ## type ## _local(args)
 #include <asm/xchg.h>
@@ -33,10 +32,6 @@
 	cmpxchg_local((ptr), (o), (n));					\
 })
 
-#ifdef CONFIG_SMP
-#undef __ASM__MB
-#define __ASM__MB	"\tmb\n"
-#endif
 #undef ____xchg
 #undef ____cmpxchg
 #define ____xchg(type, args...)	__xchg ##type(args)
@@ -64,7 +59,6 @@
 	cmpxchg((ptr), (o), (n));					\
 })
 
-#undef __ASM__MB
 #undef ____cmpxchg
 
 #endif /* _ALPHA_CMPXCHG_H */
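
The ____xchg()/____cmpxchg() token pasting above is what allows asm/xchg.h to be pulled in twice: the first pass (shown in this hunk) stamps out the barrier-free _local variants, then the macros are redefined so a second inclusion emits the fully ordered names. A minimal single-file sketch of that double-expansion trick, assuming a hypothetical DEFINE_XCHG_BODY wrapper in place of the re-included header and a plain, non-atomic store purely for illustration:

#include <stdio.h>

/* One body, stamped out once per expansion of the dispatch macro.
 * Named variadic arguments (args...) are a GNU extension, as in the
 * kernel headers. */
#define DEFINE_XCHG_BODY					\
static inline unsigned long					\
____xchg(_u32, volatile int *m, unsigned long val)		\
{								\
	unsigned long ret = (unsigned long)*m;			\
	*m = (int)val;	/* plain store, illustration only */	\
	return ret;						\
}

/* First expansion: ____xchg(_u32, ...) becomes __xchg_u32_local(...). */
#define ____xchg(type, args...) __xchg ## type ## _local(args)
DEFINE_XCHG_BODY
#undef ____xchg

/* Second expansion: the same body becomes __xchg_u32(...). */
#define ____xchg(type, args...) __xchg ## type(args)
DEFINE_XCHG_BODY
#undef ____xchg

int main(void)
{
	volatile int v = 1;
	printf("%lu\n", __xchg_u32_local(&v, 2));	/* prints 1 */
	printf("%lu\n", __xchg_u32(&v, 3));		/* prints 2 */
	return 0;
}

Both families thus share a single definition of each LL/SC loop, which is why the barrier policy can be changed in one place.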
diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h
index 68dfb3cb7145..e2b59fac5257 100644
--- a/arch/alpha/include/asm/xchg.h
+++ b/arch/alpha/include/asm/xchg.h
@@ -12,6 +12,10 @@
  * Atomic exchange.
  * Since it can be used to implement critical sections
  * it must clobber "memory" (also for interrupts in UP).
+ *
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
  */
 
 static inline unsigned long
@@ -19,6 +23,7 @@ ____xchg(_u8, volatile char *m, unsigned long val)
 {
 	unsigned long ret, tmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%4,7,%3\n"
 	"	insbl	%1,%4,%1\n"
@@ -28,12 +33,12 @@ ____xchg(_u8, volatile char *m, unsigned long val)
 	"	or	%1,%2,%2\n"
 	"	stq_c	%2,0(%3)\n"
 	"	beq	%2,2f\n"
-		__ASM__MB
 	".subsection 2\n"
 	"2:	br	1b\n"
 	".previous"
 	: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
 	: "r" ((long)m), "1" (val) : "memory");
+	smp_mb();
 
 	return ret;
 }
@@ -43,6 +48,7 @@ ____xchg(_u16, volatile short *m, unsigned long val)
 {
 	unsigned long ret, tmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%4,7,%3\n"
 	"	inswl	%1,%4,%1\n"
@@ -52,12 +58,12 @@ ____xchg(_u16, volatile short *m, unsigned long val)
 	"	or	%1,%2,%2\n"
 	"	stq_c	%2,0(%3)\n"
 	"	beq	%2,2f\n"
-		__ASM__MB
 	".subsection 2\n"
 	"2:	br	1b\n"
 	".previous"
 	: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
 	: "r" ((long)m), "1" (val) : "memory");
+	smp_mb();
 
 	return ret;
 }
@@ -67,17 +73,18 @@ ____xchg(_u32, volatile int *m, unsigned long val)
 {
 	unsigned long dummy;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%4\n"
 	"	bis $31,%3,%1\n"
 	"	stl_c %1,%2\n"
 	"	beq %1,2f\n"
-		__ASM__MB
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	: "=&r" (val), "=&r" (dummy), "=m" (*m)
 	: "rI" (val), "m" (*m) : "memory");
+	smp_mb();
 
 	return val;
 }
@@ -87,17 +94,18 @@ ____xchg(_u64, volatile long *m, unsigned long val)
 {
 	unsigned long dummy;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %0,%4\n"
 	"	bis $31,%3,%1\n"
 	"	stq_c %1,%2\n"
 	"	beq %1,2f\n"
-		__ASM__MB
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	: "=&r" (val), "=&r" (dummy), "=m" (*m)
 	: "rI" (val), "m" (*m) : "memory");
+	smp_mb();
 
 	return val;
 }
@@ -128,10 +136,12 @@ ____xchg(, volatile void *ptr, unsigned long x, int size)
  * store NEW in MEM.  Return the initial value in MEM.  Success is
  * indicated by comparing RETURN with OLD.
  *
- * The memory barrier should be placed in SMP only when we actually
- * make the change. If we don't change anything (so if the returned
- * prev is equal to old) then we aren't acquiring anything new and
- * we don't need any memory barrier as far I can tell.
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
+ * The trailing memory barrier is placed in SMP unconditionally, in
+ * order to guarantee that dependency ordering is preserved when a
+ * dependency is headed by an unsuccessful operation.
  */
 
 static inline unsigned long
@@ -139,6 +149,7 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
 {
 	unsigned long prev, tmp, cmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%5,7,%4\n"
 	"	insbl	%1,%5,%1\n"
@@ -150,13 +161,13 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
 	"	or	%1,%2,%2\n"
 	"	stq_c	%2,0(%4)\n"
 	"	beq	%2,3f\n"
-		__ASM__MB
 	"2:\n"
 	".subsection 2\n"
 	"3:	br	1b\n"
 	".previous"
 	: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
 	: "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+	smp_mb();
 
 	return prev;
 }
@@ -166,6 +177,7 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
 {
 	unsigned long prev, tmp, cmp, addr64;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%5,7,%4\n"
 	"	inswl	%1,%5,%1\n"
@@ -177,13 +189,13 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
 	"	or	%1,%2,%2\n"
 	"	stq_c	%2,0(%4)\n"
 	"	beq	%2,3f\n"
-		__ASM__MB
 	"2:\n"
 	".subsection 2\n"
 	"3:	br	1b\n"
 	".previous"
 	: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
 	: "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+	smp_mb();
 
 	return prev;
 }
@@ -193,6 +205,7 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
 {
 	unsigned long prev, cmp;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%5\n"
 	"	cmpeq %0,%3,%1\n"
@@ -200,13 +213,13 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
 	"	mov %4,%1\n"
 	"	stl_c %1,%2\n"
 	"	beq %1,3f\n"
-		__ASM__MB
 	"2:\n"
 	".subsection 2\n"
 	"3:	br 1b\n"
 	".previous"
 	: "=&r"(prev), "=&r"(cmp), "=m"(*m)
 	: "r"((long) old), "r"(new), "m"(*m) : "memory");
+	smp_mb();
 
 	return prev;
 }
@@ -216,6 +229,7 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
 {
 	unsigned long prev, cmp;
 
+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %0,%5\n"
 	"	cmpeq %0,%3,%1\n"
@@ -223,13 +237,13 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
 	"	mov %4,%1\n"
 	"	stq_c %1,%2\n"
 	"	beq %1,3f\n"
-		__ASM__MB
 	"2:\n"
 	".subsection 2\n"
 	"3:	br 1b\n"
 	".previous"
 	: "=&r"(prev), "=&r"(cmp), "=m"(*m)
 	: "r"((long) old), "r"(new), "m"(*m) : "memory");
+	smp_mb();
 
 	return prev;
}
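
Taken together, the pattern this patch settles on is: full barrier, relaxed LL/SC operation, full barrier, with the trailing smp_mb() executed even when the compare fails. As a rough user-space rendering of that shape in portable C11 (illustrative only; this is not the kernel's implementation, and smp_mb() on Alpha is the mb instruction rather than a C11 fence):

#include <stdatomic.h>

/* Fully ordered cmpxchg built from a relaxed primitive plus leading
 * and trailing full fences; the trailing fence runs unconditionally,
 * mirroring the patch's note about dependency ordering behind an
 * unsuccessful operation. */
static unsigned long
cmpxchg_full(_Atomic unsigned long *m, unsigned long old, unsigned long new)
{
	unsigned long prev = old;

	atomic_thread_fence(memory_order_seq_cst);	/* leading "mb" */
	atomic_compare_exchange_strong_explicit(m, &prev, new,
						memory_order_relaxed,
						memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* trailing "mb" */
	return prev;	/* initial value of *m, as cmpxchg requires */
}

On success prev still holds old; on failure the compare-exchange writes the observed value into prev, so the caller's RETURN == OLD test works exactly as the comment in the hunk above describes.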
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index f25c13423bd4..cb3bbed4e633 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -66,6 +66,11 @@ struct mutex {
 #endif
 };
 
+/*
+ * Internal helper function; C doesn't allow us to hide it :/
+ *
+ * DO NOT USE (outside of mutex code).
+ */
 static inline struct task_struct *__mutex_owner(struct mutex *lock)
 {
 	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x07);
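
The & ~0x07 works because task_struct pointers are at least 8-byte aligned, leaving the low three bits of mutex::owner free to carry state flags alongside the owner pointer. A small stand-alone sketch of that tagging scheme, with made-up flag names (the kernel's real flag definitions live in kernel/locking/mutex.c):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical flag names; only the low-bit masking trick itself
 * mirrors __mutex_owner() above. */
#define OWNER_FLAG_WAITERS	0x01UL	/* e.g. "waiters are queued" */
#define OWNER_FLAG_HANDOFF	0x02UL	/* e.g. "hand off to top waiter" */
#define OWNER_FLAGS		0x07UL	/* low bits reserved for flags */

/* 8-byte alignment guarantees the low three pointer bits are zero. */
struct task { char name[16]; } __attribute__((aligned(8)));

static struct task *owner_task(uintptr_t owner)
{
	return (struct task *)(owner & ~OWNER_FLAGS);	/* the ~0x07 mask */
}

int main(void)
{
	static struct task t = { "worker" };
	uintptr_t owner = (uintptr_t)&t | OWNER_FLAG_WAITERS;

	printf("owner=%s waiters=%lu\n", owner_task(owner)->name,
	       (unsigned long)(owner & OWNER_FLAG_WAITERS));
	return 0;
}

The helper has to sit in the header so in-header users such as mutex_is_locked() can inline it, hence the DO NOT USE warning in place of a genuinely private definition.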