author    Nick Piggin <npiggin@suse.de>  2006-03-23 06:01:02 -0500
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-03-23 10:38:17 -0500
commit    0b2fcfdb8b4e7e379192f24ea2203163ddf5df1d (patch)
tree      1f3995e41ab12ff76e737389e0b59a40c0c73668 /include
parent    713729e8b993cb880225e2ced50a3f5ac05c2b3f (diff)
[PATCH] atomic: add_unless cmpxchg optimise
Without branch hints, the very unlikely chance of the loop repeating due
to cmpxchg failure is unrolled with gcc-4 that I have tested.  Improve
this for architectures with a native cas/cmpxchg.  llsc archs should try
to implement this natively.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Andi Kleen <ak@muc.de>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Roman Zippel <zippel@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
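For readers outside the kernel tree, here is a minimal userspace sketch of the loop shape this patch introduces. It assumes C11 <stdatomic.h> plus GCC/Clang's __builtin_expect; try_add_unless() is a hypothetical stand-in for the kernel's atomic_add_unless() macro, not code from this commit:

#include <stdatomic.h>
#include <stdio.h>

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Add 'a' to *v unless *v == u; returns nonzero iff the add happened.
 * The cmpxchg-failure path is hinted unlikely, so the compiler can keep
 * the retry branch out of the straight-line (hot) code path. */
static int try_add_unless(atomic_int *v, int a, int u)
{
	int c = atomic_load(v);

	for (;;) {
		if (unlikely(c == u))
			break;
		/* On failure this updates c with the value it found,
		 * playing the role of 'old' in the kernel macro. */
		if (likely(atomic_compare_exchange_strong(v, &c, c + a)))
			break;
	}
	return c != u;
}

int main(void)
{
	atomic_int counter = 1;

	printf("%d\n", try_add_unless(&counter, 1, 0));	/* 1: added; counter is now 2 */
	atomic_store(&counter, 0);
	printf("%d\n", try_add_unless(&counter, 1, 0));	/* 0: counter was 0, left untouched */
	return 0;
}

On a successful CAS, c still holds the pre-add value (which was not u), so the final c != u matches the kernel macro's result expression; note that the sparc64 hunk below additionally wraps that result in likely().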
Diffstat (limited to 'include')
-rw-r--r--  include/asm-i386/atomic.h     |  8
-rw-r--r--  include/asm-ia64/atomic.h     |  8
-rw-r--r--  include/asm-m68k/atomic.h     |  8
-rw-r--r--  include/asm-s390/atomic.h     | 18
-rw-r--r--  include/asm-sparc64/atomic.h  | 10
-rw-r--r--  include/asm-x86_64/atomic.h   |  8
6 files changed, 50 insertions(+), 10 deletions(-)
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index 78b0032d1f29..22d80ece95cb 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -225,8 +225,14 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
 ({ \
 	int c, old; \
 	c = atomic_read(v); \
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+	for (;;) { \
+		if (unlikely(c == (u))) \
+			break; \
+		old = atomic_cmpxchg((v), c, c + (a)); \
+		if (likely(old == c)) \
+			break; \
 		c = old; \
+	} \
 	c != (u); \
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h
index d3e0dfa99e1f..569ec7574baf 100644
--- a/include/asm-ia64/atomic.h
+++ b/include/asm-ia64/atomic.h
@@ -95,8 +95,14 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
 ({ \
 	int c, old; \
 	c = atomic_read(v); \
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+	for (;;) { \
+		if (unlikely(c == (u))) \
+			break; \
+		old = atomic_cmpxchg((v), c, c + (a)); \
+		if (likely(old == c)) \
+			break; \
 		c = old; \
+	} \
 	c != (u); \
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
diff --git a/include/asm-m68k/atomic.h b/include/asm-m68k/atomic.h
index 862e497c2645..732d696d31a6 100644
--- a/include/asm-m68k/atomic.h
+++ b/include/asm-m68k/atomic.h
@@ -175,8 +175,14 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
 ({ \
 	int c, old; \
 	c = atomic_read(v); \
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+	for (;;) { \
+		if (unlikely(c == (u))) \
+			break; \
+		old = atomic_cmpxchg((v), c, c + (a)); \
+		if (likely(old == c)) \
+			break; \
 		c = old; \
+	} \
 	c != (u); \
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
diff --git a/include/asm-s390/atomic.h b/include/asm-s390/atomic.h
index be6fefe223d6..de1d9926aa60 100644
--- a/include/asm-s390/atomic.h
+++ b/include/asm-s390/atomic.h
@@ -89,10 +89,15 @@ static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
 static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
-
 	c = atomic_read(v);
-	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
+	for (;;) {
+		if (unlikely(c == u))
+			break;
+		old = atomic_cmpxchg(v, c, c + a);
+		if (likely(old == c))
+			break;
 		c = old;
+	}
 	return c != u;
 }
 
@@ -167,10 +172,15 @@ static __inline__ int atomic64_add_unless(atomic64_t *v,
 					  long long a, long long u)
 {
 	long long c, old;
-
 	c = atomic64_read(v);
-	while (c != u && (old = atomic64_cmpxchg(v, c, c + a)) != c)
+	for (;;) {
+		if (unlikely(c == u))
+			break;
+		old = atomic64_cmpxchg(v, c, c + a);
+		if (likely(old == c))
+			break;
 		c = old;
+	}
 	return c != u;
 }
 
diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h
index 25256bdc8aae..468eb48d8142 100644
--- a/include/asm-sparc64/atomic.h
+++ b/include/asm-sparc64/atomic.h
@@ -78,9 +78,15 @@ extern int atomic64_sub_ret(int, atomic64_t *);
 ({ \
 	int c, old; \
 	c = atomic_read(v); \
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+	for (;;) { \
+		if (unlikely(c == (u))) \
+			break; \
+		old = atomic_cmpxchg((v), c, c + (a)); \
+		if (likely(old == c)) \
+			break; \
 		c = old; \
-	c != (u); \
+	} \
+	likely(c != (u)); \
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 4b5cd553e772..cecbf7baa6aa 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -405,8 +405,14 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
 ({ \
 	int c, old; \
 	c = atomic_read(v); \
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+	for (;;) { \
+		if (unlikely(c == (u))) \
+			break; \
+		old = atomic_cmpxchg((v), c, c + (a)); \
+		if (likely(old == c)) \
+			break; \
 		c = old; \
+	} \
 	c != (u); \
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)