author     Tony Luck <tony.luck@intel.com>    2010-08-13 19:41:07 -0400
committer  Tony Luck <tony.luck@intel.com>    2010-08-13 19:41:07 -0400
commit     01d69a82e1d3c938da16bf55aab000672243aa24
tree       9d9763c1a71e29111cab48632aa8c177a54e63c9 /arch/ia64
parent     ad41a1e0cab07c5125456e8d38e5b1ab148d04aa
[IA64] Fix 64-bit atomic routines to return "long"
These have been broken (returning "int") since the dawn of
time. But there were no users that needed the whole value
until commit
424acaaeb3a3932d64a9b4bd59df6cf72c22d8f3
rwsem: wake queued readers when writer blocks on active read lock
made this change:
- (rwsem_atomic_update(0, sem) & RWSEM_ACTIVE_MASK))
- /* Someone grabbed the sem already */
+ rwsem_atomic_update(0, sem) < RWSEM_WAITING_BIAS)
+ /* Someone grabbed the sem for write already */
RWSEM_ACTIVE_MASK is 0xffffffffL, so the old code only looked
at the low-order 32 bits. The new code needs to see all 64 bits.
Signed-off-by: Tony Luck <tony.luck@intel.com>
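
To make the truncation concrete, here is a minimal user-space sketch (not the
kernel code) in C, assuming a 64-bit long as on ia64. RWSEM_ACTIVE_MASK matches
the value cited in the message above; the other bias values follow the generic
64-bit rwsem layout of the time and are reproduced here only for illustration:

/*
 * Sketch of the rwsem check with a truncated vs. full-width count.
 * Assumes sizeof(long) == 8, as on ia64; bias values are illustrative.
 */
#include <stdio.h>

#define RWSEM_ACTIVE_BIAS	(1L)
#define RWSEM_ACTIVE_MASK	(0xffffffffL)
#define RWSEM_WAITING_BIAS	(-0x100000000L)
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

int main(void)
{
	/* count for "one active writer plus one queued waiter" */
	long count = RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS;

	long full = count;		/* what a "long" return preserves */
	int truncated = (int)count;	/* what the old "int" return gave back:
					 * only the low-order 32 bits survive */

	/* the new rwsem test: did someone grab the sem for write? */
	printf("64-bit count:    writer seen = %d\n",
	       full < RWSEM_WAITING_BIAS);
	printf("truncated count: writer seen = %d\n",
	       (long)truncated < RWSEM_WAITING_BIAS);
	return 0;
}

With one active writer and a queued waiter the full count sits below
RWSEM_WAITING_BIAS, so the first test prints 1; the truncated count collapses
to 1 and the second test prints 0, which is exactly the "writer goes
undetected" failure this commit fixes.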
Diffstat (limited to 'arch/ia64')
-rw-r--r--	arch/ia64/include/asm/atomic.h | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 4e1948447a00..446881439675 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -41,7 +41,7 @@ ia64_atomic_add (int i, atomic_t *v)
 	return new;
 }
 
-static __inline__ int
+static __inline__ long
 ia64_atomic64_add (__s64 i, atomic64_t *v)
 {
 	__s64 old, new;
@@ -69,7 +69,7 @@ ia64_atomic_sub (int i, atomic_t *v)
 	return new;
 }
 
-static __inline__ int
+static __inline__ long
 ia64_atomic64_sub (__s64 i, atomic64_t *v)
 {
 	__s64 old, new;
@@ -107,7 +107,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
-static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
 {
 	long c, old;
 	c = atomic64_read(v);
@@ -158,7 +158,7 @@ atomic_add_negative (int i, atomic_t *v)
 	return atomic_add_return(i, v) < 0;
 }
 
-static __inline__ int
+static __inline__ long
 atomic64_add_negative (__s64 i, atomic64_t *v)
 {
 	return atomic64_add_return(i, v) < 0;