author	Tony Luck <tony.luck@intel.com>	2010-08-13 19:41:07 -0400
committer	Tony Luck <tony.luck@intel.com>	2010-08-13 19:41:07 -0400
commit	01d69a82e1d3c938da16bf55aab000672243aa24 (patch)
tree	9d9763c1a71e29111cab48632aa8c177a54e63c9 /arch/ia64
parent	ad41a1e0cab07c5125456e8d38e5b1ab148d04aa (diff)
[IA64] Fix 64-bit atomic routines to return "long"
These have been broken (returning "int") since the dawn of time. But there
were no users that needed the whole value until commit
424acaaeb3a3932d64a9b4bd59df6cf72c22d8f3
("rwsem: wake queued readers when writer blocks on active read lock")
made this change:

-		 (rwsem_atomic_update(0, sem) & RWSEM_ACTIVE_MASK))
-			/* Someone grabbed the sem already */
+		 rwsem_atomic_update(0, sem) < RWSEM_WAITING_BIAS)
+			/* Someone grabbed the sem for write already */

RWSEM_ACTIVE_MASK is 0xffffffffL, so the old code only looked at the low
order 32-bits. The new code needs to see all 64 bits.

Signed-off-by: Tony Luck <tony.luck@intel.com>
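For illustration, here is a small user-space sketch of the failure mode. This is not kernel code: the helper names are invented, and RWSEM_WAITING_BIAS / RWSEM_ACTIVE_WRITE_BIAS below are assumptions modelled on the 64-bit layout implied by RWSEM_ACTIVE_MASK = 0xffffffffL; it also assumes an LP64 target where "long" is 64 bits.

	/*
	 * Illustration only -- not kernel code.  The bias values are
	 * assumptions modelled on the usual 64-bit rwsem layout.
	 */
	#include <stdio.h>

	#define RWSEM_ACTIVE_BIAS	(1L)
	#define RWSEM_ACTIVE_MASK	(0xffffffffL)
	#define RWSEM_WAITING_BIAS	(-RWSEM_ACTIVE_MASK - 1)		/* assumed */
	#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)	/* assumed */

	/* Pre-fix shape: the 64-bit counter is silently narrowed to 32 bits. */
	static int update_returning_int(long delta, long *count)
	{
		return *count += delta;
	}

	/* Post-fix shape: the caller sees the whole 64-bit value. */
	static long update_returning_long(long delta, long *count)
	{
		return *count += delta;
	}

	int main(void)
	{
		/* A writer holds the sem and at least one task is queued. */
		long count = RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS;

		/* The new rwsem test: "< RWSEM_WAITING_BIAS" means someone
		 * already grabbed the sem for write. */
		printf("long return: writer seen = %d\n",
		       update_returning_long(0, &count) < RWSEM_WAITING_BIAS);		/* 1 */
		printf("int return:  writer seen = %d\n",
		       (long)update_returning_int(0, &count) < RWSEM_WAITING_BIAS);	/* 0 */
		return 0;
	}

With a writer holding the lock and a waiter queued, the counter sits below RWSEM_WAITING_BIAS; the int-returning variant hands back only the low 32 bits (here 1), so the "< RWSEM_WAITING_BIAS" test wrongly reports that no writer holds the sem.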
Diffstat (limited to 'arch/ia64')
-rw-r--r--	arch/ia64/include/asm/atomic.h	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 4e1948447a00..446881439675 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -41,7 +41,7 @@ ia64_atomic_add (int i, atomic_t *v)
 	return new;
 }
 
-static __inline__ int
+static __inline__ long
 ia64_atomic64_add (__s64 i, atomic64_t *v)
 {
 	__s64 old, new;
@@ -69,7 +69,7 @@ ia64_atomic_sub (int i, atomic_t *v)
 	return new;
 }
 
-static __inline__ int
+static __inline__ long
 ia64_atomic64_sub (__s64 i, atomic64_t *v)
 {
 	__s64 old, new;
@@ -107,7 +107,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
-static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
 {
 	long c, old;
 	c = atomic64_read(v);
@@ -158,7 +158,7 @@ atomic_add_negative (int i, atomic_t *v)
 	return atomic_add_return(i, v) < 0;
 }
 
-static __inline__ int
+static __inline__ long
 atomic64_add_negative (__s64 i, atomic64_t *v)
 {
 	return atomic64_add_return(i, v) < 0;