author     Peter Zijlstra <peterz@infradead.org>      2014-03-23 13:20:30 -0400
committer  Ingo Molnar <mingo@kernel.org>             2014-08-14 06:48:07 -0400
commit     08be2dab191431f23f5f98ba2db76513d0d853e7 (patch)
tree       27c7b031d1e326bb94a7dd92c477be0bfc7990b8 /arch/ia64
parent     50f853e38b0b90a5703ab14b70e20eb5a8ccd5de (diff)
locking,arch,ia64: Fold atomic_ops
Many of the atomic op implementations are the same except for one
instruction; fold the lot into a few CPP macros and reduce LoC. This
also prepares for easy addition of new ops.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: linux-ia64@vger.kernel.org
Link: http://lkml.kernel.org/r/20140508135852.245224472@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
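The fold itself is straightforward to reproduce outside the kernel. Below is a minimal user-space sketch of the same pattern (C11 atomics stand in for ia64_cmpxchg(), the CMPXCHG_BUGCHECK machinery is omitted, and the demo_atomic_* names are made up for illustration), showing how a single CPP macro stamps out one compare-and-swap loop per operator:

/*
 * Illustrative only: one macro generates one CAS-loop function per operator,
 * mirroring the ATOMIC_OP() fold in the patch. C11 atomics replace
 * ia64_cmpxchg(); the bugcheck machinery is left out.
 */
#include <stdatomic.h>
#include <stdio.h>

#define DEMO_ATOMIC_OP(op, c_op) \
static inline int demo_atomic_##op(int i, _Atomic int *v) \
{ \
        int old, new; \
 \
        do { \
                old = atomic_load(v); \
                new = old c_op i; \
        } while (!atomic_compare_exchange_weak(v, &old, new)); \
        return new; \
}

DEMO_ATOMIC_OP(add, +)          /* generates demo_atomic_add() */
DEMO_ATOMIC_OP(sub, -)          /* generates demo_atomic_sub() */

#undef DEMO_ATOMIC_OP

int main(void)
{
        _Atomic int v = 5;

        printf("%d\n", demo_atomic_add(3, &v));  /* prints 8 */
        printf("%d\n", demo_atomic_sub(2, &v));  /* prints 6 */
        return 0;
}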
Diffstat (limited to 'arch/ia64')
-rw-r--r--   arch/ia64/include/asm/atomic.h   188
1 file changed, 86 insertions(+), 102 deletions(-)
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 0f8bf48dadf3..42919a831c6c 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -27,62 +27,94 @@
 #define atomic_set(v,i) (((v)->counter) = (i))
 #define atomic64_set(v,i) (((v)->counter) = (i))

-static __inline__ int
-ia64_atomic_add (int i, atomic_t *v)
-{
-        __s32 old, new;
-        CMPXCHG_BUGCHECK_DECL
-
-        do {
-                CMPXCHG_BUGCHECK(v);
-                old = atomic_read(v);
-                new = old + i;
-        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
-        return new;
+#define ATOMIC_OP(op, c_op) \
+static __inline__ int \
+ia64_atomic_##op (int i, atomic_t *v) \
+{ \
+        __s32 old, new; \
+        CMPXCHG_BUGCHECK_DECL \
+ \
+        do { \
+                CMPXCHG_BUGCHECK(v); \
+                old = atomic_read(v); \
+                new = old c_op i; \
+        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
+        return new; \
 }

-static __inline__ long
-ia64_atomic64_add (__s64 i, atomic64_t *v)
-{
-        __s64 old, new;
-        CMPXCHG_BUGCHECK_DECL
-
-        do {
-                CMPXCHG_BUGCHECK(v);
-                old = atomic64_read(v);
-                new = old + i;
-        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
-        return new;
-}
+ATOMIC_OP(add, +)
+ATOMIC_OP(sub, -)

-static __inline__ int
-ia64_atomic_sub (int i, atomic_t *v)
-{
-        __s32 old, new;
-        CMPXCHG_BUGCHECK_DECL
-
-        do {
-                CMPXCHG_BUGCHECK(v);
-                old = atomic_read(v);
-                new = old - i;
-        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
-        return new;
-}
+#undef ATOMIC_OP

-static __inline__ long
-ia64_atomic64_sub (__s64 i, atomic64_t *v)
-{
-        __s64 old, new;
-        CMPXCHG_BUGCHECK_DECL
-
-        do {
-                CMPXCHG_BUGCHECK(v);
-                old = atomic64_read(v);
-                new = old - i;
-        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
-        return new;
+#define atomic_add_return(i,v) \
+({ \
+        int __ia64_aar_i = (i); \
+        (__builtin_constant_p(i) \
+         && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4) \
+             || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16) \
+             || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4) \
+             || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
+                ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
+                : ia64_atomic_add(__ia64_aar_i, v); \
+})
+
+#define atomic_sub_return(i,v) \
+({ \
+        int __ia64_asr_i = (i); \
+        (__builtin_constant_p(i) \
+         && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4) \
+             || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16) \
+             || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4) \
+             || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16))) \
+                ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
+                : ia64_atomic_sub(__ia64_asr_i, v); \
+})
+
+#define ATOMIC64_OP(op, c_op) \
+static __inline__ long \
+ia64_atomic64_##op (__s64 i, atomic64_t *v) \
+{ \
+        __s64 old, new; \
+        CMPXCHG_BUGCHECK_DECL \
+ \
+        do { \
+                CMPXCHG_BUGCHECK(v); \
+                old = atomic64_read(v); \
+                new = old c_op i; \
+        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
+        return new; \
 }

+ATOMIC64_OP(add, +)
+ATOMIC64_OP(sub, -)
+
+#undef ATOMIC64_OP
+
+#define atomic64_add_return(i,v) \
+({ \
+        long __ia64_aar_i = (i); \
+        (__builtin_constant_p(i) \
+         && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4) \
+             || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16) \
+             || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4) \
+             || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
+                ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
+                : ia64_atomic64_add(__ia64_aar_i, v); \
+})
+
+#define atomic64_sub_return(i,v) \
+({ \
+        long __ia64_asr_i = (i); \
+        (__builtin_constant_p(i) \
+         && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4) \
+             || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16) \
+             || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4) \
+             || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16))) \
+                ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
+                : ia64_atomic64_sub(__ia64_asr_i, v); \
+})
+
 #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))

@@ -123,30 +155,6 @@ static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)

 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

-#define atomic_add_return(i,v) \
-({ \
-        int __ia64_aar_i = (i); \
-        (__builtin_constant_p(i) \
-         && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4) \
-             || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16) \
-             || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4) \
-             || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
-                ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
-                : ia64_atomic_add(__ia64_aar_i, v); \
-})
-
-#define atomic64_add_return(i,v) \
-({ \
-        long __ia64_aar_i = (i); \
-        (__builtin_constant_p(i) \
-         && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4) \
-             || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16) \
-             || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4) \
-             || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
-                ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
-                : ia64_atomic64_add(__ia64_aar_i, v); \
-})
-
 /*
  * Atomically add I to V and return TRUE if the resulting value is
  * negative.
@@ -163,30 +171,6 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
         return atomic64_add_return(i, v) < 0;
 }

-#define atomic_sub_return(i,v) \
-({ \
-        int __ia64_asr_i = (i); \
-        (__builtin_constant_p(i) \
-         && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4) \
-             || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16) \
-             || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4) \
-             || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16))) \
-                ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
-                : ia64_atomic_sub(__ia64_asr_i, v); \
-})
-
-#define atomic64_sub_return(i,v) \
-({ \
-        long __ia64_asr_i = (i); \
-        (__builtin_constant_p(i) \
-         && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4) \
-             || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16) \
-             || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4) \
-             || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16))) \
-                ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
-                : ia64_atomic64_sub(__ia64_asr_i, v); \
-})
-
 #define atomic_dec_return(v)            atomic_sub_return(1, (v))
 #define atomic_inc_return(v)            atomic_add_return(1, (v))
 #define atomic64_dec_return(v)          atomic64_sub_return(1, (v))
@@ -199,13 +183,13 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
 #define atomic64_dec_and_test(v)        (atomic64_sub_return(1, (v)) == 0)
 #define atomic64_inc_and_test(v)        (atomic64_add_return(1, (v)) == 0)

-#define atomic_add(i,v)                 atomic_add_return((i), (v))
-#define atomic_sub(i,v)                 atomic_sub_return((i), (v))
+#define atomic_add(i,v)                 (void)atomic_add_return((i), (v))
+#define atomic_sub(i,v)                 (void)atomic_sub_return((i), (v))
 #define atomic_inc(v)                   atomic_add(1, (v))
 #define atomic_dec(v)                   atomic_sub(1, (v))

-#define atomic64_add(i,v)               atomic64_add_return((i), (v))
-#define atomic64_sub(i,v)               atomic64_sub_return((i), (v))
+#define atomic64_add(i,v)               (void)atomic64_add_return((i), (v))
+#define atomic64_sub(i,v)               (void)atomic64_sub_return((i), (v))
 #define atomic64_inc(v)                 atomic64_add(1, (v))
 #define atomic64_dec(v)                 atomic64_sub(1, (v))

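The *_return macros kept by this patch rely on a second idiom: __builtin_constant_p(). When the operand is a compile-time constant that ia64's fetchadd instruction can encode (plus or minus 1, 4, 8, or 16), the macro resolves to ia64_fetch_and_add(); any other value falls back to the cmpxchg loop generated by the new ATOMIC_OP()/ATOMIC64_OP() macros. A minimal, self-contained sketch of that compile-time dispatch follows; it uses the same GNU statement-expression style, and the demo_* names and non-atomic stand-in functions are hypothetical, not kernel code.

/*
 * Illustrative sketch of the __builtin_constant_p() dispatch used by
 * atomic_add_return() above: constants the "fast" path can handle are
 * routed there at compile time, everything else takes the generic path.
 * Both stand-in functions are deliberately trivial and NOT atomic; they
 * only report which branch was chosen.
 */
#include <stdio.h>

static int demo_fast_add(int i, int *v)  { puts("fast path");    return *v += i; }
static int demo_slow_add(int i, int *v)  { puts("generic path"); return *v += i; }

#define demo_add_return(i, v) \
({ \
        int __demo_i = (i); \
        (__builtin_constant_p(i) \
         && (   (__demo_i ==  1) || (__demo_i ==   4) \
             || (__demo_i ==  8) || (__demo_i ==  16) \
             || (__demo_i == -1) || (__demo_i ==  -4) \
             || (__demo_i == -8) || (__demo_i == -16))) \
                ? demo_fast_add(__demo_i, &(v)) \
                : demo_slow_add(__demo_i, &(v)); \
})

int main(int argc, char **argv)
{
        int v = 0;
        int n = argc + 2;       /* runtime value, never a compile-time constant */

        (void)argv;
        printf("%d\n", demo_add_return(4, v));  /* constant 4: fast path */
        printf("%d\n", demo_add_return(n, v));  /* variable n: generic path */
        return 0;
}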