author		Martin Schwidefsky <schwidefsky@de.ibm.com>	2010-02-26 16:37:31 -0500
committer	Martin Schwidefsky <sky@mschwide.boeblingen.de.ibm.com>	2010-02-26 16:37:30 -0500
commit		987bcdacb18a3adc2a48d85c9b005069c2f4dd7b
tree		65da88589e2c95a4677090b570f49402e870e573
parent		d1bf85902c28dd990c08f1703ea94109223549a7
[S390] use inline assembly constraints available with gcc 3.3.3
Drop support for compiling the kernel with gcc versions older than
3.3.3. This allows us to use the "Q" inline assembly constraint on some
more inline assemblies without duplicating a lot of complex code
(e.g. __xchg and __cmpxchg). The distinction for older gcc versions can
be removed, which saves a few lines and simplifies the code.

Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
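As an illustration of what the "Q" constraint buys (a minimal sketch,
not part of the patch; fetch_add_old()/fetch_add_new() are made-up
names): with the old "a"/"m" operand pair the address has to be passed
in an address register and the displacement spelled out by hand as
0(%n), while "Q" (a memory reference with a short displacement and no
index register, available from gcc 3.3.3) lets the compiler build the
memory operand itself.

	/* Old style: explicit address register and hand-written 0(%3)
	 * base+displacement addressing; the "m" operands only tell gcc
	 * that the memory is read and written. */
	static inline int fetch_add_old(int *ptr, int val)
	{
		int old, new;

		asm volatile(
			"	l	%0,0(%3)\n"
			"0:	lr	%1,%0\n"
			"	ar	%1,%4\n"
			"	cs	%0,%1,0(%3)\n"
			"	jl	0b"
			: "=&d" (old), "=&d" (new), "=m" (*ptr)
			: "a" (ptr), "d" (val), "m" (*ptr)
			: "cc", "memory");
		return new;
	}

	/* New style: gcc emits the base+displacement operand %2 itself,
	 * so no separate address-register operand is needed. */
	static inline int fetch_add_new(int *ptr, int val)
	{
		int old, new;

		asm volatile(
			"	l	%0,%2\n"
			"0:	lr	%1,%0\n"
			"	ar	%1,%3\n"
			"	cs	%0,%1,%2\n"
			"	jl	0b"
			: "=&d" (old), "=&d" (new), "+Q" (*ptr)
			: "d" (val)
			: "cc", "memory");
		return new;
	}

Besides dropping the duplicated #else branches, this frees an address
register and lets gcc fold the displacement into the instruction.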
Diffstat (limited to 'arch/s390/include/asm/atomic.h')
 arch/s390/include/asm/atomic.h | 86 ++++------------------------------
 1 file changed, 11 insertions(+), 75 deletions(-)
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 2a113d6a7dfd..451bfbb9db3d 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -18,8 +18,6 @@
 
 #define ATOMIC_INIT(i)	{ (i) }
 
-#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
-
 #define __CS_LOOP(ptr, op_val, op_string) ({			\
 	int old_val, new_val;					\
 	asm volatile(						\
@@ -35,26 +33,6 @@
 	new_val;						\
 })
 
-#else /* __GNUC__ */
-
-#define __CS_LOOP(ptr, op_val, op_string) ({			\
-	int old_val, new_val;					\
-	asm volatile(						\
-		"	l	%0,0(%3)\n"			\
-		"0:	lr	%1,%0\n"			\
-		op_string "	%1,%4\n"			\
-		"	cs	%0,%1,0(%3)\n"			\
-		"	jl	0b"				\
-		: "=&d" (old_val), "=&d" (new_val),		\
-		  "=m" (((atomic_t *)(ptr))->counter)		\
-		: "a" (ptr), "d" (op_val),			\
-		  "m" (((atomic_t *)(ptr))->counter)		\
-		: "cc", "memory");				\
-	new_val;						\
-})
-
-#endif /* __GNUC__ */
-
 static inline int atomic_read(const atomic_t *v)
 {
 	barrier();
@@ -101,19 +79,11 @@ static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
 
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
-#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
 	asm volatile(
 		"	cs	%0,%2,%1"
 		: "+d" (old), "=Q" (v->counter)
 		: "d" (new), "Q" (v->counter)
 		: "cc", "memory");
-#else /* __GNUC__ */
-	asm volatile(
-		"	cs	%0,%3,0(%2)"
-		: "+d" (old), "=m" (v->counter)
-		: "a" (v), "d" (new), "m" (v->counter)
-		: "cc", "memory");
-#endif /* __GNUC__ */
 	return old;
 }
 
@@ -140,8 +110,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 
 #ifdef CONFIG_64BIT
 
-#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
-
 #define __CSG_LOOP(ptr, op_val, op_string) ({			\
 	long long old_val, new_val;				\
 	asm volatile(						\
@@ -157,26 +125,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 	new_val;						\
 })
 
-#else /* __GNUC__ */
-
-#define __CSG_LOOP(ptr, op_val, op_string) ({			\
-	long long old_val, new_val;				\
-	asm volatile(						\
-		"	lg	%0,0(%3)\n"			\
-		"0:	lgr	%1,%0\n"			\
-		op_string "	%1,%4\n"			\
-		"	csg	%0,%1,0(%3)\n"			\
-		"	jl	0b"				\
-		: "=&d" (old_val), "=&d" (new_val),		\
-		  "=m" (((atomic_t *)(ptr))->counter)		\
-		: "a" (ptr), "d" (op_val),			\
-		  "m" (((atomic_t *)(ptr))->counter)		\
-		: "cc", "memory");				\
-	new_val;						\
-})
-
-#endif /* __GNUC__ */
-
 static inline long long atomic64_read(const atomic64_t *v)
 {
 	barrier();
@@ -214,19 +162,11 @@ static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
 static inline long long atomic64_cmpxchg(atomic64_t *v,
 					     long long old, long long new)
 {
-#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
 	asm volatile(
 		"	csg	%0,%2,%1"
 		: "+d" (old), "=Q" (v->counter)
 		: "d" (new), "Q" (v->counter)
 		: "cc", "memory");
-#else /* __GNUC__ */
-	asm volatile(
-		"	csg	%0,%3,0(%2)"
-		: "+d" (old), "=m" (v->counter)
-		: "a" (v), "d" (new), "m" (v->counter)
-		: "cc", "memory");
-#endif /* __GNUC__ */
 	return old;
 }
 
@@ -243,10 +183,8 @@ static inline long long atomic64_read(const atomic64_t *v)
 	register_pair rp;
 
 	asm volatile(
-		"	lm	%0,%N0,0(%1)"
-		: "=&d" (rp)
-		: "a" (&v->counter), "m" (v->counter)
-		);
+		"	lm	%0,%N0,%1"
+		: "=&d" (rp) : "Q" (v->counter));
 	return rp.pair;
 }
 
@@ -255,10 +193,8 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 	register_pair rp = {.pair = i};
 
 	asm volatile(
-		"	stm	%1,%N1,0(%2)"
-		: "=m" (v->counter)
-		: "d" (rp), "a" (&v->counter)
-		);
+		"	stm	%1,%N1,%0"
+		: "=Q" (v->counter) : "d" (rp));
 }
 
 static inline long long atomic64_xchg(atomic64_t *v, long long new)
@@ -267,11 +203,11 @@ static inline long long atomic64_xchg(atomic64_t *v, long long new)
 	register_pair rp_old;
 
 	asm volatile(
-		"	lm	%0,%N0,0(%2)\n"
-		"0:	cds	%0,%3,0(%2)\n"
+		"	lm	%0,%N0,%1\n"
+		"0:	cds	%0,%2,%1\n"
 		"	jl	0b\n"
-		: "=&d" (rp_old), "+m" (v->counter)
-		: "a" (&v->counter), "d" (rp_new)
+		: "=&d" (rp_old), "=Q" (v->counter)
+		: "d" (rp_new), "Q" (v->counter)
 		: "cc");
 	return rp_old.pair;
 }
@@ -283,9 +219,9 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
 	register_pair rp_new = {.pair = new};
 
 	asm volatile(
-		"	cds	%0,%3,0(%2)"
-		: "+&d" (rp_old), "+m" (v->counter)
-		: "a" (&v->counter), "d" (rp_new)
+		"	cds	%0,%2,%1"
+		: "+&d" (rp_old), "=Q" (v->counter)
+		: "d" (rp_new), "Q" (v->counter)
 		: "cc");
 	return rp_old.pair;
 }