author     Heiko Carstens <heiko.carstens@de.ibm.com>   2009-09-11 04:28:34 -0400
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>  2009-09-11 04:29:43 -0400
commit     12751058515860ed43c8f874ebcb2097b323736a
tree       491ec6fe0b0f16da7f05f4edc924ba9a60986588
parent     6ac2a4ddd10d6916785b4c566d521025c855f823
[S390] atomic ops: add efficient atomic64 support for 31 bit
Use compare double and swap to implement efficient atomic64 ops for 31 bit.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
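Background for readers new to the technique: 31-bit s390 has no 64-bit compare-and-swap of a single register, but the CDS (compare double and swap) instruction can atomically compare and replace 8 bytes in storage using an even/odd register pair. The patch therefore builds every atomic64 operation from a read/modify/retry loop around that primitive. The sketch below shows the same retry-loop pattern in portable user-space C, with GCC's __atomic builtins standing in for the CDS inline assembly; the demo_* names and main() are illustrative only and are not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's 31-bit atomic64_t. */
typedef struct { int64_t counter; } demo_atomic64_t;

/*
 * Add 'i' to the counter and return the new value, retrying until the
 * 64-bit compare-and-swap succeeds -- the same loop structure as the
 * atomic64_add_return() added by this patch, minus the CDS inline asm.
 */
static int64_t demo_atomic64_add_return(int64_t i, demo_atomic64_t *v)
{
	int64_t old, new;

	do {
		old = __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
		new = old + i;
	} while (!__atomic_compare_exchange_n(&v->counter, &old, new, 0,
					      __ATOMIC_SEQ_CST,
					      __ATOMIC_SEQ_CST));
	return new;
}

int main(void)
{
	demo_atomic64_t v = { .counter = 40 };

	printf("%lld\n", (long long)demo_atomic64_add_return(2, &v)); /* 42 */
	return 0;
}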
Diffstat (limited to 'arch')
-rw-r--r--  arch/s390/Kconfig                 1
-rw-r--r--  arch/s390/include/asm/atomic.h  164
2 files changed, 127 insertions, 38 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 2ae5d72f47ed..47836b945d03 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -95,7 +95,6 @@ config S390
 	select HAVE_ARCH_TRACEHOOK
 	select INIT_ALL_POSSIBLE
 	select HAVE_PERF_COUNTERS
-	select GENERIC_ATOMIC64 if !64BIT
 
 config SCHED_OMIT_FRAME_POINTER
 	bool
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index c7d0abfb0f00..b491d5e963cf 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -1,28 +1,20 @@
 #ifndef __ARCH_S390_ATOMIC__
 #define __ARCH_S390_ATOMIC__
 
-#include <linux/compiler.h>
-#include <linux/types.h>
-
 /*
- * include/asm-s390/atomic.h
+ * Copyright 1999,2009 IBM Corp.
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ *            Denis Joseph Barrow,
+ *            Arnd Bergmann <arndb@de.ibm.com>,
  *
- * S390 version
- *   Copyright (C) 1999-2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
- *   Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- *              Denis Joseph Barrow,
- *              Arnd Bergmann (arndb@de.ibm.com)
- *
- * Derived from "include/asm-i386/bitops.h"
- *     Copyright (C) 1992, Linus Torvalds
+ * Atomic operations that C can't guarantee us.
+ * Useful for resource counting etc.
+ * s390 uses 'Compare And Swap' for atomicity in SMP enviroment.
  *
  */
 
-/*
- * Atomic operations that C can't guarantee us. Useful for
- * resource counting etc..
- * S390 uses 'Compare And Swap' for atomicity in SMP enviroment
- */
+#include <linux/compiler.h>
+#include <linux/types.h>
 
 #define ATOMIC_INIT(i) { (i) }
 
@@ -146,9 +138,10 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 
 #undef __CS_LOOP
 
-#ifdef __s390x__
 #define ATOMIC64_INIT(i) { (i) }
 
+#ifdef CONFIG_64BIT
+
 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
 
 #define __CSG_LOOP(ptr, op_val, op_string) ({	\
@@ -202,21 +195,11 @@ static __inline__ long long atomic64_add_return(long long i, atomic64_t * v)
 {
 	return __CSG_LOOP(v, i, "agr");
 }
-#define atomic64_add(_i, _v) atomic64_add_return(_i, _v)
-#define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0)
-#define atomic64_inc(_v) atomic64_add_return(1, _v)
-#define atomic64_inc_return(_v) atomic64_add_return(1, _v)
-#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0)
 
 static __inline__ long long atomic64_sub_return(long long i, atomic64_t * v)
 {
 	return __CSG_LOOP(v, i, "sgr");
 }
-#define atomic64_sub(_i, _v) atomic64_sub_return(_i, _v)
-#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0)
-#define atomic64_dec(_v) atomic64_sub_return(1, _v)
-#define atomic64_dec_return(_v) atomic64_sub_return(1, _v)
-#define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
 
 static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v)
 {
@@ -249,6 +232,111 @@ static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
 	return old;
 }
 
+#undef __CSG_LOOP
+
+#else /* CONFIG_64BIT */
+
+typedef struct {
+	long long counter;
+} atomic64_t;
+
+static inline long long atomic64_read(const atomic64_t *v)
+{
+	register_pair rp;
+
+	asm volatile(
+		"	lm	%0,%N0,0(%1)"
+		: "=&d" (rp)
+		: "a" (&v->counter), "m" (v->counter)
+		);
+	return rp.pair;
+}
+
+static inline void atomic64_set(atomic64_t *v, long long i)
+{
+	register_pair rp = {.pair = i};
+
+	asm volatile(
+		"	stm	%1,%N1,0(%2)"
+		: "=m" (v->counter)
+		: "d" (rp), "a" (&v->counter)
+		);
+}
+
+static inline long long atomic64_xchg(atomic64_t *v, long long new)
+{
+	register_pair rp_new = {.pair = new};
+	register_pair rp_old;
+
+	asm volatile(
+		"	lm	%0,%N0,0(%2)\n"
+		"0:	cds	%0,%3,0(%2)\n"
+		"	jl	0b\n"
+		: "=&d" (rp_old), "+m" (v->counter)
+		: "a" (&v->counter), "d" (rp_new)
+		: "cc");
+	return rp_old.pair;
+}
+
+static inline long long atomic64_cmpxchg(atomic64_t *v,
+					 long long old, long long new)
+{
+	register_pair rp_old = {.pair = old};
+	register_pair rp_new = {.pair = new};
+
+	asm volatile(
+		"	cds	%0,%3,0(%2)"
+		: "+&d" (rp_old), "+m" (v->counter)
+		: "a" (&v->counter), "d" (rp_new)
+		: "cc");
+	return rp_old.pair;
+}
+
+
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
+{
+	long long old, new;
+
+	do {
+		old = atomic64_read(v);
+		new = old + i;
+	} while (atomic64_cmpxchg(v, old, new) != old);
+	return new;
+}
+
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
+{
+	long long old, new;
+
+	do {
+		old = atomic64_read(v);
+		new = old - i;
+	} while (atomic64_cmpxchg(v, old, new) != old);
+	return new;
+}
+
+static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
+{
+	long long old, new;
+
+	do {
+		old = atomic64_read(v);
+		new = old | mask;
+	} while (atomic64_cmpxchg(v, old, new) != old);
+}
+
+static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
+{
+	long long old, new;
+
+	do {
+		old = atomic64_read(v);
+		new = old & mask;
+	} while (atomic64_cmpxchg(v, old, new) != old);
+}
+
+#endif /* CONFIG_64BIT */
+
 static __inline__ int atomic64_add_unless(atomic64_t *v,
 					  long long a, long long u)
 {
@@ -265,15 +353,17 @@ static __inline__ int atomic64_add_unless(atomic64_t *v,
 	return c != u;
 }
 
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
-#undef __CSG_LOOP
-
-#else /* __s390x__ */
-
-#include <asm-generic/atomic64.h>
-
-#endif /* __s390x__ */
+#define atomic64_add(_i, _v) atomic64_add_return(_i, _v)
+#define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0)
+#define atomic64_inc(_v) atomic64_add_return(1, _v)
+#define atomic64_inc_return(_v) atomic64_add_return(1, _v)
+#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0)
+#define atomic64_sub(_i, _v) atomic64_sub_return(_i, _v)
+#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0)
+#define atomic64_dec(_v) atomic64_sub_return(1, _v)
+#define atomic64_dec_return(_v) atomic64_sub_return(1, _v)
+#define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
 #define smp_mb__before_atomic_dec()	smp_mb()
 #define smp_mb__after_atomic_dec()	smp_mb()