Diffstat (limited to 'include/asm-s390')
-rw-r--r--	include/asm-s390/atomic.h	173
1 file changed, 70 insertions, 103 deletions
diff --git a/include/asm-s390/atomic.h b/include/asm-s390/atomic.h
index 6d07c7df4b40..d82aedf616fe 100644
--- a/include/asm-s390/atomic.h
+++ b/include/asm-s390/atomic.h
@@ -5,7 +5,7 @@
  * include/asm-s390/atomic.h
  *
  * S390 version
- *  Copyright (C) 1999-2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *  Copyright (C) 1999-2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
  *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
  *             Denis Joseph Barrow,
  *             Arnd Bergmann (arndb@de.ibm.com)
@@ -45,59 +45,57 @@ typedef struct {
 #define atomic_read(v)          ((v)->counter)
 #define atomic_set(v,i)         (((v)->counter) = (i))
 
-static __inline__ void atomic_add(int i, atomic_t * v)
-{
-	__CS_LOOP(v, i, "ar");
-}
 static __inline__ int atomic_add_return(int i, atomic_t * v)
 {
 	return __CS_LOOP(v, i, "ar");
 }
-static __inline__ int atomic_add_negative(int i, atomic_t * v)
-{
-	return __CS_LOOP(v, i, "ar") < 0;
-}
-static __inline__ void atomic_sub(int i, atomic_t * v)
-{
-	__CS_LOOP(v, i, "sr");
-}
+#define atomic_add(_i, _v)		atomic_add_return(_i, _v)
+#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
+#define atomic_inc(_v)			atomic_add_return(1, _v)
+#define atomic_inc_return(_v)		atomic_add_return(1, _v)
+#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
+
 static __inline__ int atomic_sub_return(int i, atomic_t * v)
 {
 	return __CS_LOOP(v, i, "sr");
 }
-static __inline__ void atomic_inc(volatile atomic_t * v)
-{
-	__CS_LOOP(v, 1, "ar");
-}
-static __inline__ int atomic_inc_return(volatile atomic_t * v)
-{
-	return __CS_LOOP(v, 1, "ar");
-}
+#define atomic_sub(_i, _v)		atomic_sub_return(_i, _v)
+#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
+#define atomic_dec(_v)			atomic_sub_return(1, _v)
+#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
+#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)
 
-static __inline__ int atomic_inc_and_test(volatile atomic_t * v)
-{
-	return __CS_LOOP(v, 1, "ar") == 0;
-}
-static __inline__ void atomic_dec(volatile atomic_t * v)
-{
-	__CS_LOOP(v, 1, "sr");
-}
-static __inline__ int atomic_dec_return(volatile atomic_t * v)
-{
-	return __CS_LOOP(v, 1, "sr");
-}
-static __inline__ int atomic_dec_and_test(volatile atomic_t * v)
-{
-	return __CS_LOOP(v, 1, "sr") == 0;
-}
 static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v)
 {
 	__CS_LOOP(v, ~mask, "nr");
 }
+
 static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
 {
 	__CS_LOOP(v, mask, "or");
 }
+
+static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	__asm__ __volatile__("  cs   %0,%3,0(%2)\n"
+			     : "+d" (old), "=m" (v->counter)
+			     : "a" (v), "d" (new), "m" (v->counter)
+			     : "cc", "memory" );
+	return old;
+}
+
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int c, old;
+
+	c = atomic_read(v);
+	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
+		c = old;
+	return c != u;
+}
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 #undef __CS_LOOP
 
 #ifdef __s390x__
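
[Editor's note: __CS_LOOP is defined earlier in this header and is not
visible in this hunk. As a rough illustration only, the pattern it
implements is a compare-and-swap retry loop around the s390 CS
instruction; a portable C11 sketch of the same idea, using
atomic_compare_exchange_weak as a stand-in for CS and with hypothetical
names, might look like:

	#include <stdatomic.h>

	/* Sketch of the retry loop behind __CS_LOOP(v, i, "ar"):
	 * reload the counter on contention and retry until the
	 * compare-and-swap succeeds without interference. */
	static int cs_loop_add(atomic_int *counter, int i)
	{
		int old = atomic_load(counter);
		int new;

		do {
			new = old + i;	/* the "ar" (add register) step */
			/* on failure, 'old' is refreshed automatically */
		} while (!atomic_compare_exchange_weak(counter, &old, new));
		return new;		/* what atomic_add_return() yields */
	}

With every operation funneled through atomic_add_return() and
atomic_sub_return(), the derived forms (inc, dec, *_and_test,
*_negative) become one-line macros instead of separate inline
assembly loops.]
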
@@ -123,92 +121,61 @@ typedef struct {
 #define atomic64_read(v)          ((v)->counter)
 #define atomic64_set(v,i)         (((v)->counter) = (i))
 
-static __inline__ void atomic64_add(long long i, atomic64_t * v)
-{
-	__CSG_LOOP(v, i, "agr");
-}
 static __inline__ long long atomic64_add_return(long long i, atomic64_t * v)
 {
 	return __CSG_LOOP(v, i, "agr");
 }
-static __inline__ long long atomic64_add_negative(long long i, atomic64_t * v)
-{
-	return __CSG_LOOP(v, i, "agr") < 0;
-}
-static __inline__ void atomic64_sub(long long i, atomic64_t * v)
-{
-	__CSG_LOOP(v, i, "sgr");
-}
-static __inline__ void atomic64_inc(volatile atomic64_t * v)
-{
-	__CSG_LOOP(v, 1, "agr");
-}
-static __inline__ long long atomic64_inc_return(volatile atomic64_t * v)
-{
-	return __CSG_LOOP(v, 1, "agr");
-}
-static __inline__ long long atomic64_inc_and_test(volatile atomic64_t * v)
-{
-	return __CSG_LOOP(v, 1, "agr") == 0;
-}
-static __inline__ void atomic64_dec(volatile atomic64_t * v)
-{
-	__CSG_LOOP(v, 1, "sgr");
-}
-static __inline__ long long atomic64_dec_return(volatile atomic64_t * v)
-{
-	return __CSG_LOOP(v, 1, "sgr");
-}
-static __inline__ long long atomic64_dec_and_test(volatile atomic64_t * v)
+#define atomic64_add(_i, _v)		atomic64_add_return(_i, _v)
+#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
+#define atomic64_inc(_v)		atomic64_add_return(1, _v)
+#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
+#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
+
+static __inline__ long long atomic64_sub_return(long long i, atomic64_t * v)
 {
-	return __CSG_LOOP(v, 1, "sgr") == 0;
+	return __CSG_LOOP(v, i, "sgr");
 }
+#define atomic64_sub(_i, _v)		atomic64_sub_return(_i, _v)
+#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
+#define atomic64_dec(_v)		atomic64_sub_return(1, _v)
+#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
+#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
+
 static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v)
 {
 	__CSG_LOOP(v, ~mask, "ngr");
 }
+
 static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v)
 {
 	__CSG_LOOP(v, mask, "ogr");
 }
 
-#undef __CSG_LOOP
-#endif
-
-/*
-  returns 0  if expected_oldval==value in *v ( swap was successful )
-  returns 1  if unsuccessful.
+static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
+					     long long old, long long new)
+{
+	__asm__ __volatile__("  csg  %0,%3,0(%2)\n"
+			     : "+d" (old), "=m" (v->counter)
+			     : "a" (v), "d" (new), "m" (v->counter)
+			     : "cc", "memory" );
+	return old;
+}
 
-  This is non-portable, use bitops or spinlocks instead!
-*/
-static __inline__ int
-atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v)
+static __inline__ int atomic64_add_unless(atomic64_t *v,
+					  long long a, long long u)
 {
-	int retval;
+	long long c, old;
 
-	__asm__ __volatile__(
-		"  lr   %0,%3\n"
-		"  cs   %0,%4,0(%2)\n"
-		"  ipm  %0\n"
-		"  srl  %0,28\n"
-		"0:"
-		: "=&d" (retval), "=m" (v->counter)
-		: "a" (v), "d" (expected_oldval) , "d" (new_val),
-		  "m" (v->counter) : "cc", "memory" );
-	return retval;
+	c = atomic64_read(v);
+	while (c != u && (old = atomic64_cmpxchg(v, c, c + a)) != c)
+		c = old;
+	return c != u;
 }
 
-#define atomic_cmpxchg(v, o, n) (atomic_compare_and_swap((o), (n), &((v)->counter)))
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
-#define atomic_add_unless(v, a, u) \
-({ \
-	int c, old; \
-	c = atomic_read(v); \
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-		c = old; \
-	c != (u); \
-})
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+#undef __CSG_LOOP
+#endif
 
 #define smp_mb__before_atomic_dec()	smp_mb()
 #define smp_mb__after_atomic_dec()	smp_mb()
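
[Editor's note: a hedged usage sketch of the atomic_add_unless() helper
introduced above. atomic_inc_not_zero() is the classic "take a reference
only if the object is still live" primitive. The struct and function
names below are hypothetical, not part of this patch; only atomic_t,
atomic_inc_not_zero() and atomic_dec_and_test() come from the header
itself.

	#include <asm/atomic.h>
	#include <linux/slab.h>

	struct cache_entry {			/* hypothetical object */
		atomic_t refcount;
	};

	/* Succeeds only while refcount > 0; once the count has reached
	 * zero the object is being torn down and must not be revived. */
	static struct cache_entry *entry_get(struct cache_entry *e)
	{
		return atomic_inc_not_zero(&e->refcount) ? e : NULL;
	}

	static void entry_put(struct cache_entry *e)
	{
		if (atomic_dec_and_test(&e->refcount))
			kfree(e);		/* last reference dropped */
	}

Because atomic_add_unless() is built on atomic_cmpxchg(), the increment
and the liveness check happen as one atomic step, closing the race where
a reader bumps a counter that has already hit zero.]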