Diffstat (limited to 'include/asm-s390/atomic.h')
-rw-r--r--  include/asm-s390/atomic.h  176
1 files changed, 73 insertions, 103 deletions
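In short, the patch below drops the per-operation inline wrappers (atomic_add, atomic_sub, atomic_inc, atomic_dec and friends) and rebuilds them as macros on top of atomic_add_return()/atomic_sub_return(), adds cmpxchg-based helpers (atomic_xchg, atomic_cmpxchg, atomic_add_unless and their 64-bit counterparts), and pulls in <asm-generic/atomic.h>. The sketch below models that layering in plain user-space C; it is not the kernel header, and atomic_add_return_model()/atomic_sub_return_model() are made-up stand-ins for the real __CS_LOOP compare-and-swap loop, approximated here with GCC __sync built-ins.

```c
/* Illustrative model of the macro layering used by the patch.
 * The *_return_model() functions play the role of the __CS_LOOP-based
 * atomic_add_return()/atomic_sub_return(); everything else is a thin
 * macro over them, exactly as in the new header.
 */
#include <stdio.h>

typedef struct { volatile int counter; } my_atomic_t;

static int atomic_add_return_model(int i, my_atomic_t *v)
{
	/* returns the new value, like the kernel's atomic_add_return() */
	return __sync_add_and_fetch(&v->counter, i);
}

static int atomic_sub_return_model(int i, my_atomic_t *v)
{
	return __sync_sub_and_fetch(&v->counter, i);
}

/* Derived operations, mirroring the patch's #defines: */
#define atomic_add_model(i, v)		atomic_add_return_model(i, v)
#define atomic_inc_return_model(v)	atomic_add_return_model(1, v)
#define atomic_dec_and_test_model(v)	(atomic_sub_return_model(1, v) == 0)

int main(void)
{
	my_atomic_t a = { .counter = 1 };

	atomic_add_model(2, &a);				/* counter == 3 */
	printf("%d\n", atomic_inc_return_model(&a));		/* prints 4 */
	printf("%d\n", atomic_dec_and_test_model(&a));		/* prints 0: not zero yet */
	return 0;
}
```

The point of the layering is that every derived operation inherits its value and ordering semantics from the single *_return primitive, so only one compare-and-swap loop has to be correct.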
diff --git a/include/asm-s390/atomic.h b/include/asm-s390/atomic.h
index b3bd4f679f72..be6fefe223d6 100644
--- a/include/asm-s390/atomic.h
+++ b/include/asm-s390/atomic.h
@@ -5,7 +5,7 @@
  * include/asm-s390/atomic.h
  *
  * S390 version
- * Copyright (C) 1999-2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright (C) 1999-2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
  * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
  *            Denis Joseph Barrow,
  *            Arnd Bergmann (arndb@de.ibm.com)
@@ -45,59 +45,59 @@ typedef struct {
 #define atomic_read(v)          ((v)->counter)
 #define atomic_set(v,i)         (((v)->counter) = (i))

-static __inline__ void atomic_add(int i, atomic_t * v)
-{
-	__CS_LOOP(v, i, "ar");
-}
 static __inline__ int atomic_add_return(int i, atomic_t * v)
 {
 	return __CS_LOOP(v, i, "ar");
 }
-static __inline__ int atomic_add_negative(int i, atomic_t * v)
-{
-	return __CS_LOOP(v, i, "ar") < 0;
-}
-static __inline__ void atomic_sub(int i, atomic_t * v)
-{
-	__CS_LOOP(v, i, "sr");
-}
+#define atomic_add(_i, _v)		atomic_add_return(_i, _v)
+#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
+#define atomic_inc(_v)			atomic_add_return(1, _v)
+#define atomic_inc_return(_v)		atomic_add_return(1, _v)
+#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
+
 static __inline__ int atomic_sub_return(int i, atomic_t * v)
 {
 	return __CS_LOOP(v, i, "sr");
 }
-static __inline__ void atomic_inc(volatile atomic_t * v)
-{
-	__CS_LOOP(v, 1, "ar");
-}
-static __inline__ int atomic_inc_return(volatile atomic_t * v)
-{
-	return __CS_LOOP(v, 1, "ar");
-}
+#define atomic_sub(_i, _v)		atomic_sub_return(_i, _v)
+#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
+#define atomic_dec(_v)			atomic_sub_return(1, _v)
+#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
+#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)

-static __inline__ int atomic_inc_and_test(volatile atomic_t * v)
-{
-	return __CS_LOOP(v, 1, "ar") == 0;
-}
-static __inline__ void atomic_dec(volatile atomic_t * v)
-{
-	__CS_LOOP(v, 1, "sr");
-}
-static __inline__ int atomic_dec_return(volatile atomic_t * v)
-{
-	return __CS_LOOP(v, 1, "sr");
-}
-static __inline__ int atomic_dec_and_test(volatile atomic_t * v)
-{
-	return __CS_LOOP(v, 1, "sr") == 0;
-}
 static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v)
 {
 	__CS_LOOP(v, ~mask, "nr");
 }
+
 static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
 {
 	__CS_LOOP(v, mask, "or");
 }
+
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	__asm__ __volatile__("  cs   %0,%3,0(%2)\n"
+			     : "+d" (old), "=m" (v->counter)
+			     : "a" (v), "d" (new), "m" (v->counter)
+			     : "cc", "memory" );
+	return old;
+}
+
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int c, old;
+
+	c = atomic_read(v);
+	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
+		c = old;
+	return c != u;
+}
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 #undef __CS_LOOP

 #ifdef __s390x__
@@ -123,97 +123,67 @@ typedef struct {
 #define atomic64_read(v)          ((v)->counter)
 #define atomic64_set(v,i)         (((v)->counter) = (i))

-static __inline__ void atomic64_add(long long i, atomic64_t * v)
-{
-	__CSG_LOOP(v, i, "agr");
-}
 static __inline__ long long atomic64_add_return(long long i, atomic64_t * v)
 {
 	return __CSG_LOOP(v, i, "agr");
 }
-static __inline__ long long atomic64_add_negative(long long i, atomic64_t * v)
-{
-	return __CSG_LOOP(v, i, "agr") < 0;
-}
-static __inline__ void atomic64_sub(long long i, atomic64_t * v)
-{
-	__CSG_LOOP(v, i, "sgr");
-}
-static __inline__ void atomic64_inc(volatile atomic64_t * v)
-{
-	__CSG_LOOP(v, 1, "agr");
-}
-static __inline__ long long atomic64_inc_return(volatile atomic64_t * v)
-{
-	return __CSG_LOOP(v, 1, "agr");
-}
-static __inline__ long long atomic64_inc_and_test(volatile atomic64_t * v)
-{
-	return __CSG_LOOP(v, 1, "agr") == 0;
-}
-static __inline__ void atomic64_dec(volatile atomic64_t * v)
-{
-	__CSG_LOOP(v, 1, "sgr");
-}
-static __inline__ long long atomic64_dec_return(volatile atomic64_t * v)
-{
-	return __CSG_LOOP(v, 1, "sgr");
-}
-static __inline__ long long atomic64_dec_and_test(volatile atomic64_t * v)
+#define atomic64_add(_i, _v)		atomic64_add_return(_i, _v)
+#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
+#define atomic64_inc(_v)		atomic64_add_return(1, _v)
+#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
+#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
+
+static __inline__ long long atomic64_sub_return(long long i, atomic64_t * v)
 {
-	return __CSG_LOOP(v, 1, "sgr") == 0;
+	return __CSG_LOOP(v, i, "sgr");
 }
+#define atomic64_sub(_i, _v)		atomic64_sub_return(_i, _v)
+#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
+#define atomic64_dec(_v)		atomic64_sub_return(1, _v)
+#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
+#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
+
 static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v)
 {
 	__CSG_LOOP(v, ~mask, "ngr");
 }
+
 static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v)
 {
 	__CSG_LOOP(v, mask, "ogr");
 }

-#undef __CSG_LOOP
-#endif
-
-/*
-  returns 0  if expected_oldval==value in *v ( swap was successful )
-  returns 1  if unsuccessful.
+static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
+					     long long old, long long new)
+{
+	__asm__ __volatile__("  csg  %0,%3,0(%2)\n"
+			     : "+d" (old), "=m" (v->counter)
+			     : "a" (v), "d" (new), "m" (v->counter)
+			     : "cc", "memory" );
+	return old;
+}

-  This is non-portable, use bitops or spinlocks instead!
-*/
-static __inline__ int
-atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v)
+static __inline__ int atomic64_add_unless(atomic64_t *v,
+					  long long a, long long u)
 {
-	int retval;
+	long long c, old;

-	__asm__ __volatile__(
-                "  lr   %0,%3\n"
-                "  cs   %0,%4,0(%2)\n"
-                "  ipm  %0\n"
-                "  srl  %0,28\n"
-                "0:"
-                : "=&d" (retval), "=m" (v->counter)
-                : "a" (v), "d" (expected_oldval) , "d" (new_val),
-		  "m" (v->counter) : "cc", "memory" );
-	return retval;
+	c = atomic64_read(v);
+	while (c != u && (old = atomic64_cmpxchg(v, c, c + a)) != c)
+		c = old;
+	return c != u;
 }

-#define atomic_cmpxchg(v, o, n) (atomic_compare_and_swap((o), (n), &((v)->counter)))
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

-#define atomic_add_unless(v, a, u) \
-({ \
-	int c, old; \
-	c = atomic_read(v); \
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-		c = old; \
-	c != (u); \
-})
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+#undef __CSG_LOOP
+#endif

 #define smp_mb__before_atomic_dec()	smp_mb()
 #define smp_mb__after_atomic_dec()	smp_mb()
 #define smp_mb__before_atomic_inc()	smp_mb()
 #define smp_mb__after_atomic_inc()	smp_mb()

+#include <asm-generic/atomic.h>
 #endif /* __KERNEL__ */
 #endif /* __ARCH_S390_ATOMIC__ */
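The new atomic_add_unless()/atomic64_add_unless() helpers follow the usual cmpxchg retry pattern: read the counter, try to swap in old + a, and retry with the freshly observed value if another CPU raced in between; once the counter equals u, the add is skipped and the function reports failure. A minimal user-space analogue of that loop, assuming a GCC __sync built-in in place of the cs/csg based atomic_cmpxchg() (names are illustrative, not kernel API):

```c
/* User-space model of the atomic_add_unless() retry loop added by this
 * patch.  __sync_val_compare_and_swap() plays the role of the inline
 * "cs"/"csg" based atomic_cmpxchg(); the loop structure is the same.
 */
#include <stdio.h>

static int add_unless_model(volatile int *counter, int a, int u)
{
	int c, old;

	c = *counter;
	while (c != u) {
		/* try to replace c with c + a; returns the value found */
		old = __sync_val_compare_and_swap(counter, c, c + a);
		if (old == c)
			break;		/* swap succeeded */
		c = old;		/* someone else changed it; retry */
	}
	return c != u;			/* non-zero if the add happened */
}

int main(void)
{
	volatile int refcount = 1;

	/* atomic_inc_not_zero() is add_unless(v, 1, 0): only take a
	 * reference if the object is not already at zero. */
	if (add_unless_model(&refcount, 1, 0))
		printf("got reference, refcount=%d\n", refcount);

	refcount = 0;
	if (!add_unless_model(&refcount, 1, 0))
		printf("object already dead, refcount stays %d\n", refcount);
	return 0;
}
```

atomic_inc_not_zero(v), defined in the patch as atomic_add_unless(v, 1, 0), is the common use case: take a reference only if the object has not already dropped to zero.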