| author    | Heiko Carstens <heiko.carstens@de.ibm.com>  | 2009-09-11 04:28:35 -0400 |
|-----------|---------------------------------------------|---------------------------|
| committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2009-09-11 04:29:44 -0400 |
| commit    | bfe3349b516df011dcf6462b0fd748a6f5c2e8af    |                           |
| tree      | 145d25ab22cf3974c0f5b8d6be711900fe730989 (arch/s390/include/asm/atomic.h) | |
| parent    | 12751058515860ed43c8f874ebcb2097b323736a    |                           |
[S390] atomic ops: small cleanups
Couple of coding style fixes, replace __inline__ with inline and
remove #ifdef __KERNEL__ since the header file isn't exported.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
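
As a representative before/after pair (excerpted from the atomic_add_return() hunk in the diff below), the cleanup only changes the declaration style; the generated code is identical:

```c
/* before: old-style __inline__ keyword and "type * name" pointer spacing */
static __inline__ int atomic_add_return(int i, atomic_t * v)
{
	return __CS_LOOP(v, i, "ar");
}

/* after: plain inline and kernel coding-style "type *name" */
static inline int atomic_add_return(int i, atomic_t *v)
{
	return __CS_LOOP(v, i, "ar");
}
```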
Diffstat (limited to 'arch/s390/include/asm/atomic.h')
-rw-r--r-- | arch/s390/include/asm/atomic.h | 41
1 file changed, 19 insertions(+), 22 deletions(-)
```diff
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index b491d5e963cf..ae7c8f9f94a5 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -18,8 +18,6 @@
 
 #define ATOMIC_INIT(i)  { (i) }
 
-#ifdef __KERNEL__
-
 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
 
 #define __CS_LOOP(ptr, op_val, op_string) ({			\
@@ -69,7 +67,7 @@ static inline void atomic_set(atomic_t *v, int i)
 	barrier();
 }
 
-static __inline__ int atomic_add_return(int i, atomic_t * v)
+static inline int atomic_add_return(int i, atomic_t *v)
 {
 	return __CS_LOOP(v, i, "ar");
 }
@@ -79,7 +77,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 #define atomic_inc_return(_v)		atomic_add_return(1, _v)
 #define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
 
-static __inline__ int atomic_sub_return(int i, atomic_t * v)
+static inline int atomic_sub_return(int i, atomic_t *v)
 {
 	return __CS_LOOP(v, i, "sr");
 }
@@ -89,19 +87,19 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 #define atomic_dec_return(_v)		atomic_sub_return(1, _v)
 #define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)
 
-static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v)
+static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
 {
 	__CS_LOOP(v, ~mask, "nr");
 }
 
-static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
+static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
 {
 	__CS_LOOP(v, mask, "or");
 }
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
-static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
 	asm volatile(
@@ -119,7 +117,7 @@ static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return old;
 }
 
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
@@ -155,7 +153,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 		: "=&d" (old_val), "=&d" (new_val),		\
 		  "=Q" (((atomic_t *)(ptr))->counter)		\
 		: "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
-		: "cc", "memory" );				\
+		: "cc", "memory");				\
 	new_val;						\
 })
 
@@ -173,7 +171,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 		  "=m" (((atomic_t *)(ptr))->counter)		\
 		: "a" (ptr), "d" (op_val),			\
 		  "m" (((atomic_t *)(ptr))->counter)		\
-		: "cc", "memory" );				\
+		: "cc", "memory");				\
 	new_val;						\
 })
 
@@ -191,29 +189,29 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 	barrier();
 }
 
-static __inline__ long long atomic64_add_return(long long i, atomic64_t * v)
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
 	return __CSG_LOOP(v, i, "agr");
 }
 
-static __inline__ long long atomic64_sub_return(long long i, atomic64_t * v)
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
 {
 	return __CSG_LOOP(v, i, "sgr");
 }
 
-static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v)
+static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
 {
 	__CSG_LOOP(v, ~mask, "ngr");
 }
 
-static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v)
+static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
 {
 	__CSG_LOOP(v, mask, "ogr");
 }
 
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
-static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
+static inline long long atomic64_cmpxchg(atomic64_t *v,
 					     long long old, long long new)
 {
 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
@@ -337,8 +335,7 @@ static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
 
 #endif /* CONFIG_64BIT */
 
-static __inline__ int atomic64_add_unless(atomic64_t *v,
-					  long long a, long long u)
+static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
 	long long c, old;
 	c = atomic64_read(v);
@@ -371,5 +368,5 @@ static __inline__ int atomic64_add_unless(atomic64_t *v,
 #define smp_mb__after_atomic_inc()	smp_mb()
 
 #include <asm-generic/atomic-long.h>
-#endif /* __KERNEL__ */
+
 #endif /* __ARCH_S390_ATOMIC__ */
```
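
For context, the __CS_LOOP()/__CSG_LOOP() macros that these functions wrap (and which this patch leaves functionally untouched) implement a compare-and-swap retry loop around the s390 cs/csg instructions. Below is a minimal, hypothetical user-space sketch of that pattern; the function name atomic_add_return_sketch and the use of the GCC builtin __sync_val_compare_and_swap in place of the real inline assembly are illustrative assumptions, not part of the patch:

```c
#include <stdio.h>

/* stand-in for the kernel's atomic_t, just for this sketch */
typedef struct { int counter; } atomic_t;

/*
 * Hypothetical illustration of the retry loop behind __CS_LOOP:
 * read the current value, compute current + i, and try to install
 * the result with compare-and-swap; retry if another CPU changed
 * the counter in between.  The real macro emits the s390 "cs"
 * instruction directly via inline assembly.
 */
static inline int atomic_add_return_sketch(int i, atomic_t *v)
{
	int old, new;

	do {
		old = v->counter;
		new = old + i;
	} while (__sync_val_compare_and_swap(&v->counter, old, new) != old);

	return new;
}

int main(void)
{
	atomic_t a = { 5 };

	printf("%d\n", atomic_add_return_sketch(3, &a));	/* prints 8 */
	return 0;
}
```

In the real macros, the op_string argument ("ar", "sr", "nr", "or", and their 64-bit counterparts "agr", "sgr", "ngr", "ogr") selects which arithmetic or logical instruction computes the new value before each compare-and-swap attempt.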