author		Chen Gang <gang.chen@asianux.com>	2013-10-26 10:07:04 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2013-11-08 19:00:10 -0500
commit		237f12337cfa2175474e4dd015bc07a25eb9080d
tree		c00fe4b87680684f292e043f1e67c4c1a900affa
parent		4d8981f6b784ae445a28de909d77e27836c3ed44
ARM: 7866/1: include: asm: use 'long long' instead of 'u64' within atomic.h
atomic* values are signed, and the atomic* functions must also handle
signed values (both parameters and return values), so 32-bit ARM needs
to use 'long long' instead of 'u64'.
After the replacement, this also fixes a bug in atomic64_add_negative():
a 'u64' result is never less than 0, so the negative check could never succeed.
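As a quick illustration, here is a minimal user-space sketch of the signedness
issue (not kernel code: it assumes atomic64_add_negative() is built as
"(atomic64_add_return((a), (v)) < 0)", and the add_return_u64()/add_return_ll()
helpers below are hypothetical stand-ins that differ only in return type):

#include <stdio.h>

/* Stand-in for the old prototype: the result is returned as an unsigned u64. */
static unsigned long long add_return_u64(long long i, long long *counter)
{
	return *counter += i;
}

/* Stand-in for the new prototype: the result stays signed. */
static long long add_return_ll(long long i, long long *counter)
{
	return *counter += i;
}

int main(void)
{
	long long v = 0;

	/* Unsigned return: "< 0" is always false, even after adding -1 to 0. */
	printf("u64 return:       negative? %d\n", add_return_u64(-1, &v) < 0);

	v = 0;
	/* Signed return: the negative sum is reported correctly. */
	printf("long long return: negative? %d\n", add_return_ll(-1, &v) < 0);

	return 0;
}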
The modifications are:
  in vim, use the "1,% s/\<u64\>/long long/g" command.
  remove '__aligned(8)', which is redundant for the 64-bit type.
  keep within the 80-column limit after the replacement.
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Chen Gang <gang.chen@asianux.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-rw-r--r--	arch/arm/include/asm/atomic.h	49
1 file changed, 25 insertions(+), 24 deletions(-)
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index da1c77d39327..a715ac049e4c 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -238,15 +238,15 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 
 #ifndef CONFIG_GENERIC_ATOMIC64
 typedef struct {
-	u64 __aligned(8) counter;
+	long long counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(i)	{ (i) }
 
 #ifdef CONFIG_ARM_LPAE
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
-	u64 result;
+	long long result;
 
 	__asm__ __volatile__("@ atomic64_read\n"
 "	ldrd	%0, %H0, [%1]"
@@ -257,7 +257,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
 	return result;
 }
 
-static inline void atomic64_set(atomic64_t *v, u64 i)
+static inline void atomic64_set(atomic64_t *v, long long i)
 {
 	__asm__ __volatile__("@ atomic64_set\n"
 "	strd	%2, %H2, [%1]"
@@ -266,9 +266,9 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
 	);
 }
 #else
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
-	u64 result;
+	long long result;
 
 	__asm__ __volatile__("@ atomic64_read\n"
 "	ldrexd	%0, %H0, [%1]"
@@ -279,9 +279,9 @@ static inline u64 atomic64_read(const atomic64_t *v)
 	return result;
 }
 
-static inline void atomic64_set(atomic64_t *v, u64 i)
+static inline void atomic64_set(atomic64_t *v, long long i)
 {
-	u64 tmp;
+	long long tmp;
 
 	__asm__ __volatile__("@ atomic64_set\n"
 "1:	ldrexd	%0, %H0, [%2]\n"
@@ -294,9 +294,9 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
 }
 #endif
 
-static inline void atomic64_add(u64 i, atomic64_t *v)
+static inline void atomic64_add(long long i, atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;
 
 	__asm__ __volatile__("@ atomic64_add\n"
@@ -311,9 +311,9 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
 	: "cc");
 }
 
-static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;
 
 	smp_mb();
@@ -334,9 +334,9 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
 	return result;
 }
 
-static inline void atomic64_sub(u64 i, atomic64_t *v)
+static inline void atomic64_sub(long long i, atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;
 
 	__asm__ __volatile__("@ atomic64_sub\n"
@@ -351,9 +351,9 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
 	: "cc");
 }
 
-static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;
 
 	smp_mb();
@@ -374,9 +374,10 @@ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
 	return result;
 }
 
-static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
+static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
+					 long long new)
 {
-	u64 oldval;
+	long long oldval;
 	unsigned long res;
 
 	smp_mb();
@@ -398,9 +399,9 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
 	return oldval;
 }
 
-static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
+static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;
 
 	smp_mb();
@@ -419,9 +420,9 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
 	return result;
 }
 
-static inline u64 atomic64_dec_if_positive(atomic64_t *v)
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;
 
 	smp_mb();
@@ -445,9 +446,9 @@ static inline u64 atomic64_dec_if_positive(atomic64_t *v)
 	return result;
 }
 
-static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
-	u64 val;
+	long long val;
 	unsigned long tmp;
 	int ret = 1;
 