author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-11 22:21:23 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-11 22:21:23 -0400
commit    dd6d1844af33acb4edd0a40b1770d091a22c94be (patch)
tree      e6bd3549919773a13b770324a4dddb51b194b452 /include/asm-mips/atomic.h
parent    19f71153b9be219756c6b2757921433a69b7975c (diff)
parent    aaf76a3245c02faba51c96b9a340c14d6bb0dcc0 (diff)
Merge branch 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus
* 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus: (80 commits)
  [MIPS] tlbex.c: Cleanup __init usage.
  [MIPS] WRPPMC serial support move to platform device
  [MIPS] R1: Fix hazard barriers to make kernels work on R2 also.
  [MIPS] VPE: reimplement ELF loader.
  [MIPS] cleanup WRPPMC include files
  [MIPS] Add BUG_ON assertion for attempt to run kernel on the wrong CPU type.
  [MIPS] SMP: Use ISO C struct initializer for local structs.
  [MIPS] SMP: Kill useless casts.
  [MIPS] Kill num_online_cpus() loops.
  [MIPS] SMP: Implement smp_call_function_mask().
  [MIPS] Make facility to convert CPU types to strings generally available.
  [MIPS] Convert list of CPU types from #define to enum.
  [MIPS] Optimize get_unaligned / put_unaligned implementations.
  [MIPS] checkfiles: Fix "need space after that ','" errors.
  [MIPS] Fix "no space between function name and open parenthesis" warnings.
  [MIPS] Allow hardwiring of the CPU type to a single type for optimization.
  [MIPS] tlbex: Size optimize code by declaring a few functions inline.
  [MIPS] pg-r4k.c: Dump the generated code
  [MIPS] Cobalt: Remove cobalt_machine_power_off()
  [MIPS] Cobalt: Move reset port definition to arch/mips/cobalt/reset.c
  ...
Diffstat (limited to 'include/asm-mips/atomic.h')
-rw-r--r--  include/asm-mips/atomic.h | 28 ++++++++++++++--------------
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index 7d8003769a44..a798d6299a79 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -39,7 +39,7 @@ typedef struct { volatile int counter; } atomic_t;
  *
  * Atomically sets the value of @v to @i.
  */
-#define atomic_set(v,i) ((v)->counter = (i))
+#define atomic_set(v, i) ((v)->counter = (i))
 
 /*
  * atomic_add - add integer to atomic variable
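For readers new to this API: atomic_set() is a plain store into the counter field, while the arithmetic operations elsewhere in this header are implemented with LL/SC assembly on MIPS. Below is a minimal userspace mock-up of the macro changed above, for illustration only; ATOMIC_INIT and atomic_read come from the same header, but the test program itself is invented.

#include <stdio.h>

/* Mock-up of the kernel's counter type and the macro from the hunk above. */
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)   { (i) }
#define atomic_read(v)   ((v)->counter)
#define atomic_set(v, i) ((v)->counter = (i))

int main(void)
{
	atomic_t refs = ATOMIC_INIT(0);

	atomic_set(&refs, 5);               /* plain store, exactly as the macro expands */
	printf("%d\n", atomic_read(&refs)); /* prints 5 */
	return 0;
}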
@@ -335,8 +335,8 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 }
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
-#define atomic_dec_return(v) atomic_sub_return(1,(v))
-#define atomic_inc_return(v) atomic_add_return(1,(v))
+#define atomic_dec_return(v) atomic_sub_return(1, (v))
+#define atomic_inc_return(v) atomic_add_return(1, (v))
 
 /*
  * atomic_sub_and_test - subtract value from variable and test result
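The atomic_inc_not_zero() wrapper visible in this hunk is the classic "take a reference only if the object is still live" primitive. A single-threaded C model of the atomic_add_unless() contract follows; the semantics are taken from the kerneldoc in this header, but the model itself is a sketch, and the real version loops over a compare-and-swap (LL/SC on MIPS) rather than doing a plain read-modify-write.

#include <assert.h>

/* Model of atomic_add_unless(v, a, u): add 'a' to the counter unless it
 * currently equals 'u'; return non-zero if the add happened.
 * NOT actually atomic -- illustration only. */
static int add_unless_model(int *v, int a, int u)
{
	if (*v == u)
		return 0;
	*v += a;
	return 1;
}

int main(void)
{
	int live = 3, dead = 0;

	/* atomic_inc_not_zero(v) is add_unless(v, 1, 0): a counter that
	 * already hit zero is never resurrected. */
	assert(add_unless_model(&live, 1, 0) == 1 && live == 4);
	assert(add_unless_model(&dead, 1, 0) == 0 && dead == 0);
	return 0;
}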
@@ -347,7 +347,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
  * true if the result is zero, or false for all
  * other cases.
  */
-#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
+#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
 
 /*
  * atomic_inc_and_test - increment and test
@@ -381,7 +381,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
  *
  * Atomically increments @v by 1.
  */
-#define atomic_inc(v) atomic_add(1,(v))
+#define atomic_inc(v) atomic_add(1, (v))
 
 /*
  * atomic_dec - decrement and test
@@ -389,7 +389,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
  *
  * Atomically decrements @v by 1.
  */
-#define atomic_dec(v) atomic_sub(1,(v))
+#define atomic_dec(v) atomic_sub(1, (v))
 
 /*
  * atomic_add_negative - add and test if negative
@@ -400,7 +400,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-#define atomic_add_negative(i,v) (atomic_add_return(i, (v)) < 0)
+#define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)
 
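The *_and_test and *_negative macros above all share one shape: do the arithmetic through the *_return() form, then compare the result, so the operation and the test happen as a single atomic step. The classic consumer is release-side reference counting. The sketch below is a self-contained, single-threaded model of that pattern; my_obj, my_obj_put, and sub_and_test_model are invented names, and it is the real atomic_sub_and_test() that makes the pattern safe across CPUs.

#include <stdlib.h>

/* Hypothetical refcounted object for illustration. */
struct my_obj {
	int refcount;
	char payload[32];
};

/* Model of atomic_sub_and_test(i, v): subtract, then test for zero.
 * The real macro does both as one atomic step, so two CPUs can never
 * both observe zero and double-free. */
static int sub_and_test_model(int i, int *v)
{
	*v -= i;
	return *v == 0;
}

static void my_obj_put(struct my_obj *obj)
{
	if (sub_and_test_model(1, &obj->refcount))
		free(obj);  /* we held the last reference */
}

int main(void)
{
	struct my_obj *obj = calloc(1, sizeof(*obj));

	obj->refcount = 2;
	my_obj_put(obj);  /* refcount 2 -> 1, object survives */
	my_obj_put(obj);  /* refcount 1 -> 0, object freed    */
	return 0;
}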
 #ifdef CONFIG_64BIT
 
@@ -420,7 +420,7 @@ typedef struct { volatile long counter; } atomic64_t;
  * @v: pointer of type atomic64_t
  * @i: required value
  */
-#define atomic64_set(v,i) ((v)->counter = (i))
+#define atomic64_set(v, i) ((v)->counter = (i))
 
 /*
  * atomic64_add - add integer to atomic variable
@@ -718,8 +718,8 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
-#define atomic64_dec_return(v) atomic64_sub_return(1,(v))
-#define atomic64_inc_return(v) atomic64_add_return(1,(v))
+#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
+#define atomic64_inc_return(v) atomic64_add_return(1, (v))
 
 /*
  * atomic64_sub_and_test - subtract value from variable and test result
@@ -730,7 +730,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
  * true if the result is zero, or false for all
  * other cases.
  */
-#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
+#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)
 
 /*
  * atomic64_inc_and_test - increment and test
@@ -764,7 +764,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
  *
  * Atomically increments @v by 1.
  */
-#define atomic64_inc(v) atomic64_add(1,(v))
+#define atomic64_inc(v) atomic64_add(1, (v))
 
 /*
  * atomic64_dec - decrement and test
@@ -772,7 +772,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
  *
  * Atomically decrements @v by 1.
  */
-#define atomic64_dec(v) atomic64_sub(1,(v))
+#define atomic64_dec(v) atomic64_sub(1, (v))
 
 /*
  * atomic64_add_negative - add and test if negative
@@ -783,7 +783,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-#define atomic64_add_negative(i,v) (atomic64_add_return(i, (v)) < 0)
+#define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
 
 #endif /* CONFIG_64BIT */
 
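The atomic64_* block repeats the 32-bit API one-for-one at long (64-bit) width and is compiled only under the CONFIG_64BIT guard shown above. A small mock-up of why the wider counter matters follows; it is illustrative only (the byte-counter scenario and all names are invented), and it assumes an LP64 host where long is 64 bits.

#include <stdio.h>

/* Mock-up of the 64-bit counter type guarded by CONFIG_64BIT above. */
typedef struct { volatile long counter; } atomic64_t;

#define ATOMIC64_INIT(i)   { (i) }
#define atomic64_read(v)   ((v)->counter)
#define atomic64_set(v, i) ((v)->counter = (i))

int main(void)
{
	/* 64-bit counters matter where 32 bits would wrap, e.g. a
	 * per-device byte counter on a busy interface. */
	atomic64_t bytes = ATOMIC64_INIT(0);

	atomic64_set(&bytes, 5000000000L);   /* > 2^32, fine in a 64-bit long */
	printf("%ld\n", atomic64_read(&bytes));
	return 0;
}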