diff options
author:    Peter Zijlstra <peterz@infradead.org>    2014-04-23 14:00:01 -0400
committer: Thomas Gleixner <tglx@linutronix.de>     2015-07-27 08:06:22 -0400
commit:    70ed47390d10a7351e44c3a07c5de376780cb6c1 (patch)
tree:      c603f370153df8f57f654f87a25b9544a71afd7d
parent:    610f7ba93b1e4cda7370f367a6042cbda61227bb (diff)
ia64: Provide atomic_{or,xor,and}
Implement atomic logic ops -- atomic_{or,xor,and}.
These will replace the atomic_{set,clear}_mask functions that are
available on some archs.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--  arch/ia64/include/asm/atomic.h | 26 ++++++++++++++++++++------
 1 file changed, 22 insertions(+), 4 deletions(-)
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 0bf03501fe5c..0809ef5d6b9a 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -45,8 +45,6 @@ ia64_atomic_##op (int i, atomic_t *v)			\
 ATOMIC_OP(add, +)
 ATOMIC_OP(sub, -)
 
-#undef ATOMIC_OP
-
 #define atomic_add_return(i,v)					\
 ({									\
 	int __ia64_aar_i = (i);					\
@@ -71,6 +69,18 @@ ATOMIC_OP(sub, -)
 	: ia64_atomic_sub(__ia64_asr_i, v);			\
 })
 
+#define CONFIG_ARCH_HAS_ATOMIC_OR
+
+ATOMIC_OP(and, &)
+ATOMIC_OP(or, |)
+ATOMIC_OP(xor, ^)
+
+#define atomic_and(i,v)	(void)ia64_atomic_and(i,v)
+#define atomic_or(i,v)	(void)ia64_atomic_or(i,v)
+#define atomic_xor(i,v)	(void)ia64_atomic_xor(i,v)
+
+#undef ATOMIC_OP
+
 #define ATOMIC64_OP(op, c_op)					\
 static __inline__ long						\
 ia64_atomic64_##op (__s64 i, atomic64_t *v)			\
@@ -89,8 +99,6 @@ ia64_atomic64_##op (__s64 i, atomic64_t *v)		\
 ATOMIC64_OP(add, +)
 ATOMIC64_OP(sub, -)
 
-#undef ATOMIC64_OP
-
 #define atomic64_add_return(i,v)				\
 ({									\
 	long __ia64_aar_i = (i);				\
@@ -115,6 +123,16 @@ ATOMIC64_OP(sub, -)
 	: ia64_atomic64_sub(__ia64_asr_i, v);			\
 })
 
+ATOMIC64_OP(and, &)
+ATOMIC64_OP(or, |)
+ATOMIC64_OP(xor, ^)
+
+#define atomic64_and(i,v)	(void)ia64_atomic64_and(i,v)
+#define atomic64_or(i,v)	(void)ia64_atomic64_or(i,v)
+#define atomic64_xor(i,v)	(void)ia64_atomic64_xor(i,v)
+
+#undef ATOMIC64_OP
+
 #define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), old, new))
 #define atomic_xchg(v, new)	(xchg(&((v)->counter), new))
 