author		Ingo Molnar <mingo@kernel.org>	2016-07-07 03:12:02 -0400
committer	Ingo Molnar <mingo@kernel.org>	2016-07-07 03:12:02 -0400
commit		36e91aa2628e46c2146049eee8b9b7f773b0ffc3 (patch)
tree		c14e756181dca4e4e06d6aa1b1299a5f4dc5bf3d
parent		03e3c2b7edbe1e8758196b2c7843333eb328063d (diff)
parent		b7271b9f3e18181559b96a610f4e42bdb04b07f5 (diff)
Merge branch 'locking/arch-atomic' into locking/core, because the topic is ready
Signed-off-by: Ingo Molnar <mingo@kernel.org>
52 files changed, 2449 insertions, 620 deletions
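
Every per-architecture hunk below adds the same new API: the atomic_fetch_*() family, which returns the value the counter held before the operation, while the existing atomic_*_return() family returns the value after it. A minimal userspace sketch of the intended semantics, written against C11 <stdatomic.h> rather than the kernel API (none of the names below come from this patch):

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	atomic_int v = ATOMIC_VAR_INIT(10);

	/* fetch-style: returns the old value (10); v becomes 13 */
	int old = atomic_fetch_add_explicit(&v, 3, memory_order_relaxed);

	/* return-style semantics (the kernel's atomic_add_return()) would be
	 * the updated value, i.e. old value + operand */
	int ret = atomic_fetch_add_explicit(&v, 3, memory_order_relaxed) + 3;

	printf("old=%d return-style=%d final=%d\n", old, ret, atomic_load(&v));
	return 0;
}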
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 572b228c44c7..498933a7df97 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -46,10 +46,9 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
46 | } \ | 46 | } \ |
47 | 47 | ||
48 | #define ATOMIC_OP_RETURN(op, asm_op) \ | 48 | #define ATOMIC_OP_RETURN(op, asm_op) \ |
49 | static inline int atomic_##op##_return(int i, atomic_t *v) \ | 49 | static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \ |
50 | { \ | 50 | { \ |
51 | long temp, result; \ | 51 | long temp, result; \ |
52 | smp_mb(); \ | ||
53 | __asm__ __volatile__( \ | 52 | __asm__ __volatile__( \ |
54 | "1: ldl_l %0,%1\n" \ | 53 | "1: ldl_l %0,%1\n" \ |
55 | " " #asm_op " %0,%3,%2\n" \ | 54 | " " #asm_op " %0,%3,%2\n" \ |
@@ -61,7 +60,23 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
61 | ".previous" \ | 60 | ".previous" \ |
62 | :"=&r" (temp), "=m" (v->counter), "=&r" (result) \ | 61 | :"=&r" (temp), "=m" (v->counter), "=&r" (result) \ |
63 | :"Ir" (i), "m" (v->counter) : "memory"); \ | 62 | :"Ir" (i), "m" (v->counter) : "memory"); \ |
64 | smp_mb(); \ | 63 | return result; \ |
64 | } | ||
65 | |||
66 | #define ATOMIC_FETCH_OP(op, asm_op) \ | ||
67 | static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \ | ||
68 | { \ | ||
69 | long temp, result; \ | ||
70 | __asm__ __volatile__( \ | ||
71 | "1: ldl_l %2,%1\n" \ | ||
72 | " " #asm_op " %2,%3,%0\n" \ | ||
73 | " stl_c %0,%1\n" \ | ||
74 | " beq %0,2f\n" \ | ||
75 | ".subsection 2\n" \ | ||
76 | "2: br 1b\n" \ | ||
77 | ".previous" \ | ||
78 | :"=&r" (temp), "=m" (v->counter), "=&r" (result) \ | ||
79 | :"Ir" (i), "m" (v->counter) : "memory"); \ | ||
65 | return result; \ | 80 | return result; \ |
66 | } | 81 | } |
67 | 82 | ||
@@ -82,10 +97,9 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
82 | } \ | 97 | } \ |
83 | 98 | ||
84 | #define ATOMIC64_OP_RETURN(op, asm_op) \ | 99 | #define ATOMIC64_OP_RETURN(op, asm_op) \ |
85 | static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \ | 100 | static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \ |
86 | { \ | 101 | { \ |
87 | long temp, result; \ | 102 | long temp, result; \ |
88 | smp_mb(); \ | ||
89 | __asm__ __volatile__( \ | 103 | __asm__ __volatile__( \ |
90 | "1: ldq_l %0,%1\n" \ | 104 | "1: ldq_l %0,%1\n" \ |
91 | " " #asm_op " %0,%3,%2\n" \ | 105 | " " #asm_op " %0,%3,%2\n" \ |
@@ -97,34 +111,77 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
97 | ".previous" \ | 111 | ".previous" \ |
98 | :"=&r" (temp), "=m" (v->counter), "=&r" (result) \ | 112 | :"=&r" (temp), "=m" (v->counter), "=&r" (result) \ |
99 | :"Ir" (i), "m" (v->counter) : "memory"); \ | 113 | :"Ir" (i), "m" (v->counter) : "memory"); \ |
100 | smp_mb(); \ | 114 | return result; \ |
115 | } | ||
116 | |||
117 | #define ATOMIC64_FETCH_OP(op, asm_op) \ | ||
118 | static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \ | ||
119 | { \ | ||
120 | long temp, result; \ | ||
121 | __asm__ __volatile__( \ | ||
122 | "1: ldq_l %2,%1\n" \ | ||
123 | " " #asm_op " %2,%3,%0\n" \ | ||
124 | " stq_c %0,%1\n" \ | ||
125 | " beq %0,2f\n" \ | ||
126 | ".subsection 2\n" \ | ||
127 | "2: br 1b\n" \ | ||
128 | ".previous" \ | ||
129 | :"=&r" (temp), "=m" (v->counter), "=&r" (result) \ | ||
130 | :"Ir" (i), "m" (v->counter) : "memory"); \ | ||
101 | return result; \ | 131 | return result; \ |
102 | } | 132 | } |
103 | 133 | ||
104 | #define ATOMIC_OPS(op) \ | 134 | #define ATOMIC_OPS(op) \ |
105 | ATOMIC_OP(op, op##l) \ | 135 | ATOMIC_OP(op, op##l) \ |
106 | ATOMIC_OP_RETURN(op, op##l) \ | 136 | ATOMIC_OP_RETURN(op, op##l) \ |
137 | ATOMIC_FETCH_OP(op, op##l) \ | ||
107 | ATOMIC64_OP(op, op##q) \ | 138 | ATOMIC64_OP(op, op##q) \ |
108 | ATOMIC64_OP_RETURN(op, op##q) | 139 | ATOMIC64_OP_RETURN(op, op##q) \ |
140 | ATOMIC64_FETCH_OP(op, op##q) | ||
109 | 141 | ||
110 | ATOMIC_OPS(add) | 142 | ATOMIC_OPS(add) |
111 | ATOMIC_OPS(sub) | 143 | ATOMIC_OPS(sub) |
112 | 144 | ||
145 | #define atomic_add_return_relaxed atomic_add_return_relaxed | ||
146 | #define atomic_sub_return_relaxed atomic_sub_return_relaxed | ||
147 | #define atomic_fetch_add_relaxed atomic_fetch_add_relaxed | ||
148 | #define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed | ||
149 | |||
150 | #define atomic64_add_return_relaxed atomic64_add_return_relaxed | ||
151 | #define atomic64_sub_return_relaxed atomic64_sub_return_relaxed | ||
152 | #define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed | ||
153 | #define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed | ||
154 | |||
113 | #define atomic_andnot atomic_andnot | 155 | #define atomic_andnot atomic_andnot |
114 | #define atomic64_andnot atomic64_andnot | 156 | #define atomic64_andnot atomic64_andnot |
115 | 157 | ||
116 | ATOMIC_OP(and, and) | 158 | #undef ATOMIC_OPS |
117 | ATOMIC_OP(andnot, bic) | 159 | #define ATOMIC_OPS(op, asm) \ |
118 | ATOMIC_OP(or, bis) | 160 | ATOMIC_OP(op, asm) \ |
119 | ATOMIC_OP(xor, xor) | 161 | ATOMIC_FETCH_OP(op, asm) \ |
120 | ATOMIC64_OP(and, and) | 162 | ATOMIC64_OP(op, asm) \ |
121 | ATOMIC64_OP(andnot, bic) | 163 | ATOMIC64_FETCH_OP(op, asm) |
122 | ATOMIC64_OP(or, bis) | 164 | |
123 | ATOMIC64_OP(xor, xor) | 165 | ATOMIC_OPS(and, and) |
166 | ATOMIC_OPS(andnot, bic) | ||
167 | ATOMIC_OPS(or, bis) | ||
168 | ATOMIC_OPS(xor, xor) | ||
169 | |||
170 | #define atomic_fetch_and_relaxed atomic_fetch_and_relaxed | ||
171 | #define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed | ||
172 | #define atomic_fetch_or_relaxed atomic_fetch_or_relaxed | ||
173 | #define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed | ||
174 | |||
175 | #define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed | ||
176 | #define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed | ||
177 | #define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed | ||
178 | #define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed | ||
124 | 179 | ||
125 | #undef ATOMIC_OPS | 180 | #undef ATOMIC_OPS |
181 | #undef ATOMIC64_FETCH_OP | ||
126 | #undef ATOMIC64_OP_RETURN | 182 | #undef ATOMIC64_OP_RETURN |
127 | #undef ATOMIC64_OP | 183 | #undef ATOMIC64_OP |
184 | #undef ATOMIC_FETCH_OP | ||
128 | #undef ATOMIC_OP_RETURN | 185 | #undef ATOMIC_OP_RETURN |
129 | #undef ATOMIC_OP | 186 | #undef ATOMIC_OP |
130 | 187 | ||
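
For reference, the Alpha ATOMIC_FETCH_OP macro added above expands, for op=add and asm_op=addl, roughly as follows. This is an illustrative expansion assuming the usual atomic_t context of this header, not text from the patch:

static inline int atomic_fetch_add_relaxed(int i, atomic_t *v)
{
	long temp, result;
	__asm__ __volatile__(
	"1:	ldl_l %2,%1\n"		/* result = old value (load-locked) */
	"	addl %2,%3,%0\n"	/* temp = old + i */
	"	stl_c %0,%1\n"		/* store-conditional the new value */
	"	beq %0,2f\n"		/* branch out of line to retry on failure */
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	return result;			/* old value; _relaxed, so no smp_mb() */
}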
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index dd683995bc9d..4e3c1b6b0806 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -67,6 +67,33 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
67 | return val; \ | 67 | return val; \ |
68 | } | 68 | } |
69 | 69 | ||
70 | #define ATOMIC_FETCH_OP(op, c_op, asm_op) \ | ||
71 | static inline int atomic_fetch_##op(int i, atomic_t *v) \ | ||
72 | { \ | ||
73 | unsigned int val, orig; \ | ||
74 | \ | ||
75 | /* \ | ||
76 | * Explicit full memory barrier needed before/after as \ | ||
77 | * LLOCK/SCOND thmeselves don't provide any such semantics \ | ||
78 | */ \ | ||
79 | smp_mb(); \ | ||
80 | \ | ||
81 | __asm__ __volatile__( \ | ||
82 | "1: llock %[orig], [%[ctr]] \n" \ | ||
83 | " " #asm_op " %[val], %[orig], %[i] \n" \ | ||
84 | " scond %[val], [%[ctr]] \n" \ | ||
85 | " \n" \ | ||
86 | : [val] "=&r" (val), \ | ||
87 | [orig] "=&r" (orig) \ | ||
88 | : [ctr] "r" (&v->counter), \ | ||
89 | [i] "ir" (i) \ | ||
90 | : "cc"); \ | ||
91 | \ | ||
92 | smp_mb(); \ | ||
93 | \ | ||
94 | return orig; \ | ||
95 | } | ||
96 | |||
70 | #else /* !CONFIG_ARC_HAS_LLSC */ | 97 | #else /* !CONFIG_ARC_HAS_LLSC */ |
71 | 98 | ||
72 | #ifndef CONFIG_SMP | 99 | #ifndef CONFIG_SMP |
@@ -129,25 +156,44 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
129 | return temp; \ | 156 | return temp; \ |
130 | } | 157 | } |
131 | 158 | ||
159 | #define ATOMIC_FETCH_OP(op, c_op, asm_op) \ | ||
160 | static inline int atomic_fetch_##op(int i, atomic_t *v) \ | ||
161 | { \ | ||
162 | unsigned long flags; \ | ||
163 | unsigned long orig; \ | ||
164 | \ | ||
165 | /* \ | ||
166 | * spin lock/unlock provides the needed smp_mb() before/after \ | ||
167 | */ \ | ||
168 | atomic_ops_lock(flags); \ | ||
169 | orig = v->counter; \ | ||
170 | v->counter c_op i; \ | ||
171 | atomic_ops_unlock(flags); \ | ||
172 | \ | ||
173 | return orig; \ | ||
174 | } | ||
175 | |||
132 | #endif /* !CONFIG_ARC_HAS_LLSC */ | 176 | #endif /* !CONFIG_ARC_HAS_LLSC */ |
133 | 177 | ||
134 | #define ATOMIC_OPS(op, c_op, asm_op) \ | 178 | #define ATOMIC_OPS(op, c_op, asm_op) \ |
135 | ATOMIC_OP(op, c_op, asm_op) \ | 179 | ATOMIC_OP(op, c_op, asm_op) \ |
136 | ATOMIC_OP_RETURN(op, c_op, asm_op) | 180 | ATOMIC_OP_RETURN(op, c_op, asm_op) \ |
181 | ATOMIC_FETCH_OP(op, c_op, asm_op) | ||
137 | 182 | ||
138 | ATOMIC_OPS(add, +=, add) | 183 | ATOMIC_OPS(add, +=, add) |
139 | ATOMIC_OPS(sub, -=, sub) | 184 | ATOMIC_OPS(sub, -=, sub) |
140 | 185 | ||
141 | #define atomic_andnot atomic_andnot | 186 | #define atomic_andnot atomic_andnot |
142 | 187 | ||
143 | ATOMIC_OP(and, &=, and) | 188 | #undef ATOMIC_OPS |
144 | ATOMIC_OP(andnot, &= ~, bic) | 189 | #define ATOMIC_OPS(op, c_op, asm_op) \ |
145 | ATOMIC_OP(or, |=, or) | 190 | ATOMIC_OP(op, c_op, asm_op) \ |
146 | ATOMIC_OP(xor, ^=, xor) | 191 | ATOMIC_FETCH_OP(op, c_op, asm_op) |
147 | 192 | ||
148 | #undef SCOND_FAIL_RETRY_VAR_DEF | 193 | ATOMIC_OPS(and, &=, and) |
149 | #undef SCOND_FAIL_RETRY_ASM | 194 | ATOMIC_OPS(andnot, &= ~, bic) |
150 | #undef SCOND_FAIL_RETRY_VARS | 195 | ATOMIC_OPS(or, |=, or) |
196 | ATOMIC_OPS(xor, ^=, xor) | ||
151 | 197 | ||
152 | #else /* CONFIG_ARC_PLAT_EZNPS */ | 198 | #else /* CONFIG_ARC_PLAT_EZNPS */ |
153 | 199 | ||
@@ -208,22 +254,51 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
208 | return temp; \ | 254 | return temp; \ |
209 | } | 255 | } |
210 | 256 | ||
257 | #define ATOMIC_FETCH_OP(op, c_op, asm_op) \ | ||
258 | static inline int atomic_fetch_##op(int i, atomic_t *v) \ | ||
259 | { \ | ||
260 | unsigned int temp = i; \ | ||
261 | \ | ||
262 | /* Explicit full memory barrier needed before/after */ \ | ||
263 | smp_mb(); \ | ||
264 | \ | ||
265 | __asm__ __volatile__( \ | ||
266 | " mov r2, %0\n" \ | ||
267 | " mov r3, %1\n" \ | ||
268 | " .word %2\n" \ | ||
269 | " mov %0, r2" \ | ||
270 | : "+r"(temp) \ | ||
271 | : "r"(&v->counter), "i"(asm_op) \ | ||
272 | : "r2", "r3", "memory"); \ | ||
273 | \ | ||
274 | smp_mb(); \ | ||
275 | \ | ||
276 | return temp; \ | ||
277 | } | ||
278 | |||
211 | #define ATOMIC_OPS(op, c_op, asm_op) \ | 279 | #define ATOMIC_OPS(op, c_op, asm_op) \ |
212 | ATOMIC_OP(op, c_op, asm_op) \ | 280 | ATOMIC_OP(op, c_op, asm_op) \ |
213 | ATOMIC_OP_RETURN(op, c_op, asm_op) | 281 | ATOMIC_OP_RETURN(op, c_op, asm_op) \ |
282 | ATOMIC_FETCH_OP(op, c_op, asm_op) | ||
214 | 283 | ||
215 | ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3) | 284 | ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3) |
216 | #define atomic_sub(i, v) atomic_add(-(i), (v)) | 285 | #define atomic_sub(i, v) atomic_add(-(i), (v)) |
217 | #define atomic_sub_return(i, v) atomic_add_return(-(i), (v)) | 286 | #define atomic_sub_return(i, v) atomic_add_return(-(i), (v)) |
218 | 287 | ||
219 | ATOMIC_OP(and, &=, CTOP_INST_AAND_DI_R2_R2_R3) | 288 | #undef ATOMIC_OPS |
289 | #define ATOMIC_OPS(op, c_op, asm_op) \ | ||
290 | ATOMIC_OP(op, c_op, asm_op) \ | ||
291 | ATOMIC_FETCH_OP(op, c_op, asm_op) | ||
292 | |||
293 | ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3) | ||
220 | #define atomic_andnot(mask, v) atomic_and(~(mask), (v)) | 294 | #define atomic_andnot(mask, v) atomic_and(~(mask), (v)) |
221 | ATOMIC_OP(or, |=, CTOP_INST_AOR_DI_R2_R2_R3) | 295 | ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3) |
222 | ATOMIC_OP(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3) | 296 | ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3) |
223 | 297 | ||
224 | #endif /* CONFIG_ARC_PLAT_EZNPS */ | 298 | #endif /* CONFIG_ARC_PLAT_EZNPS */ |
225 | 299 | ||
226 | #undef ATOMIC_OPS | 300 | #undef ATOMIC_OPS |
301 | #undef ATOMIC_FETCH_OP | ||
227 | #undef ATOMIC_OP_RETURN | 302 | #undef ATOMIC_OP_RETURN |
228 | #undef ATOMIC_OP | 303 | #undef ATOMIC_OP |
229 | 304 | ||
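
On the !CONFIG_ARC_HAS_LLSC fallback path the fetch variants are plain critical sections. Expanded for op=add, the new macro amounts to roughly the sketch below (illustrative only, relying on the atomic_ops_lock()/atomic_ops_unlock() helpers already used in this header):

static inline int atomic_fetch_add(int i, atomic_t *v)
{
	unsigned long flags;
	unsigned long orig;

	/* the spin lock/unlock pair supplies the smp_mb() ordering */
	atomic_ops_lock(flags);
	orig = v->counter;	/* snapshot the old value */
	v->counter += i;	/* apply the operation */
	atomic_ops_unlock(flags);

	return orig;
}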
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 9e10c4567eb4..66d0e215a773 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -77,8 +77,36 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
77 | return result; \ | 77 | return result; \ |
78 | } | 78 | } |
79 | 79 | ||
80 | #define ATOMIC_FETCH_OP(op, c_op, asm_op) \ | ||
81 | static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \ | ||
82 | { \ | ||
83 | unsigned long tmp; \ | ||
84 | int result, val; \ | ||
85 | \ | ||
86 | prefetchw(&v->counter); \ | ||
87 | \ | ||
88 | __asm__ __volatile__("@ atomic_fetch_" #op "\n" \ | ||
89 | "1: ldrex %0, [%4]\n" \ | ||
90 | " " #asm_op " %1, %0, %5\n" \ | ||
91 | " strex %2, %1, [%4]\n" \ | ||
92 | " teq %2, #0\n" \ | ||
93 | " bne 1b" \ | ||
94 | : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter) \ | ||
95 | : "r" (&v->counter), "Ir" (i) \ | ||
96 | : "cc"); \ | ||
97 | \ | ||
98 | return result; \ | ||
99 | } | ||
100 | |||
80 | #define atomic_add_return_relaxed atomic_add_return_relaxed | 101 | #define atomic_add_return_relaxed atomic_add_return_relaxed |
81 | #define atomic_sub_return_relaxed atomic_sub_return_relaxed | 102 | #define atomic_sub_return_relaxed atomic_sub_return_relaxed |
103 | #define atomic_fetch_add_relaxed atomic_fetch_add_relaxed | ||
104 | #define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed | ||
105 | |||
106 | #define atomic_fetch_and_relaxed atomic_fetch_and_relaxed | ||
107 | #define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed | ||
108 | #define atomic_fetch_or_relaxed atomic_fetch_or_relaxed | ||
109 | #define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed | ||
82 | 110 | ||
83 | static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new) | 111 | static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new) |
84 | { | 112 | { |
@@ -159,6 +187,20 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
159 | return val; \ | 187 | return val; \ |
160 | } | 188 | } |
161 | 189 | ||
190 | #define ATOMIC_FETCH_OP(op, c_op, asm_op) \ | ||
191 | static inline int atomic_fetch_##op(int i, atomic_t *v) \ | ||
192 | { \ | ||
193 | unsigned long flags; \ | ||
194 | int val; \ | ||
195 | \ | ||
196 | raw_local_irq_save(flags); \ | ||
197 | val = v->counter; \ | ||
198 | v->counter c_op i; \ | ||
199 | raw_local_irq_restore(flags); \ | ||
200 | \ | ||
201 | return val; \ | ||
202 | } | ||
203 | |||
162 | static inline int atomic_cmpxchg(atomic_t *v, int old, int new) | 204 | static inline int atomic_cmpxchg(atomic_t *v, int old, int new) |
163 | { | 205 | { |
164 | int ret; | 206 | int ret; |
@@ -187,19 +229,26 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
187 | 229 | ||
188 | #define ATOMIC_OPS(op, c_op, asm_op) \ | 230 | #define ATOMIC_OPS(op, c_op, asm_op) \ |
189 | ATOMIC_OP(op, c_op, asm_op) \ | 231 | ATOMIC_OP(op, c_op, asm_op) \ |
190 | ATOMIC_OP_RETURN(op, c_op, asm_op) | 232 | ATOMIC_OP_RETURN(op, c_op, asm_op) \ |
233 | ATOMIC_FETCH_OP(op, c_op, asm_op) | ||
191 | 234 | ||
192 | ATOMIC_OPS(add, +=, add) | 235 | ATOMIC_OPS(add, +=, add) |
193 | ATOMIC_OPS(sub, -=, sub) | 236 | ATOMIC_OPS(sub, -=, sub) |
194 | 237 | ||
195 | #define atomic_andnot atomic_andnot | 238 | #define atomic_andnot atomic_andnot |
196 | 239 | ||
197 | ATOMIC_OP(and, &=, and) | 240 | #undef ATOMIC_OPS |
198 | ATOMIC_OP(andnot, &= ~, bic) | 241 | #define ATOMIC_OPS(op, c_op, asm_op) \ |
199 | ATOMIC_OP(or, |=, orr) | 242 | ATOMIC_OP(op, c_op, asm_op) \ |
200 | ATOMIC_OP(xor, ^=, eor) | 243 | ATOMIC_FETCH_OP(op, c_op, asm_op) |
244 | |||
245 | ATOMIC_OPS(and, &=, and) | ||
246 | ATOMIC_OPS(andnot, &= ~, bic) | ||
247 | ATOMIC_OPS(or, |=, orr) | ||
248 | ATOMIC_OPS(xor, ^=, eor) | ||
201 | 249 | ||
202 | #undef ATOMIC_OPS | 250 | #undef ATOMIC_OPS |
251 | #undef ATOMIC_FETCH_OP | ||
203 | #undef ATOMIC_OP_RETURN | 252 | #undef ATOMIC_OP_RETURN |
204 | #undef ATOMIC_OP | 253 | #undef ATOMIC_OP |
205 | 254 | ||
@@ -317,24 +366,61 @@ atomic64_##op##_return_relaxed(long long i, atomic64_t *v) \
317 | return result; \ | 366 | return result; \ |
318 | } | 367 | } |
319 | 368 | ||
369 | #define ATOMIC64_FETCH_OP(op, op1, op2) \ | ||
370 | static inline long long \ | ||
371 | atomic64_fetch_##op##_relaxed(long long i, atomic64_t *v) \ | ||
372 | { \ | ||
373 | long long result, val; \ | ||
374 | unsigned long tmp; \ | ||
375 | \ | ||
376 | prefetchw(&v->counter); \ | ||
377 | \ | ||
378 | __asm__ __volatile__("@ atomic64_fetch_" #op "\n" \ | ||
379 | "1: ldrexd %0, %H0, [%4]\n" \ | ||
380 | " " #op1 " %Q1, %Q0, %Q5\n" \ | ||
381 | " " #op2 " %R1, %R0, %R5\n" \ | ||
382 | " strexd %2, %1, %H1, [%4]\n" \ | ||
383 | " teq %2, #0\n" \ | ||
384 | " bne 1b" \ | ||
385 | : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter) \ | ||
386 | : "r" (&v->counter), "r" (i) \ | ||
387 | : "cc"); \ | ||
388 | \ | ||
389 | return result; \ | ||
390 | } | ||
391 | |||
320 | #define ATOMIC64_OPS(op, op1, op2) \ | 392 | #define ATOMIC64_OPS(op, op1, op2) \ |
321 | ATOMIC64_OP(op, op1, op2) \ | 393 | ATOMIC64_OP(op, op1, op2) \ |
322 | ATOMIC64_OP_RETURN(op, op1, op2) | 394 | ATOMIC64_OP_RETURN(op, op1, op2) \ |
395 | ATOMIC64_FETCH_OP(op, op1, op2) | ||
323 | 396 | ||
324 | ATOMIC64_OPS(add, adds, adc) | 397 | ATOMIC64_OPS(add, adds, adc) |
325 | ATOMIC64_OPS(sub, subs, sbc) | 398 | ATOMIC64_OPS(sub, subs, sbc) |
326 | 399 | ||
327 | #define atomic64_add_return_relaxed atomic64_add_return_relaxed | 400 | #define atomic64_add_return_relaxed atomic64_add_return_relaxed |
328 | #define atomic64_sub_return_relaxed atomic64_sub_return_relaxed | 401 | #define atomic64_sub_return_relaxed atomic64_sub_return_relaxed |
402 | #define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed | ||
403 | #define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed | ||
404 | |||
405 | #undef ATOMIC64_OPS | ||
406 | #define ATOMIC64_OPS(op, op1, op2) \ | ||
407 | ATOMIC64_OP(op, op1, op2) \ | ||
408 | ATOMIC64_FETCH_OP(op, op1, op2) | ||
329 | 409 | ||
330 | #define atomic64_andnot atomic64_andnot | 410 | #define atomic64_andnot atomic64_andnot |
331 | 411 | ||
332 | ATOMIC64_OP(and, and, and) | 412 | ATOMIC64_OPS(and, and, and) |
333 | ATOMIC64_OP(andnot, bic, bic) | 413 | ATOMIC64_OPS(andnot, bic, bic) |
334 | ATOMIC64_OP(or, orr, orr) | 414 | ATOMIC64_OPS(or, orr, orr) |
335 | ATOMIC64_OP(xor, eor, eor) | 415 | ATOMIC64_OPS(xor, eor, eor) |
416 | |||
417 | #define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed | ||
418 | #define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed | ||
419 | #define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed | ||
420 | #define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed | ||
336 | 421 | ||
337 | #undef ATOMIC64_OPS | 422 | #undef ATOMIC64_OPS |
423 | #undef ATOMIC64_FETCH_OP | ||
338 | #undef ATOMIC64_OP_RETURN | 424 | #undef ATOMIC64_OP_RETURN |
339 | #undef ATOMIC64_OP | 425 | #undef ATOMIC64_OP |
340 | 426 | ||
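
The ARMv6+ LL/SC variant follows the same ldrex/strex loop as the existing return ops but hands back the loaded value. Expanded for op=add, the macro above reads roughly as follows (illustrative expansion, not patch text):

static inline int atomic_fetch_add_relaxed(int i, atomic_t *v)
{
	unsigned long tmp;
	int result, val;

	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic_fetch_add\n"
"1:	ldrex	%0, [%4]\n"		/* result = old value */
"	add	%1, %0, %5\n"		/* val = old + i */
"	strex	%2, %1, [%4]\n"		/* try to publish val */
"	teq	%2, #0\n"
"	bne	1b"			/* retry if the exclusive store failed */
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	return result;
}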
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index f3a3586a421c..c0235e0ff849 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -76,6 +76,36 @@
76 | #define atomic_dec_return_release(v) atomic_sub_return_release(1, (v)) | 76 | #define atomic_dec_return_release(v) atomic_sub_return_release(1, (v)) |
77 | #define atomic_dec_return(v) atomic_sub_return(1, (v)) | 77 | #define atomic_dec_return(v) atomic_sub_return(1, (v)) |
78 | 78 | ||
79 | #define atomic_fetch_add_relaxed atomic_fetch_add_relaxed | ||
80 | #define atomic_fetch_add_acquire atomic_fetch_add_acquire | ||
81 | #define atomic_fetch_add_release atomic_fetch_add_release | ||
82 | #define atomic_fetch_add atomic_fetch_add | ||
83 | |||
84 | #define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed | ||
85 | #define atomic_fetch_sub_acquire atomic_fetch_sub_acquire | ||
86 | #define atomic_fetch_sub_release atomic_fetch_sub_release | ||
87 | #define atomic_fetch_sub atomic_fetch_sub | ||
88 | |||
89 | #define atomic_fetch_and_relaxed atomic_fetch_and_relaxed | ||
90 | #define atomic_fetch_and_acquire atomic_fetch_and_acquire | ||
91 | #define atomic_fetch_and_release atomic_fetch_and_release | ||
92 | #define atomic_fetch_and atomic_fetch_and | ||
93 | |||
94 | #define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed | ||
95 | #define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire | ||
96 | #define atomic_fetch_andnot_release atomic_fetch_andnot_release | ||
97 | #define atomic_fetch_andnot atomic_fetch_andnot | ||
98 | |||
99 | #define atomic_fetch_or_relaxed atomic_fetch_or_relaxed | ||
100 | #define atomic_fetch_or_acquire atomic_fetch_or_acquire | ||
101 | #define atomic_fetch_or_release atomic_fetch_or_release | ||
102 | #define atomic_fetch_or atomic_fetch_or | ||
103 | |||
104 | #define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed | ||
105 | #define atomic_fetch_xor_acquire atomic_fetch_xor_acquire | ||
106 | #define atomic_fetch_xor_release atomic_fetch_xor_release | ||
107 | #define atomic_fetch_xor atomic_fetch_xor | ||
108 | |||
79 | #define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new)) | 109 | #define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new)) |
80 | #define atomic_xchg_acquire(v, new) xchg_acquire(&((v)->counter), (new)) | 110 | #define atomic_xchg_acquire(v, new) xchg_acquire(&((v)->counter), (new)) |
81 | #define atomic_xchg_release(v, new) xchg_release(&((v)->counter), (new)) | 111 | #define atomic_xchg_release(v, new) xchg_release(&((v)->counter), (new)) |
@@ -125,6 +155,36 @@
125 | #define atomic64_dec_return_release(v) atomic64_sub_return_release(1, (v)) | 155 | #define atomic64_dec_return_release(v) atomic64_sub_return_release(1, (v)) |
126 | #define atomic64_dec_return(v) atomic64_sub_return(1, (v)) | 156 | #define atomic64_dec_return(v) atomic64_sub_return(1, (v)) |
127 | 157 | ||
158 | #define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed | ||
159 | #define atomic64_fetch_add_acquire atomic64_fetch_add_acquire | ||
160 | #define atomic64_fetch_add_release atomic64_fetch_add_release | ||
161 | #define atomic64_fetch_add atomic64_fetch_add | ||
162 | |||
163 | #define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed | ||
164 | #define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire | ||
165 | #define atomic64_fetch_sub_release atomic64_fetch_sub_release | ||
166 | #define atomic64_fetch_sub atomic64_fetch_sub | ||
167 | |||
168 | #define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed | ||
169 | #define atomic64_fetch_and_acquire atomic64_fetch_and_acquire | ||
170 | #define atomic64_fetch_and_release atomic64_fetch_and_release | ||
171 | #define atomic64_fetch_and atomic64_fetch_and | ||
172 | |||
173 | #define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed | ||
174 | #define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire | ||
175 | #define atomic64_fetch_andnot_release atomic64_fetch_andnot_release | ||
176 | #define atomic64_fetch_andnot atomic64_fetch_andnot | ||
177 | |||
178 | #define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed | ||
179 | #define atomic64_fetch_or_acquire atomic64_fetch_or_acquire | ||
180 | #define atomic64_fetch_or_release atomic64_fetch_or_release | ||
181 | #define atomic64_fetch_or atomic64_fetch_or | ||
182 | |||
183 | #define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed | ||
184 | #define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire | ||
185 | #define atomic64_fetch_xor_release atomic64_fetch_xor_release | ||
186 | #define atomic64_fetch_xor atomic64_fetch_xor | ||
187 | |||
128 | #define atomic64_xchg_relaxed atomic_xchg_relaxed | 188 | #define atomic64_xchg_relaxed atomic_xchg_relaxed |
129 | #define atomic64_xchg_acquire atomic_xchg_acquire | 189 | #define atomic64_xchg_acquire atomic_xchg_acquire |
130 | #define atomic64_xchg_release atomic_xchg_release | 190 | #define atomic64_xchg_release atomic_xchg_release |
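
These #defines only advertise that arm64 provides every ordering variant natively; without them, <linux/atomic.h> would synthesize the missing forms itself. A sketch of that generic fallback pattern, shown here for context and based on the core header rather than this patch:

/* Roughly what include/linux/atomic.h does when an architecture does not
 * provide a given variant itself: */
#ifndef atomic_fetch_add_acquire
#define atomic_fetch_add_acquire(...)					\
	__atomic_op_acquire(atomic_fetch_add, __VA_ARGS__)
#endif

#ifndef atomic_fetch_add_release
#define atomic_fetch_add_release(...)					\
	__atomic_op_release(atomic_fetch_add, __VA_ARGS__)
#endif

#ifndef atomic_fetch_add
#define atomic_fetch_add(...)						\
	__atomic_op_fence(atomic_fetch_add, __VA_ARGS__)
#endif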
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index f61c84f6ba02..f819fdcff1ac 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -77,26 +77,57 @@ __LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \
77 | } \ | 77 | } \ |
78 | __LL_SC_EXPORT(atomic_##op##_return##name); | 78 | __LL_SC_EXPORT(atomic_##op##_return##name); |
79 | 79 | ||
80 | #define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \ | ||
81 | __LL_SC_INLINE int \ | ||
82 | __LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v)) \ | ||
83 | { \ | ||
84 | unsigned long tmp; \ | ||
85 | int val, result; \ | ||
86 | \ | ||
87 | asm volatile("// atomic_fetch_" #op #name "\n" \ | ||
88 | " prfm pstl1strm, %3\n" \ | ||
89 | "1: ld" #acq "xr %w0, %3\n" \ | ||
90 | " " #asm_op " %w1, %w0, %w4\n" \ | ||
91 | " st" #rel "xr %w2, %w1, %3\n" \ | ||
92 | " cbnz %w2, 1b\n" \ | ||
93 | " " #mb \ | ||
94 | : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \ | ||
95 | : "Ir" (i) \ | ||
96 | : cl); \ | ||
97 | \ | ||
98 | return result; \ | ||
99 | } \ | ||
100 | __LL_SC_EXPORT(atomic_fetch_##op##name); | ||
101 | |||
80 | #define ATOMIC_OPS(...) \ | 102 | #define ATOMIC_OPS(...) \ |
81 | ATOMIC_OP(__VA_ARGS__) \ | 103 | ATOMIC_OP(__VA_ARGS__) \ |
82 | ATOMIC_OP_RETURN( , dmb ish, , l, "memory", __VA_ARGS__) | 104 | ATOMIC_OP_RETURN( , dmb ish, , l, "memory", __VA_ARGS__)\ |
83 | |||
84 | #define ATOMIC_OPS_RLX(...) \ | ||
85 | ATOMIC_OPS(__VA_ARGS__) \ | ||
86 | ATOMIC_OP_RETURN(_relaxed, , , , , __VA_ARGS__)\ | 105 | ATOMIC_OP_RETURN(_relaxed, , , , , __VA_ARGS__)\ |
87 | ATOMIC_OP_RETURN(_acquire, , a, , "memory", __VA_ARGS__)\ | 106 | ATOMIC_OP_RETURN(_acquire, , a, , "memory", __VA_ARGS__)\ |
88 | ATOMIC_OP_RETURN(_release, , , l, "memory", __VA_ARGS__) | 107 | ATOMIC_OP_RETURN(_release, , , l, "memory", __VA_ARGS__)\ |
108 | ATOMIC_FETCH_OP ( , dmb ish, , l, "memory", __VA_ARGS__)\ | ||
109 | ATOMIC_FETCH_OP (_relaxed, , , , , __VA_ARGS__)\ | ||
110 | ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\ | ||
111 | ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__) | ||
89 | 112 | ||
90 | ATOMIC_OPS_RLX(add, add) | 113 | ATOMIC_OPS(add, add) |
91 | ATOMIC_OPS_RLX(sub, sub) | 114 | ATOMIC_OPS(sub, sub) |
115 | |||
116 | #undef ATOMIC_OPS | ||
117 | #define ATOMIC_OPS(...) \ | ||
118 | ATOMIC_OP(__VA_ARGS__) \ | ||
119 | ATOMIC_FETCH_OP ( , dmb ish, , l, "memory", __VA_ARGS__)\ | ||
120 | ATOMIC_FETCH_OP (_relaxed, , , , , __VA_ARGS__)\ | ||
121 | ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\ | ||
122 | ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__) | ||
92 | 123 | ||
93 | ATOMIC_OP(and, and) | 124 | ATOMIC_OPS(and, and) |
94 | ATOMIC_OP(andnot, bic) | 125 | ATOMIC_OPS(andnot, bic) |
95 | ATOMIC_OP(or, orr) | 126 | ATOMIC_OPS(or, orr) |
96 | ATOMIC_OP(xor, eor) | 127 | ATOMIC_OPS(xor, eor) |
97 | 128 | ||
98 | #undef ATOMIC_OPS_RLX | ||
99 | #undef ATOMIC_OPS | 129 | #undef ATOMIC_OPS |
130 | #undef ATOMIC_FETCH_OP | ||
100 | #undef ATOMIC_OP_RETURN | 131 | #undef ATOMIC_OP_RETURN |
101 | #undef ATOMIC_OP | 132 | #undef ATOMIC_OP |
102 | 133 | ||
@@ -140,26 +171,57 @@ __LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \
140 | } \ | 171 | } \ |
141 | __LL_SC_EXPORT(atomic64_##op##_return##name); | 172 | __LL_SC_EXPORT(atomic64_##op##_return##name); |
142 | 173 | ||
174 | #define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \ | ||
175 | __LL_SC_INLINE long \ | ||
176 | __LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \ | ||
177 | { \ | ||
178 | long result, val; \ | ||
179 | unsigned long tmp; \ | ||
180 | \ | ||
181 | asm volatile("// atomic64_fetch_" #op #name "\n" \ | ||
182 | " prfm pstl1strm, %3\n" \ | ||
183 | "1: ld" #acq "xr %0, %3\n" \ | ||
184 | " " #asm_op " %1, %0, %4\n" \ | ||
185 | " st" #rel "xr %w2, %1, %3\n" \ | ||
186 | " cbnz %w2, 1b\n" \ | ||
187 | " " #mb \ | ||
188 | : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \ | ||
189 | : "Ir" (i) \ | ||
190 | : cl); \ | ||
191 | \ | ||
192 | return result; \ | ||
193 | } \ | ||
194 | __LL_SC_EXPORT(atomic64_fetch_##op##name); | ||
195 | |||
143 | #define ATOMIC64_OPS(...) \ | 196 | #define ATOMIC64_OPS(...) \ |
144 | ATOMIC64_OP(__VA_ARGS__) \ | 197 | ATOMIC64_OP(__VA_ARGS__) \ |
145 | ATOMIC64_OP_RETURN(, dmb ish, , l, "memory", __VA_ARGS__) | 198 | ATOMIC64_OP_RETURN(, dmb ish, , l, "memory", __VA_ARGS__) \ |
146 | |||
147 | #define ATOMIC64_OPS_RLX(...) \ | ||
148 | ATOMIC64_OPS(__VA_ARGS__) \ | ||
149 | ATOMIC64_OP_RETURN(_relaxed,, , , , __VA_ARGS__) \ | 199 | ATOMIC64_OP_RETURN(_relaxed,, , , , __VA_ARGS__) \ |
150 | ATOMIC64_OP_RETURN(_acquire,, a, , "memory", __VA_ARGS__) \ | 200 | ATOMIC64_OP_RETURN(_acquire,, a, , "memory", __VA_ARGS__) \ |
151 | ATOMIC64_OP_RETURN(_release,, , l, "memory", __VA_ARGS__) | 201 | ATOMIC64_OP_RETURN(_release,, , l, "memory", __VA_ARGS__) \ |
202 | ATOMIC64_FETCH_OP (, dmb ish, , l, "memory", __VA_ARGS__) \ | ||
203 | ATOMIC64_FETCH_OP (_relaxed,, , , , __VA_ARGS__) \ | ||
204 | ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \ | ||
205 | ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__) | ||
152 | 206 | ||
153 | ATOMIC64_OPS_RLX(add, add) | 207 | ATOMIC64_OPS(add, add) |
154 | ATOMIC64_OPS_RLX(sub, sub) | 208 | ATOMIC64_OPS(sub, sub) |
209 | |||
210 | #undef ATOMIC64_OPS | ||
211 | #define ATOMIC64_OPS(...) \ | ||
212 | ATOMIC64_OP(__VA_ARGS__) \ | ||
213 | ATOMIC64_FETCH_OP (, dmb ish, , l, "memory", __VA_ARGS__) \ | ||
214 | ATOMIC64_FETCH_OP (_relaxed,, , , , __VA_ARGS__) \ | ||
215 | ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \ | ||
216 | ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__) | ||
155 | 217 | ||
156 | ATOMIC64_OP(and, and) | 218 | ATOMIC64_OPS(and, and) |
157 | ATOMIC64_OP(andnot, bic) | 219 | ATOMIC64_OPS(andnot, bic) |
158 | ATOMIC64_OP(or, orr) | 220 | ATOMIC64_OPS(or, orr) |
159 | ATOMIC64_OP(xor, eor) | 221 | ATOMIC64_OPS(xor, eor) |
160 | 222 | ||
161 | #undef ATOMIC64_OPS_RLX | ||
162 | #undef ATOMIC64_OPS | 223 | #undef ATOMIC64_OPS |
224 | #undef ATOMIC64_FETCH_OP | ||
163 | #undef ATOMIC64_OP_RETURN | 225 | #undef ATOMIC64_OP_RETURN |
164 | #undef ATOMIC64_OP | 226 | #undef ATOMIC64_OP |
165 | 227 | ||
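
Expanded for the fully ordered add case, ATOMIC_FETCH_OP( , dmb ish, , l, "memory", add, add), the LL/SC macro above produces roughly the following (illustrative expansion, not patch text):

__LL_SC_INLINE int
__LL_SC_PREFIX(atomic_fetch_add(int i, atomic_t *v))
{
	unsigned long tmp;
	int val, result;

	asm volatile("// atomic_fetch_add\n"
"	prfm	pstl1strm, %3\n"	/* prefetch the cacheline for store */
"1:	ldxr	%w0, %3\n"		/* result = old value */
"	add	%w1, %w0, %w4\n"	/* val = old + i */
"	stlxr	%w2, %w1, %3\n"		/* store-release exclusive */
"	cbnz	%w2, 1b\n"		/* retry on failure */
"	dmb	ish"			/* full barrier for the fully ordered form */
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "memory");

	return result;
}
__LL_SC_EXPORT(atomic_fetch_add);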
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 39c1d340fec5..b5890be8f257 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -26,54 +26,57 @@
26 | #endif | 26 | #endif |
27 | 27 | ||
28 | #define __LL_SC_ATOMIC(op) __LL_SC_CALL(atomic_##op) | 28 | #define __LL_SC_ATOMIC(op) __LL_SC_CALL(atomic_##op) |
29 | 29 | #define ATOMIC_OP(op, asm_op) \ | |
30 | static inline void atomic_andnot(int i, atomic_t *v) | 30 | static inline void atomic_##op(int i, atomic_t *v) \ |
31 | { | 31 | { \ |
32 | register int w0 asm ("w0") = i; | 32 | register int w0 asm ("w0") = i; \ |
33 | register atomic_t *x1 asm ("x1") = v; | 33 | register atomic_t *x1 asm ("x1") = v; \ |
34 | 34 | \ | |
35 | asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(andnot), | 35 | asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op), \ |
36 | " stclr %w[i], %[v]\n") | 36 | " " #asm_op " %w[i], %[v]\n") \ |
37 | : [i] "+r" (w0), [v] "+Q" (v->counter) | 37 | : [i] "+r" (w0), [v] "+Q" (v->counter) \ |
38 | : "r" (x1) | 38 | : "r" (x1) \ |
39 | : __LL_SC_CLOBBERS); | 39 | : __LL_SC_CLOBBERS); \ |
40 | } | 40 | } |
41 | 41 | ||
42 | static inline void atomic_or(int i, atomic_t *v) | 42 | ATOMIC_OP(andnot, stclr) |
43 | { | 43 | ATOMIC_OP(or, stset) |
44 | register int w0 asm ("w0") = i; | 44 | ATOMIC_OP(xor, steor) |
45 | register atomic_t *x1 asm ("x1") = v; | 45 | ATOMIC_OP(add, stadd) |
46 | 46 | ||
47 | asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(or), | 47 | #undef ATOMIC_OP |
48 | " stset %w[i], %[v]\n") | ||
49 | : [i] "+r" (w0), [v] "+Q" (v->counter) | ||
50 | : "r" (x1) | ||
51 | : __LL_SC_CLOBBERS); | ||
52 | } | ||
53 | 48 | ||
54 | static inline void atomic_xor(int i, atomic_t *v) | 49 | #define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) \ |
55 | { | 50 | static inline int atomic_fetch_##op##name(int i, atomic_t *v) \ |
56 | register int w0 asm ("w0") = i; | 51 | { \ |
57 | register atomic_t *x1 asm ("x1") = v; | 52 | register int w0 asm ("w0") = i; \ |
58 | 53 | register atomic_t *x1 asm ("x1") = v; \ | |
59 | asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(xor), | 54 | \ |
60 | " steor %w[i], %[v]\n") | 55 | asm volatile(ARM64_LSE_ATOMIC_INSN( \ |
61 | : [i] "+r" (w0), [v] "+Q" (v->counter) | 56 | /* LL/SC */ \ |
62 | : "r" (x1) | 57 | __LL_SC_ATOMIC(fetch_##op##name), \ |
63 | : __LL_SC_CLOBBERS); | 58 | /* LSE atomics */ \ |
59 | " " #asm_op #mb " %w[i], %w[i], %[v]") \ | ||
60 | : [i] "+r" (w0), [v] "+Q" (v->counter) \ | ||
61 | : "r" (x1) \ | ||
62 | : __LL_SC_CLOBBERS, ##cl); \ | ||
63 | \ | ||
64 | return w0; \ | ||
64 | } | 65 | } |
65 | 66 | ||
66 | static inline void atomic_add(int i, atomic_t *v) | 67 | #define ATOMIC_FETCH_OPS(op, asm_op) \ |
67 | { | 68 | ATOMIC_FETCH_OP(_relaxed, , op, asm_op) \ |
68 | register int w0 asm ("w0") = i; | 69 | ATOMIC_FETCH_OP(_acquire, a, op, asm_op, "memory") \ |
69 | register atomic_t *x1 asm ("x1") = v; | 70 | ATOMIC_FETCH_OP(_release, l, op, asm_op, "memory") \ |
71 | ATOMIC_FETCH_OP( , al, op, asm_op, "memory") | ||
70 | 72 | ||
71 | asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(add), | 73 | ATOMIC_FETCH_OPS(andnot, ldclr) |
72 | " stadd %w[i], %[v]\n") | 74 | ATOMIC_FETCH_OPS(or, ldset) |
73 | : [i] "+r" (w0), [v] "+Q" (v->counter) | 75 | ATOMIC_FETCH_OPS(xor, ldeor) |
74 | : "r" (x1) | 76 | ATOMIC_FETCH_OPS(add, ldadd) |
75 | : __LL_SC_CLOBBERS); | 77 | |
76 | } | 78 | #undef ATOMIC_FETCH_OP |
79 | #undef ATOMIC_FETCH_OPS | ||
77 | 80 | ||
78 | #define ATOMIC_OP_ADD_RETURN(name, mb, cl...) \ | 81 | #define ATOMIC_OP_ADD_RETURN(name, mb, cl...) \ |
79 | static inline int atomic_add_return##name(int i, atomic_t *v) \ | 82 | static inline int atomic_add_return##name(int i, atomic_t *v) \ |
@@ -119,6 +122,33 @@ static inline void atomic_and(int i, atomic_t *v)
119 | : __LL_SC_CLOBBERS); | 122 | : __LL_SC_CLOBBERS); |
120 | } | 123 | } |
121 | 124 | ||
125 | #define ATOMIC_FETCH_OP_AND(name, mb, cl...) \ | ||
126 | static inline int atomic_fetch_and##name(int i, atomic_t *v) \ | ||
127 | { \ | ||
128 | register int w0 asm ("w0") = i; \ | ||
129 | register atomic_t *x1 asm ("x1") = v; \ | ||
130 | \ | ||
131 | asm volatile(ARM64_LSE_ATOMIC_INSN( \ | ||
132 | /* LL/SC */ \ | ||
133 | " nop\n" \ | ||
134 | __LL_SC_ATOMIC(fetch_and##name), \ | ||
135 | /* LSE atomics */ \ | ||
136 | " mvn %w[i], %w[i]\n" \ | ||
137 | " ldclr" #mb " %w[i], %w[i], %[v]") \ | ||
138 | : [i] "+r" (w0), [v] "+Q" (v->counter) \ | ||
139 | : "r" (x1) \ | ||
140 | : __LL_SC_CLOBBERS, ##cl); \ | ||
141 | \ | ||
142 | return w0; \ | ||
143 | } | ||
144 | |||
145 | ATOMIC_FETCH_OP_AND(_relaxed, ) | ||
146 | ATOMIC_FETCH_OP_AND(_acquire, a, "memory") | ||
147 | ATOMIC_FETCH_OP_AND(_release, l, "memory") | ||
148 | ATOMIC_FETCH_OP_AND( , al, "memory") | ||
149 | |||
150 | #undef ATOMIC_FETCH_OP_AND | ||
151 | |||
122 | static inline void atomic_sub(int i, atomic_t *v) | 152 | static inline void atomic_sub(int i, atomic_t *v) |
123 | { | 153 | { |
124 | register int w0 asm ("w0") = i; | 154 | register int w0 asm ("w0") = i; |
@@ -164,57 +194,87 @@ ATOMIC_OP_SUB_RETURN( , al, "memory")
164 | ATOMIC_OP_SUB_RETURN( , al, "memory") | 194 | ATOMIC_OP_SUB_RETURN( , al, "memory") |
165 | 195 | ||
166 | #undef ATOMIC_OP_SUB_RETURN | 196 | #undef ATOMIC_OP_SUB_RETURN |
167 | #undef __LL_SC_ATOMIC | ||
168 | |||
169 | #define __LL_SC_ATOMIC64(op) __LL_SC_CALL(atomic64_##op) | ||
170 | |||
171 | static inline void atomic64_andnot(long i, atomic64_t *v) | ||
172 | { | ||
173 | register long x0 asm ("x0") = i; | ||
174 | register atomic64_t *x1 asm ("x1") = v; | ||
175 | 197 | ||
176 | asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(andnot), | 198 | #define ATOMIC_FETCH_OP_SUB(name, mb, cl...) \ |
177 | " stclr %[i], %[v]\n") | 199 | static inline int atomic_fetch_sub##name(int i, atomic_t *v) \ |
178 | : [i] "+r" (x0), [v] "+Q" (v->counter) | 200 | { \ |
179 | : "r" (x1) | 201 | register int w0 asm ("w0") = i; \ |
180 | : __LL_SC_CLOBBERS); | 202 | register atomic_t *x1 asm ("x1") = v; \ |
203 | \ | ||
204 | asm volatile(ARM64_LSE_ATOMIC_INSN( \ | ||
205 | /* LL/SC */ \ | ||
206 | " nop\n" \ | ||
207 | __LL_SC_ATOMIC(fetch_sub##name), \ | ||
208 | /* LSE atomics */ \ | ||
209 | " neg %w[i], %w[i]\n" \ | ||
210 | " ldadd" #mb " %w[i], %w[i], %[v]") \ | ||
211 | : [i] "+r" (w0), [v] "+Q" (v->counter) \ | ||
212 | : "r" (x1) \ | ||
213 | : __LL_SC_CLOBBERS, ##cl); \ | ||
214 | \ | ||
215 | return w0; \ | ||
181 | } | 216 | } |
182 | 217 | ||
183 | static inline void atomic64_or(long i, atomic64_t *v) | 218 | ATOMIC_FETCH_OP_SUB(_relaxed, ) |
184 | { | 219 | ATOMIC_FETCH_OP_SUB(_acquire, a, "memory") |
185 | register long x0 asm ("x0") = i; | 220 | ATOMIC_FETCH_OP_SUB(_release, l, "memory") |
186 | register atomic64_t *x1 asm ("x1") = v; | 221 | ATOMIC_FETCH_OP_SUB( , al, "memory") |
187 | 222 | ||
188 | asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(or), | 223 | #undef ATOMIC_FETCH_OP_SUB |
189 | " stset %[i], %[v]\n") | 224 | #undef __LL_SC_ATOMIC |
190 | : [i] "+r" (x0), [v] "+Q" (v->counter) | 225 | |
191 | : "r" (x1) | 226 | #define __LL_SC_ATOMIC64(op) __LL_SC_CALL(atomic64_##op) |
192 | : __LL_SC_CLOBBERS); | 227 | #define ATOMIC64_OP(op, asm_op) \ |
228 | static inline void atomic64_##op(long i, atomic64_t *v) \ | ||
229 | { \ | ||
230 | register long x0 asm ("x0") = i; \ | ||
231 | register atomic64_t *x1 asm ("x1") = v; \ | ||
232 | \ | ||
233 | asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op), \ | ||
234 | " " #asm_op " %[i], %[v]\n") \ | ||
235 | : [i] "+r" (x0), [v] "+Q" (v->counter) \ | ||
236 | : "r" (x1) \ | ||
237 | : __LL_SC_CLOBBERS); \ | ||
193 | } | 238 | } |
194 | 239 | ||
195 | static inline void atomic64_xor(long i, atomic64_t *v) | 240 | ATOMIC64_OP(andnot, stclr) |
196 | { | 241 | ATOMIC64_OP(or, stset) |
197 | register long x0 asm ("x0") = i; | 242 | ATOMIC64_OP(xor, steor) |
198 | register atomic64_t *x1 asm ("x1") = v; | 243 | ATOMIC64_OP(add, stadd) |
199 | 244 | ||
200 | asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(xor), | 245 | #undef ATOMIC64_OP |
201 | " steor %[i], %[v]\n") | 246 | |
202 | : [i] "+r" (x0), [v] "+Q" (v->counter) | 247 | #define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) \ |
203 | : "r" (x1) | 248 | static inline long atomic64_fetch_##op##name(long i, atomic64_t *v) \ |
204 | : __LL_SC_CLOBBERS); | 249 | { \ |
250 | register long x0 asm ("x0") = i; \ | ||
251 | register atomic64_t *x1 asm ("x1") = v; \ | ||
252 | \ | ||
253 | asm volatile(ARM64_LSE_ATOMIC_INSN( \ | ||
254 | /* LL/SC */ \ | ||
255 | __LL_SC_ATOMIC64(fetch_##op##name), \ | ||
256 | /* LSE atomics */ \ | ||
257 | " " #asm_op #mb " %[i], %[i], %[v]") \ | ||
258 | : [i] "+r" (x0), [v] "+Q" (v->counter) \ | ||
259 | : "r" (x1) \ | ||
260 | : __LL_SC_CLOBBERS, ##cl); \ | ||
261 | \ | ||
262 | return x0; \ | ||
205 | } | 263 | } |
206 | 264 | ||
207 | static inline void atomic64_add(long i, atomic64_t *v) | 265 | #define ATOMIC64_FETCH_OPS(op, asm_op) \ |
208 | { | 266 | ATOMIC64_FETCH_OP(_relaxed, , op, asm_op) \ |
209 | register long x0 asm ("x0") = i; | 267 | ATOMIC64_FETCH_OP(_acquire, a, op, asm_op, "memory") \ |
210 | register atomic64_t *x1 asm ("x1") = v; | 268 | ATOMIC64_FETCH_OP(_release, l, op, asm_op, "memory") \ |
269 | ATOMIC64_FETCH_OP( , al, op, asm_op, "memory") | ||
211 | 270 | ||
212 | asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(add), | 271 | ATOMIC64_FETCH_OPS(andnot, ldclr) |
213 | " stadd %[i], %[v]\n") | 272 | ATOMIC64_FETCH_OPS(or, ldset) |
214 | : [i] "+r" (x0), [v] "+Q" (v->counter) | 273 | ATOMIC64_FETCH_OPS(xor, ldeor) |
215 | : "r" (x1) | 274 | ATOMIC64_FETCH_OPS(add, ldadd) |
216 | : __LL_SC_CLOBBERS); | 275 | |
217 | } | 276 | #undef ATOMIC64_FETCH_OP |
277 | #undef ATOMIC64_FETCH_OPS | ||
218 | 278 | ||
219 | #define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) \ | 279 | #define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) \ |
220 | static inline long atomic64_add_return##name(long i, atomic64_t *v) \ | 280 | static inline long atomic64_add_return##name(long i, atomic64_t *v) \ |
@@ -260,6 +320,33 @@ static inline void atomic64_and(long i, atomic64_t *v)
260 | : __LL_SC_CLOBBERS); | 320 | : __LL_SC_CLOBBERS); |
261 | } | 321 | } |
262 | 322 | ||
323 | #define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \ | ||
324 | static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \ | ||
325 | { \ | ||
326 | register long x0 asm ("w0") = i; \ | ||
327 | register atomic64_t *x1 asm ("x1") = v; \ | ||
328 | \ | ||
329 | asm volatile(ARM64_LSE_ATOMIC_INSN( \ | ||
330 | /* LL/SC */ \ | ||
331 | " nop\n" \ | ||
332 | __LL_SC_ATOMIC64(fetch_and##name), \ | ||
333 | /* LSE atomics */ \ | ||
334 | " mvn %[i], %[i]\n" \ | ||
335 | " ldclr" #mb " %[i], %[i], %[v]") \ | ||
336 | : [i] "+r" (x0), [v] "+Q" (v->counter) \ | ||
337 | : "r" (x1) \ | ||
338 | : __LL_SC_CLOBBERS, ##cl); \ | ||
339 | \ | ||
340 | return x0; \ | ||
341 | } | ||
342 | |||
343 | ATOMIC64_FETCH_OP_AND(_relaxed, ) | ||
344 | ATOMIC64_FETCH_OP_AND(_acquire, a, "memory") | ||
345 | ATOMIC64_FETCH_OP_AND(_release, l, "memory") | ||
346 | ATOMIC64_FETCH_OP_AND( , al, "memory") | ||
347 | |||
348 | #undef ATOMIC64_FETCH_OP_AND | ||
349 | |||
263 | static inline void atomic64_sub(long i, atomic64_t *v) | 350 | static inline void atomic64_sub(long i, atomic64_t *v) |
264 | { | 351 | { |
265 | register long x0 asm ("x0") = i; | 352 | register long x0 asm ("x0") = i; |
@@ -306,6 +393,33 @@ ATOMIC64_OP_SUB_RETURN( , al, "memory")
306 | 393 | ||
307 | #undef ATOMIC64_OP_SUB_RETURN | 394 | #undef ATOMIC64_OP_SUB_RETURN |
308 | 395 | ||
396 | #define ATOMIC64_FETCH_OP_SUB(name, mb, cl...) \ | ||
397 | static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \ | ||
398 | { \ | ||
399 | register long x0 asm ("w0") = i; \ | ||
400 | register atomic64_t *x1 asm ("x1") = v; \ | ||
401 | \ | ||
402 | asm volatile(ARM64_LSE_ATOMIC_INSN( \ | ||
403 | /* LL/SC */ \ | ||
404 | " nop\n" \ | ||
405 | __LL_SC_ATOMIC64(fetch_sub##name), \ | ||
406 | /* LSE atomics */ \ | ||
407 | " neg %[i], %[i]\n" \ | ||
408 | " ldadd" #mb " %[i], %[i], %[v]") \ | ||
409 | : [i] "+r" (x0), [v] "+Q" (v->counter) \ | ||
410 | : "r" (x1) \ | ||
411 | : __LL_SC_CLOBBERS, ##cl); \ | ||
412 | \ | ||
413 | return x0; \ | ||
414 | } | ||
415 | |||
416 | ATOMIC64_FETCH_OP_SUB(_relaxed, ) | ||
417 | ATOMIC64_FETCH_OP_SUB(_acquire, a, "memory") | ||
418 | ATOMIC64_FETCH_OP_SUB(_release, l, "memory") | ||
419 | ATOMIC64_FETCH_OP_SUB( , al, "memory") | ||
420 | |||
421 | #undef ATOMIC64_FETCH_OP_SUB | ||
422 | |||
309 | static inline long atomic64_dec_if_positive(atomic64_t *v) | 423 | static inline long atomic64_dec_if_positive(atomic64_t *v) |
310 | { | 424 | { |
311 | register long x0 asm ("x0") = (long)v; | 425 | register long x0 asm ("x0") = (long)v; |
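
With LSE atomics each fetch op becomes a single instruction, and the alternative-patched LL/SC call is kept as the fallback. Expanded for ATOMIC_FETCH_OP(_acquire, a, add, ldadd, "memory"), the macro reads roughly as follows (illustrative expansion, not patch text):

static inline int atomic_fetch_add_acquire(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC: call the out-of-line atomic_fetch_add_acquire */
	__LL_SC_ATOMIC(fetch_add_acquire),
	/* LSE: a single load-add with acquire semantics */
	"	ldadda	%w[i], %w[i], %[v]")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS, "memory");

	return w0;	/* w0 now holds the value observed before the add */
}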
diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h
index d74fd8ce980a..3d5ce38a6f0b 100644
--- a/arch/avr32/include/asm/atomic.h
+++ b/arch/avr32/include/asm/atomic.h
@@ -41,21 +41,49 @@ static inline int __atomic_##op##_return(int i, atomic_t *v) \
41 | return result; \ | 41 | return result; \ |
42 | } | 42 | } |
43 | 43 | ||
44 | #define ATOMIC_FETCH_OP(op, asm_op, asm_con) \ | ||
45 | static inline int __atomic_fetch_##op(int i, atomic_t *v) \ | ||
46 | { \ | ||
47 | int result, val; \ | ||
48 | \ | ||
49 | asm volatile( \ | ||
50 | "/* atomic_fetch_" #op " */\n" \ | ||
51 | "1: ssrf 5\n" \ | ||
52 | " ld.w %0, %3\n" \ | ||
53 | " mov %1, %0\n" \ | ||
54 | " " #asm_op " %1, %4\n" \ | ||
55 | " stcond %2, %1\n" \ | ||
56 | " brne 1b" \ | ||
57 | : "=&r" (result), "=&r" (val), "=o" (v->counter) \ | ||
58 | : "m" (v->counter), #asm_con (i) \ | ||
59 | : "cc"); \ | ||
60 | \ | ||
61 | return result; \ | ||
62 | } | ||
63 | |||
44 | ATOMIC_OP_RETURN(sub, sub, rKs21) | 64 | ATOMIC_OP_RETURN(sub, sub, rKs21) |
45 | ATOMIC_OP_RETURN(add, add, r) | 65 | ATOMIC_OP_RETURN(add, add, r) |
66 | ATOMIC_FETCH_OP (sub, sub, rKs21) | ||
67 | ATOMIC_FETCH_OP (add, add, r) | ||
46 | 68 | ||
47 | #define ATOMIC_OP(op, asm_op) \ | 69 | #define ATOMIC_OPS(op, asm_op) \ |
48 | ATOMIC_OP_RETURN(op, asm_op, r) \ | 70 | ATOMIC_OP_RETURN(op, asm_op, r) \ |
49 | static inline void atomic_##op(int i, atomic_t *v) \ | 71 | static inline void atomic_##op(int i, atomic_t *v) \ |
50 | { \ | 72 | { \ |
51 | (void)__atomic_##op##_return(i, v); \ | 73 | (void)__atomic_##op##_return(i, v); \ |
74 | } \ | ||
75 | ATOMIC_FETCH_OP(op, asm_op, r) \ | ||
76 | static inline int atomic_fetch_##op(int i, atomic_t *v) \ | ||
77 | { \ | ||
78 | return __atomic_fetch_##op(i, v); \ | ||
52 | } | 79 | } |
53 | 80 | ||
54 | ATOMIC_OP(and, and) | 81 | ATOMIC_OPS(and, and) |
55 | ATOMIC_OP(or, or) | 82 | ATOMIC_OPS(or, or) |
56 | ATOMIC_OP(xor, eor) | 83 | ATOMIC_OPS(xor, eor) |
57 | 84 | ||
58 | #undef ATOMIC_OP | 85 | #undef ATOMIC_OPS |
86 | #undef ATOMIC_FETCH_OP | ||
59 | #undef ATOMIC_OP_RETURN | 87 | #undef ATOMIC_OP_RETURN |
60 | 88 | ||
61 | /* | 89 | /* |
@@ -87,6 +115,14 @@ static inline int atomic_add_return(int i, atomic_t *v)
87 | return __atomic_add_return(i, v); | 115 | return __atomic_add_return(i, v); |
88 | } | 116 | } |
89 | 117 | ||
118 | static inline int atomic_fetch_add(int i, atomic_t *v) | ||
119 | { | ||
120 | if (IS_21BIT_CONST(i)) | ||
121 | return __atomic_fetch_sub(-i, v); | ||
122 | |||
123 | return __atomic_fetch_add(i, v); | ||
124 | } | ||
125 | |||
90 | /* | 126 | /* |
91 | * atomic_sub_return - subtract the atomic variable | 127 | * atomic_sub_return - subtract the atomic variable |
92 | * @i: integer value to subtract | 128 | * @i: integer value to subtract |
@@ -102,6 +138,14 @@ static inline int atomic_sub_return(int i, atomic_t *v)
102 | return __atomic_add_return(-i, v); | 138 | return __atomic_add_return(-i, v); |
103 | } | 139 | } |
104 | 140 | ||
141 | static inline int atomic_fetch_sub(int i, atomic_t *v) | ||
142 | { | ||
143 | if (IS_21BIT_CONST(i)) | ||
144 | return __atomic_fetch_sub(i, v); | ||
145 | |||
146 | return __atomic_fetch_add(-i, v); | ||
147 | } | ||
148 | |||
105 | /* | 149 | /* |
106 | * __atomic_add_unless - add unless the number is a given value | 150 | * __atomic_add_unless - add unless the number is a given value |
107 | * @v: pointer of type atomic_t | 151 | * @v: pointer of type atomic_t |
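
On AVR32 the fetch variants reuse the existing ssrf/stcond sequence, and atomic_fetch_add() falls back to __atomic_fetch_sub(-i, v) for 21-bit constants, mirroring the existing add_return/sub_return trick (presumably because only the sub form accepts a Ks21 immediate). Expanded for ATOMIC_FETCH_OP(add, add, r), the macro is roughly (illustrative expansion, not patch text):

static inline int __atomic_fetch_add(int i, atomic_t *v)
{
	int result, val;

	asm volatile(
		"/* atomic_fetch_add */\n"
		"1:	ssrf	5\n"		/* set the L flag; stcond only succeeds while it is set */
		"	ld.w	%0, %3\n"	/* result = old value */
		"	mov	%1, %0\n"
		"	add	%1, %4\n"	/* val = old + i */
		"	stcond	%2, %1\n"	/* conditional store of the new value */
		"	brne	1b"		/* retry if the store was aborted */
		: "=&r" (result), "=&r" (val), "=o" (v->counter)
		: "m" (v->counter), "r" (i)
		: "cc");

	return result;
}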
diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h
index 1c1c42330c99..63c7deceeeb6 100644
--- a/arch/blackfin/include/asm/atomic.h
+++ b/arch/blackfin/include/asm/atomic.h
@@ -17,6 +17,7 @@
17 | 17 | ||
18 | asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr); | 18 | asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr); |
19 | asmlinkage int __raw_atomic_add_asm(volatile int *ptr, int value); | 19 | asmlinkage int __raw_atomic_add_asm(volatile int *ptr, int value); |
20 | asmlinkage int __raw_atomic_xadd_asm(volatile int *ptr, int value); | ||
20 | 21 | ||
21 | asmlinkage int __raw_atomic_and_asm(volatile int *ptr, int value); | 22 | asmlinkage int __raw_atomic_and_asm(volatile int *ptr, int value); |
22 | asmlinkage int __raw_atomic_or_asm(volatile int *ptr, int value); | 23 | asmlinkage int __raw_atomic_or_asm(volatile int *ptr, int value); |
@@ -28,10 +29,17 @@ asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);
28 | #define atomic_add_return(i, v) __raw_atomic_add_asm(&(v)->counter, i) | 29 | #define atomic_add_return(i, v) __raw_atomic_add_asm(&(v)->counter, i) |
29 | #define atomic_sub_return(i, v) __raw_atomic_add_asm(&(v)->counter, -(i)) | 30 | #define atomic_sub_return(i, v) __raw_atomic_add_asm(&(v)->counter, -(i)) |
30 | 31 | ||
32 | #define atomic_fetch_add(i, v) __raw_atomic_xadd_asm(&(v)->counter, i) | ||
33 | #define atomic_fetch_sub(i, v) __raw_atomic_xadd_asm(&(v)->counter, -(i)) | ||
34 | |||
31 | #define atomic_or(i, v) (void)__raw_atomic_or_asm(&(v)->counter, i) | 35 | #define atomic_or(i, v) (void)__raw_atomic_or_asm(&(v)->counter, i) |
32 | #define atomic_and(i, v) (void)__raw_atomic_and_asm(&(v)->counter, i) | 36 | #define atomic_and(i, v) (void)__raw_atomic_and_asm(&(v)->counter, i) |
33 | #define atomic_xor(i, v) (void)__raw_atomic_xor_asm(&(v)->counter, i) | 37 | #define atomic_xor(i, v) (void)__raw_atomic_xor_asm(&(v)->counter, i) |
34 | 38 | ||
39 | #define atomic_fetch_or(i, v) __raw_atomic_or_asm(&(v)->counter, i) | ||
40 | #define atomic_fetch_and(i, v) __raw_atomic_and_asm(&(v)->counter, i) | ||
41 | #define atomic_fetch_xor(i, v) __raw_atomic_xor_asm(&(v)->counter, i) | ||
42 | |||
35 | #endif | 43 | #endif |
36 | 44 | ||
37 | #include <asm-generic/atomic.h> | 45 | #include <asm-generic/atomic.h> |
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c
index a401c27b69b4..68096e8f787f 100644
--- a/arch/blackfin/kernel/bfin_ksyms.c
+++ b/arch/blackfin/kernel/bfin_ksyms.c
@@ -84,6 +84,7 @@ EXPORT_SYMBOL(insl_16);
84 | 84 | ||
85 | #ifdef CONFIG_SMP | 85 | #ifdef CONFIG_SMP |
86 | EXPORT_SYMBOL(__raw_atomic_add_asm); | 86 | EXPORT_SYMBOL(__raw_atomic_add_asm); |
87 | EXPORT_SYMBOL(__raw_atomic_xadd_asm); | ||
87 | EXPORT_SYMBOL(__raw_atomic_and_asm); | 88 | EXPORT_SYMBOL(__raw_atomic_and_asm); |
88 | EXPORT_SYMBOL(__raw_atomic_or_asm); | 89 | EXPORT_SYMBOL(__raw_atomic_or_asm); |
89 | EXPORT_SYMBOL(__raw_atomic_xor_asm); | 90 | EXPORT_SYMBOL(__raw_atomic_xor_asm); |
diff --git a/arch/blackfin/mach-bf561/atomic.S b/arch/blackfin/mach-bf561/atomic.S
index 26fccb5568b9..1e2989c5d6b2 100644
--- a/arch/blackfin/mach-bf561/atomic.S
+++ b/arch/blackfin/mach-bf561/atomic.S
@@ -607,6 +607,28 @@ ENDPROC(___raw_atomic_add_asm)
607 | 607 | ||
608 | /* | 608 | /* |
609 | * r0 = ptr | 609 | * r0 = ptr |
610 | * r1 = value | ||
611 | * | ||
612 | * ADD a signed value to a 32bit word and return the old value atomically. | ||
613 | * Clobbers: r3:0, p1:0 | ||
614 | */ | ||
615 | ENTRY(___raw_atomic_xadd_asm) | ||
616 | p1 = r0; | ||
617 | r3 = r1; | ||
618 | [--sp] = rets; | ||
619 | call _get_core_lock; | ||
620 | r3 = [p1]; | ||
621 | r2 = r3 + r2; | ||
622 | [p1] = r2; | ||
623 | r1 = p1; | ||
624 | call _put_core_lock; | ||
625 | r0 = r3; | ||
626 | rets = [sp++]; | ||
627 | rts; | ||
628 | ENDPROC(___raw_atomic_add_asm) | ||
629 | |||
630 | /* | ||
631 | * r0 = ptr | ||
610 | * r1 = mask | 632 | * r1 = mask |
611 | * | 633 | * |
612 | * AND the mask bits from a 32bit word and return the old 32bit value | 634 | * AND the mask bits from a 32bit word and return the old 32bit value |
@@ -618,10 +640,9 @@ ENTRY(___raw_atomic_and_asm)
618 | r3 = r1; | 640 | r3 = r1; |
619 | [--sp] = rets; | 641 | [--sp] = rets; |
620 | call _get_core_lock; | 642 | call _get_core_lock; |
621 | r2 = [p1]; | 643 | r3 = [p1]; |
622 | r3 = r2 & r3; | 644 | r2 = r2 & r3; |
623 | [p1] = r3; | 645 | [p1] = r2; |
624 | r3 = r2; | ||
625 | r1 = p1; | 646 | r1 = p1; |
626 | call _put_core_lock; | 647 | call _put_core_lock; |
627 | r0 = r3; | 648 | r0 = r3; |
@@ -642,10 +663,9 @@ ENTRY(___raw_atomic_or_asm)
642 | r3 = r1; | 663 | r3 = r1; |
643 | [--sp] = rets; | 664 | [--sp] = rets; |
644 | call _get_core_lock; | 665 | call _get_core_lock; |
645 | r2 = [p1]; | 666 | r3 = [p1]; |
646 | r3 = r2 | r3; | 667 | r2 = r2 | r3; |
647 | [p1] = r3; | 668 | [p1] = r2; |
648 | r3 = r2; | ||
649 | r1 = p1; | 669 | r1 = p1; |
650 | call _put_core_lock; | 670 | call _put_core_lock; |
651 | r0 = r3; | 671 | r0 = r3; |
@@ -666,10 +686,9 @@ ENTRY(___raw_atomic_xor_asm)
666 | r3 = r1; | 686 | r3 = r1; |
667 | [--sp] = rets; | 687 | [--sp] = rets; |
668 | call _get_core_lock; | 688 | call _get_core_lock; |
669 | r2 = [p1]; | 689 | r3 = [p1]; |
670 | r3 = r2 ^ r3; | 690 | r2 = r2 ^ r3; |
671 | [p1] = r3; | 691 | [p1] = r2; |
672 | r3 = r2; | ||
673 | r1 = p1; | 692 | r1 = p1; |
674 | call _put_core_lock; | 693 | call _put_core_lock; |
675 | r0 = r3; | 694 | r0 = r3; |
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index 64f02d451aa8..1c2a5e264fc7 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -60,16 +60,6 @@ static inline int atomic_add_negative(int i, atomic_t *v)
60 | return atomic_add_return(i, v) < 0; | 60 | return atomic_add_return(i, v) < 0; |
61 | } | 61 | } |
62 | 62 | ||
63 | static inline void atomic_add(int i, atomic_t *v) | ||
64 | { | ||
65 | atomic_add_return(i, v); | ||
66 | } | ||
67 | |||
68 | static inline void atomic_sub(int i, atomic_t *v) | ||
69 | { | ||
70 | atomic_sub_return(i, v); | ||
71 | } | ||
72 | |||
73 | static inline void atomic_inc(atomic_t *v) | 63 | static inline void atomic_inc(atomic_t *v) |
74 | { | 64 | { |
75 | atomic_inc_return(v); | 65 | atomic_inc_return(v); |
@@ -136,16 +126,6 @@ static inline long long atomic64_add_negative(long long i, atomic64_t *v)
136 | return atomic64_add_return(i, v) < 0; | 126 | return atomic64_add_return(i, v) < 0; |
137 | } | 127 | } |
138 | 128 | ||
139 | static inline void atomic64_add(long long i, atomic64_t *v) | ||
140 | { | ||
141 | atomic64_add_return(i, v); | ||
142 | } | ||
143 | |||
144 | static inline void atomic64_sub(long long i, atomic64_t *v) | ||
145 | { | ||
146 | atomic64_sub_return(i, v); | ||
147 | } | ||
148 | |||
149 | static inline void atomic64_inc(atomic64_t *v) | 129 | static inline void atomic64_inc(atomic64_t *v) |
150 | { | 130 | { |
151 | atomic64_inc_return(v); | 131 | atomic64_inc_return(v); |
@@ -182,11 +162,19 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
182 | } | 162 | } |
183 | 163 | ||
184 | #define ATOMIC_OP(op) \ | 164 | #define ATOMIC_OP(op) \ |
165 | static inline int atomic_fetch_##op(int i, atomic_t *v) \ | ||
166 | { \ | ||
167 | return __atomic32_fetch_##op(i, &v->counter); \ | ||
168 | } \ | ||
185 | static inline void atomic_##op(int i, atomic_t *v) \ | 169 | static inline void atomic_##op(int i, atomic_t *v) \ |
186 | { \ | 170 | { \ |
187 | (void)__atomic32_fetch_##op(i, &v->counter); \ | 171 | (void)__atomic32_fetch_##op(i, &v->counter); \ |
188 | } \ | 172 | } \ |
189 | \ | 173 | \ |
174 | static inline long long atomic64_fetch_##op(long long i, atomic64_t *v) \ | ||
175 | { \ | ||
176 | return __atomic64_fetch_##op(i, &v->counter); \ | ||
177 | } \ | ||
190 | static inline void atomic64_##op(long long i, atomic64_t *v) \ | 178 | static inline void atomic64_##op(long long i, atomic64_t *v) \ |
191 | { \ | 179 | { \ |
192 | (void)__atomic64_fetch_##op(i, &v->counter); \ | 180 | (void)__atomic64_fetch_##op(i, &v->counter); \ |
@@ -195,6 +183,8 @@ static inline void atomic64_##op(long long i, atomic64_t *v) \
195 | ATOMIC_OP(or) | 183 | ATOMIC_OP(or) |
196 | ATOMIC_OP(and) | 184 | ATOMIC_OP(and) |
197 | ATOMIC_OP(xor) | 185 | ATOMIC_OP(xor) |
186 | ATOMIC_OP(add) | ||
187 | ATOMIC_OP(sub) | ||
198 | 188 | ||
199 | #undef ATOMIC_OP | 189 | #undef ATOMIC_OP |
200 | 190 | ||
diff --git a/arch/frv/include/asm/atomic_defs.h b/arch/frv/include/asm/atomic_defs.h
index 36e126d2f801..d4912c88b829 100644
--- a/arch/frv/include/asm/atomic_defs.h
+++ b/arch/frv/include/asm/atomic_defs.h
@@ -162,6 +162,8 @@ ATOMIC_EXPORT(__atomic64_fetch_##op);
162 | ATOMIC_FETCH_OP(or) | 162 | ATOMIC_FETCH_OP(or) |
163 | ATOMIC_FETCH_OP(and) | 163 | ATOMIC_FETCH_OP(and) |
164 | ATOMIC_FETCH_OP(xor) | 164 | ATOMIC_FETCH_OP(xor) |
165 | ATOMIC_FETCH_OP(add) | ||
166 | ATOMIC_FETCH_OP(sub) | ||
165 | 167 | ||
166 | ATOMIC_OP_RETURN(add) | 168 | ATOMIC_OP_RETURN(add) |
167 | ATOMIC_OP_RETURN(sub) | 169 | ATOMIC_OP_RETURN(sub) |
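The frv hunks above fold the old open-coded void atomic_add()/atomic_sub() wrappers into the shared ATOMIC_OP() macro, which now emits both a value-returning atomic_fetch_##op() and a void atomic_##op() on top of the existing __atomic32_fetch_*/__atomic64_fetch_* primitives, with atomic_defs.h gaining the add/sub instantiations. As a minimal illustration (hand-expanded from the macro, not text taken from the patch), ATOMIC_OP(add) now yields roughly the following 32-bit pair:

    /* Hand expansion of frv ATOMIC_OP(add), 32-bit half only (sketch). */
    static inline int atomic_fetch_add(int i, atomic_t *v)
    {
            return __atomic32_fetch_add(i, &v->counter);    /* pre-operation value */
    }

    static inline void atomic_add(int i, atomic_t *v)
    {
            (void)__atomic32_fetch_add(i, &v->counter);     /* result discarded */
    }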
diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h index 4435a445ae7e..349a47a918db 100644 --- a/arch/h8300/include/asm/atomic.h +++ b/arch/h8300/include/asm/atomic.h | |||
@@ -28,6 +28,19 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ | |||
28 | return ret; \ | 28 | return ret; \ |
29 | } | 29 | } |
30 | 30 | ||
31 | #define ATOMIC_FETCH_OP(op, c_op) \ | ||
32 | static inline int atomic_fetch_##op(int i, atomic_t *v) \ | ||
33 | { \ | ||
34 | h8300flags flags; \ | ||
35 | int ret; \ | ||
36 | \ | ||
37 | flags = arch_local_irq_save(); \ | ||
38 | ret = v->counter; \ | ||
39 | v->counter c_op i; \ | ||
40 | arch_local_irq_restore(flags); \ | ||
41 | return ret; \ | ||
42 | } | ||
43 | |||
31 | #define ATOMIC_OP(op, c_op) \ | 44 | #define ATOMIC_OP(op, c_op) \ |
32 | static inline void atomic_##op(int i, atomic_t *v) \ | 45 | static inline void atomic_##op(int i, atomic_t *v) \ |
33 | { \ | 46 | { \ |
@@ -41,17 +54,21 @@ static inline void atomic_##op(int i, atomic_t *v) \ | |||
41 | ATOMIC_OP_RETURN(add, +=) | 54 | ATOMIC_OP_RETURN(add, +=) |
42 | ATOMIC_OP_RETURN(sub, -=) | 55 | ATOMIC_OP_RETURN(sub, -=) |
43 | 56 | ||
44 | ATOMIC_OP(and, &=) | 57 | #define ATOMIC_OPS(op, c_op) \ |
45 | ATOMIC_OP(or, |=) | 58 | ATOMIC_OP(op, c_op) \ |
46 | ATOMIC_OP(xor, ^=) | 59 | ATOMIC_FETCH_OP(op, c_op) |
60 | |||
61 | ATOMIC_OPS(and, &=) | ||
62 | ATOMIC_OPS(or, |=) | ||
63 | ATOMIC_OPS(xor, ^=) | ||
64 | ATOMIC_OPS(add, +=) | ||
65 | ATOMIC_OPS(sub, -=) | ||
47 | 66 | ||
67 | #undef ATOMIC_OPS | ||
48 | #undef ATOMIC_OP_RETURN | 68 | #undef ATOMIC_OP_RETURN |
49 | #undef ATOMIC_OP | 69 | #undef ATOMIC_OP |
50 | 70 | ||
51 | #define atomic_add(i, v) (void)atomic_add_return(i, v) | ||
52 | #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) | 71 | #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) |
53 | |||
54 | #define atomic_sub(i, v) (void)atomic_sub_return(i, v) | ||
55 | #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) | 72 | #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) |
56 | 73 | ||
57 | #define atomic_inc_return(v) atomic_add_return(1, v) | 74 | #define atomic_inc_return(v) atomic_add_return(1, v) |
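On UP-only h8300 the new ATOMIC_FETCH_OP() is the same irq-off critical section as ATOMIC_OP_RETURN(), except that the counter is sampled before the update so the pre-operation value is returned. Hand-expanding ATOMIC_FETCH_OP(add, +=) from the hunk above gives roughly this (illustrative sketch, not patch text):

    static inline int atomic_fetch_add(int i, atomic_t *v)
    {
            h8300flags flags;
            int ret;

            flags = arch_local_irq_save();  /* no SMP here, disabling irqs suffices */
            ret = v->counter;               /* sample the old value first */
            v->counter += i;                /* then apply the operation */
            arch_local_irq_restore(flags);
            return ret;                     /* atomic_fetch_add() returns the old value */
    }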
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h index 55696c4100d4..a62ba368b27d 100644 --- a/arch/hexagon/include/asm/atomic.h +++ b/arch/hexagon/include/asm/atomic.h | |||
@@ -110,7 +110,7 @@ static inline void atomic_##op(int i, atomic_t *v) \ | |||
110 | ); \ | 110 | ); \ |
111 | } \ | 111 | } \ |
112 | 112 | ||
113 | #define ATOMIC_OP_RETURN(op) \ | 113 | #define ATOMIC_OP_RETURN(op) \ |
114 | static inline int atomic_##op##_return(int i, atomic_t *v) \ | 114 | static inline int atomic_##op##_return(int i, atomic_t *v) \ |
115 | { \ | 115 | { \ |
116 | int output; \ | 116 | int output; \ |
@@ -127,16 +127,37 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ | |||
127 | return output; \ | 127 | return output; \ |
128 | } | 128 | } |
129 | 129 | ||
130 | #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) | 130 | #define ATOMIC_FETCH_OP(op) \ |
131 | static inline int atomic_fetch_##op(int i, atomic_t *v) \ | ||
132 | { \ | ||
133 | int output, val; \ | ||
134 | \ | ||
135 | __asm__ __volatile__ ( \ | ||
136 | "1: %0 = memw_locked(%2);\n" \ | ||
137 | " %1 = "#op "(%0,%3);\n" \ | ||
138 | " memw_locked(%2,P3)=%1;\n" \ | ||
139 | " if !P3 jump 1b;\n" \ | ||
140 | : "=&r" (output), "=&r" (val) \ | ||
141 | : "r" (&v->counter), "r" (i) \ | ||
142 | : "memory", "p3" \ | ||
143 | ); \ | ||
144 | return output; \ | ||
145 | } | ||
146 | |||
147 | #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op) | ||
131 | 148 | ||
132 | ATOMIC_OPS(add) | 149 | ATOMIC_OPS(add) |
133 | ATOMIC_OPS(sub) | 150 | ATOMIC_OPS(sub) |
134 | 151 | ||
135 | ATOMIC_OP(and) | 152 | #undef ATOMIC_OPS |
136 | ATOMIC_OP(or) | 153 | #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) |
137 | ATOMIC_OP(xor) | 154 | |
155 | ATOMIC_OPS(and) | ||
156 | ATOMIC_OPS(or) | ||
157 | ATOMIC_OPS(xor) | ||
138 | 158 | ||
139 | #undef ATOMIC_OPS | 159 | #undef ATOMIC_OPS |
160 | #undef ATOMIC_FETCH_OP | ||
140 | #undef ATOMIC_OP_RETURN | 161 | #undef ATOMIC_OP_RETURN |
141 | #undef ATOMIC_OP | 162 | #undef ATOMIC_OP |
142 | 163 | ||
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h index 8dfb5f6f6c35..f565ad376142 100644 --- a/arch/ia64/include/asm/atomic.h +++ b/arch/ia64/include/asm/atomic.h | |||
@@ -42,8 +42,27 @@ ia64_atomic_##op (int i, atomic_t *v) \ | |||
42 | return new; \ | 42 | return new; \ |
43 | } | 43 | } |
44 | 44 | ||
45 | ATOMIC_OP(add, +) | 45 | #define ATOMIC_FETCH_OP(op, c_op) \ |
46 | ATOMIC_OP(sub, -) | 46 | static __inline__ int \ |
47 | ia64_atomic_fetch_##op (int i, atomic_t *v) \ | ||
48 | { \ | ||
49 | __s32 old, new; \ | ||
50 | CMPXCHG_BUGCHECK_DECL \ | ||
51 | \ | ||
52 | do { \ | ||
53 | CMPXCHG_BUGCHECK(v); \ | ||
54 | old = atomic_read(v); \ | ||
55 | new = old c_op i; \ | ||
56 | } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \ | ||
57 | return old; \ | ||
58 | } | ||
59 | |||
60 | #define ATOMIC_OPS(op, c_op) \ | ||
61 | ATOMIC_OP(op, c_op) \ | ||
62 | ATOMIC_FETCH_OP(op, c_op) | ||
63 | |||
64 | ATOMIC_OPS(add, +) | ||
65 | ATOMIC_OPS(sub, -) | ||
47 | 66 | ||
48 | #define atomic_add_return(i,v) \ | 67 | #define atomic_add_return(i,v) \ |
49 | ({ \ | 68 | ({ \ |
@@ -69,14 +88,44 @@ ATOMIC_OP(sub, -) | |||
69 | : ia64_atomic_sub(__ia64_asr_i, v); \ | 88 | : ia64_atomic_sub(__ia64_asr_i, v); \ |
70 | }) | 89 | }) |
71 | 90 | ||
72 | ATOMIC_OP(and, &) | 91 | #define atomic_fetch_add(i,v) \ |
73 | ATOMIC_OP(or, |) | 92 | ({ \ |
74 | ATOMIC_OP(xor, ^) | 93 | int __ia64_aar_i = (i); \ |
94 | (__builtin_constant_p(i) \ | ||
95 | && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \ | ||
96 | || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \ | ||
97 | || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \ | ||
98 | || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \ | ||
99 | ? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq) \ | ||
100 | : ia64_atomic_fetch_add(__ia64_aar_i, v); \ | ||
101 | }) | ||
102 | |||
103 | #define atomic_fetch_sub(i,v) \ | ||
104 | ({ \ | ||
105 | int __ia64_asr_i = (i); \ | ||
106 | (__builtin_constant_p(i) \ | ||
107 | && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \ | ||
108 | || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \ | ||
109 | || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \ | ||
110 | || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \ | ||
111 | ? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq) \ | ||
112 | : ia64_atomic_fetch_sub(__ia64_asr_i, v); \ | ||
113 | }) | ||
114 | |||
115 | ATOMIC_FETCH_OP(and, &) | ||
116 | ATOMIC_FETCH_OP(or, |) | ||
117 | ATOMIC_FETCH_OP(xor, ^) | ||
118 | |||
119 | #define atomic_and(i,v) (void)ia64_atomic_fetch_and(i,v) | ||
120 | #define atomic_or(i,v) (void)ia64_atomic_fetch_or(i,v) | ||
121 | #define atomic_xor(i,v) (void)ia64_atomic_fetch_xor(i,v) | ||
75 | 122 | ||
76 | #define atomic_and(i,v) (void)ia64_atomic_and(i,v) | 123 | #define atomic_fetch_and(i,v) ia64_atomic_fetch_and(i,v) |
77 | #define atomic_or(i,v) (void)ia64_atomic_or(i,v) | 124 | #define atomic_fetch_or(i,v) ia64_atomic_fetch_or(i,v) |
78 | #define atomic_xor(i,v) (void)ia64_atomic_xor(i,v) | 125 | #define atomic_fetch_xor(i,v) ia64_atomic_fetch_xor(i,v) |
79 | 126 | ||
127 | #undef ATOMIC_OPS | ||
128 | #undef ATOMIC_FETCH_OP | ||
80 | #undef ATOMIC_OP | 129 | #undef ATOMIC_OP |
81 | 130 | ||
82 | #define ATOMIC64_OP(op, c_op) \ | 131 | #define ATOMIC64_OP(op, c_op) \ |
@@ -94,8 +143,27 @@ ia64_atomic64_##op (__s64 i, atomic64_t *v) \ | |||
94 | return new; \ | 143 | return new; \ |
95 | } | 144 | } |
96 | 145 | ||
97 | ATOMIC64_OP(add, +) | 146 | #define ATOMIC64_FETCH_OP(op, c_op) \ |
98 | ATOMIC64_OP(sub, -) | 147 | static __inline__ long \ |
148 | ia64_atomic64_fetch_##op (__s64 i, atomic64_t *v) \ | ||
149 | { \ | ||
150 | __s64 old, new; \ | ||
151 | CMPXCHG_BUGCHECK_DECL \ | ||
152 | \ | ||
153 | do { \ | ||
154 | CMPXCHG_BUGCHECK(v); \ | ||
155 | old = atomic64_read(v); \ | ||
156 | new = old c_op i; \ | ||
157 | } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \ | ||
158 | return old; \ | ||
159 | } | ||
160 | |||
161 | #define ATOMIC64_OPS(op, c_op) \ | ||
162 | ATOMIC64_OP(op, c_op) \ | ||
163 | ATOMIC64_FETCH_OP(op, c_op) | ||
164 | |||
165 | ATOMIC64_OPS(add, +) | ||
166 | ATOMIC64_OPS(sub, -) | ||
99 | 167 | ||
100 | #define atomic64_add_return(i,v) \ | 168 | #define atomic64_add_return(i,v) \ |
101 | ({ \ | 169 | ({ \ |
@@ -121,14 +189,44 @@ ATOMIC64_OP(sub, -) | |||
121 | : ia64_atomic64_sub(__ia64_asr_i, v); \ | 189 | : ia64_atomic64_sub(__ia64_asr_i, v); \ |
122 | }) | 190 | }) |
123 | 191 | ||
124 | ATOMIC64_OP(and, &) | 192 | #define atomic64_fetch_add(i,v) \ |
125 | ATOMIC64_OP(or, |) | 193 | ({ \ |
126 | ATOMIC64_OP(xor, ^) | 194 | long __ia64_aar_i = (i); \ |
195 | (__builtin_constant_p(i) \ | ||
196 | && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \ | ||
197 | || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \ | ||
198 | || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \ | ||
199 | || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \ | ||
200 | ? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq) \ | ||
201 | : ia64_atomic64_fetch_add(__ia64_aar_i, v); \ | ||
202 | }) | ||
203 | |||
204 | #define atomic64_fetch_sub(i,v) \ | ||
205 | ({ \ | ||
206 | long __ia64_asr_i = (i); \ | ||
207 | (__builtin_constant_p(i) \ | ||
208 | && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \ | ||
209 | || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \ | ||
210 | || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \ | ||
211 | || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \ | ||
212 | ? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq) \ | ||
213 | : ia64_atomic64_fetch_sub(__ia64_asr_i, v); \ | ||
214 | }) | ||
215 | |||
216 | ATOMIC64_FETCH_OP(and, &) | ||
217 | ATOMIC64_FETCH_OP(or, |) | ||
218 | ATOMIC64_FETCH_OP(xor, ^) | ||
219 | |||
220 | #define atomic64_and(i,v) (void)ia64_atomic64_fetch_and(i,v) | ||
221 | #define atomic64_or(i,v) (void)ia64_atomic64_fetch_or(i,v) | ||
222 | #define atomic64_xor(i,v) (void)ia64_atomic64_fetch_xor(i,v) | ||
127 | 223 | ||
128 | #define atomic64_and(i,v) (void)ia64_atomic64_and(i,v) | 224 | #define atomic64_fetch_and(i,v) ia64_atomic64_fetch_and(i,v) |
129 | #define atomic64_or(i,v) (void)ia64_atomic64_or(i,v) | 225 | #define atomic64_fetch_or(i,v) ia64_atomic64_fetch_or(i,v) |
130 | #define atomic64_xor(i,v) (void)ia64_atomic64_xor(i,v) | 226 | #define atomic64_fetch_xor(i,v) ia64_atomic64_fetch_xor(i,v) |
131 | 227 | ||
228 | #undef ATOMIC64_OPS | ||
229 | #undef ATOMIC64_FETCH_OP | ||
132 | #undef ATOMIC64_OP | 230 | #undef ATOMIC64_OP |
133 | 231 | ||
134 | #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) | 232 | #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) |
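Outside the ia64_fetchadd() fast path for small constants, the ia64 fetch variants are a cmpxchg retry loop: read the counter, compute old c_op i, and retry until the cmpxchg observes the value that was read. The same structure can be written portably; the sketch below uses the generic atomic_read()/atomic_cmpxchg() helpers instead of ia64_cmpxchg(), purely to illustrate the loop shape (assumption: simplified, CMPXCHG_BUGCHECK omitted):

    /* Illustrative cmpxchg-loop form of a fetch-op (not patch text). */
    static inline int fetch_add_via_cmpxchg(int i, atomic_t *v)
    {
            int old;

            do {
                    old = atomic_read(v);
                    /* retry if another CPU changed the counter meanwhile */
            } while (atomic_cmpxchg(v, old, old + i) != old);

            return old;     /* fetch_* semantics: pre-operation value */
    }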
diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h index ea35160d632b..640cc1c7099f 100644 --- a/arch/m32r/include/asm/atomic.h +++ b/arch/m32r/include/asm/atomic.h | |||
@@ -89,16 +89,44 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \ | |||
89 | return result; \ | 89 | return result; \ |
90 | } | 90 | } |
91 | 91 | ||
92 | #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) | 92 | #define ATOMIC_FETCH_OP(op) \ |
93 | static __inline__ int atomic_fetch_##op(int i, atomic_t *v) \ | ||
94 | { \ | ||
95 | unsigned long flags; \ | ||
96 | int result, val; \ | ||
97 | \ | ||
98 | local_irq_save(flags); \ | ||
99 | __asm__ __volatile__ ( \ | ||
100 | "# atomic_fetch_" #op " \n\t" \ | ||
101 | DCACHE_CLEAR("%0", "r4", "%2") \ | ||
102 | M32R_LOCK" %1, @%2; \n\t" \ | ||
103 | "mv %0, %1 \n\t" \ | ||
104 | #op " %1, %3; \n\t" \ | ||
105 | M32R_UNLOCK" %1, @%2; \n\t" \ | ||
106 | : "=&r" (result), "=&r" (val) \ | ||
107 | : "r" (&v->counter), "r" (i) \ | ||
108 | : "memory" \ | ||
109 | __ATOMIC_CLOBBER \ | ||
110 | ); \ | ||
111 | local_irq_restore(flags); \ | ||
112 | \ | ||
113 | return result; \ | ||
114 | } | ||
115 | |||
116 | #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op) | ||
93 | 117 | ||
94 | ATOMIC_OPS(add) | 118 | ATOMIC_OPS(add) |
95 | ATOMIC_OPS(sub) | 119 | ATOMIC_OPS(sub) |
96 | 120 | ||
97 | ATOMIC_OP(and) | 121 | #undef ATOMIC_OPS |
98 | ATOMIC_OP(or) | 122 | #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) |
99 | ATOMIC_OP(xor) | 123 | |
124 | ATOMIC_OPS(and) | ||
125 | ATOMIC_OPS(or) | ||
126 | ATOMIC_OPS(xor) | ||
100 | 127 | ||
101 | #undef ATOMIC_OPS | 128 | #undef ATOMIC_OPS |
129 | #undef ATOMIC_FETCH_OP | ||
102 | #undef ATOMIC_OP_RETURN | 130 | #undef ATOMIC_OP_RETURN |
103 | #undef ATOMIC_OP | 131 | #undef ATOMIC_OP |
104 | 132 | ||
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h index 4858178260f9..cf4c3a7b1a45 100644 --- a/arch/m68k/include/asm/atomic.h +++ b/arch/m68k/include/asm/atomic.h | |||
@@ -53,6 +53,21 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ | |||
53 | return t; \ | 53 | return t; \ |
54 | } | 54 | } |
55 | 55 | ||
56 | #define ATOMIC_FETCH_OP(op, c_op, asm_op) \ | ||
57 | static inline int atomic_fetch_##op(int i, atomic_t *v) \ | ||
58 | { \ | ||
59 | int t, tmp; \ | ||
60 | \ | ||
61 | __asm__ __volatile__( \ | ||
62 | "1: movel %2,%1\n" \ | ||
63 | " " #asm_op "l %3,%1\n" \ | ||
64 | " casl %2,%1,%0\n" \ | ||
65 | " jne 1b" \ | ||
66 | : "+m" (*v), "=&d" (t), "=&d" (tmp) \ | ||
67 | : "g" (i), "2" (atomic_read(v))); \ | ||
68 | return tmp; \ | ||
69 | } | ||
70 | |||
56 | #else | 71 | #else |
57 | 72 | ||
58 | #define ATOMIC_OP_RETURN(op, c_op, asm_op) \ | 73 | #define ATOMIC_OP_RETURN(op, c_op, asm_op) \ |
@@ -68,20 +83,41 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \ | |||
68 | return t; \ | 83 | return t; \ |
69 | } | 84 | } |
70 | 85 | ||
86 | #define ATOMIC_FETCH_OP(op, c_op, asm_op) \ | ||
87 | static inline int atomic_fetch_##op(int i, atomic_t * v) \ | ||
88 | { \ | ||
89 | unsigned long flags; \ | ||
90 | int t; \ | ||
91 | \ | ||
92 | local_irq_save(flags); \ | ||
93 | t = v->counter; \ | ||
94 | v->counter c_op i; \ | ||
95 | local_irq_restore(flags); \ | ||
96 | \ | ||
97 | return t; \ | ||
98 | } | ||
99 | |||
71 | #endif /* CONFIG_RMW_INSNS */ | 100 | #endif /* CONFIG_RMW_INSNS */ |
72 | 101 | ||
73 | #define ATOMIC_OPS(op, c_op, asm_op) \ | 102 | #define ATOMIC_OPS(op, c_op, asm_op) \ |
74 | ATOMIC_OP(op, c_op, asm_op) \ | 103 | ATOMIC_OP(op, c_op, asm_op) \ |
75 | ATOMIC_OP_RETURN(op, c_op, asm_op) | 104 | ATOMIC_OP_RETURN(op, c_op, asm_op) \ |
105 | ATOMIC_FETCH_OP(op, c_op, asm_op) | ||
76 | 106 | ||
77 | ATOMIC_OPS(add, +=, add) | 107 | ATOMIC_OPS(add, +=, add) |
78 | ATOMIC_OPS(sub, -=, sub) | 108 | ATOMIC_OPS(sub, -=, sub) |
79 | 109 | ||
80 | ATOMIC_OP(and, &=, and) | 110 | #undef ATOMIC_OPS |
81 | ATOMIC_OP(or, |=, or) | 111 | #define ATOMIC_OPS(op, c_op, asm_op) \ |
82 | ATOMIC_OP(xor, ^=, eor) | 112 | ATOMIC_OP(op, c_op, asm_op) \ |
113 | ATOMIC_FETCH_OP(op, c_op, asm_op) | ||
114 | |||
115 | ATOMIC_OPS(and, &=, and) | ||
116 | ATOMIC_OPS(or, |=, or) | ||
117 | ATOMIC_OPS(xor, ^=, eor) | ||
83 | 118 | ||
84 | #undef ATOMIC_OPS | 119 | #undef ATOMIC_OPS |
120 | #undef ATOMIC_FETCH_OP | ||
85 | #undef ATOMIC_OP_RETURN | 121 | #undef ATOMIC_OP_RETURN |
86 | #undef ATOMIC_OP | 122 | #undef ATOMIC_OP |
87 | 123 | ||
diff --git a/arch/metag/include/asm/atomic_lnkget.h b/arch/metag/include/asm/atomic_lnkget.h index 88fa25fae8bd..def2c642f053 100644 --- a/arch/metag/include/asm/atomic_lnkget.h +++ b/arch/metag/include/asm/atomic_lnkget.h | |||
@@ -69,16 +69,44 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ | |||
69 | return result; \ | 69 | return result; \ |
70 | } | 70 | } |
71 | 71 | ||
72 | #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) | 72 | #define ATOMIC_FETCH_OP(op) \ |
73 | static inline int atomic_fetch_##op(int i, atomic_t *v) \ | ||
74 | { \ | ||
75 | int result, temp; \ | ||
76 | \ | ||
77 | smp_mb(); \ | ||
78 | \ | ||
79 | asm volatile ( \ | ||
80 | "1: LNKGETD %1, [%2]\n" \ | ||
81 | " " #op " %0, %1, %3\n" \ | ||
82 | " LNKSETD [%2], %0\n" \ | ||
83 | " DEFR %0, TXSTAT\n" \ | ||
84 | " ANDT %0, %0, #HI(0x3f000000)\n" \ | ||
85 | " CMPT %0, #HI(0x02000000)\n" \ | ||
86 | " BNZ 1b\n" \ | ||
87 | : "=&d" (temp), "=&d" (result) \ | ||
88 | : "da" (&v->counter), "bd" (i) \ | ||
89 | : "cc"); \ | ||
90 | \ | ||
91 | smp_mb(); \ | ||
92 | \ | ||
93 | return result; \ | ||
94 | } | ||
95 | |||
96 | #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op) | ||
73 | 97 | ||
74 | ATOMIC_OPS(add) | 98 | ATOMIC_OPS(add) |
75 | ATOMIC_OPS(sub) | 99 | ATOMIC_OPS(sub) |
76 | 100 | ||
77 | ATOMIC_OP(and) | 101 | #undef ATOMIC_OPS |
78 | ATOMIC_OP(or) | 102 | #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) |
79 | ATOMIC_OP(xor) | 103 | |
104 | ATOMIC_OPS(and) | ||
105 | ATOMIC_OPS(or) | ||
106 | ATOMIC_OPS(xor) | ||
80 | 107 | ||
81 | #undef ATOMIC_OPS | 108 | #undef ATOMIC_OPS |
109 | #undef ATOMIC_FETCH_OP | ||
82 | #undef ATOMIC_OP_RETURN | 110 | #undef ATOMIC_OP_RETURN |
83 | #undef ATOMIC_OP | 111 | #undef ATOMIC_OP |
84 | 112 | ||
diff --git a/arch/metag/include/asm/atomic_lock1.h b/arch/metag/include/asm/atomic_lock1.h index 0295d9b8d5bf..6c1380a8a0d4 100644 --- a/arch/metag/include/asm/atomic_lock1.h +++ b/arch/metag/include/asm/atomic_lock1.h | |||
@@ -64,15 +64,40 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ | |||
64 | return result; \ | 64 | return result; \ |
65 | } | 65 | } |
66 | 66 | ||
67 | #define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op) | 67 | #define ATOMIC_FETCH_OP(op, c_op) \ |
68 | static inline int atomic_fetch_##op(int i, atomic_t *v) \ | ||
69 | { \ | ||
70 | unsigned long result; \ | ||
71 | unsigned long flags; \ | ||
72 | \ | ||
73 | __global_lock1(flags); \ | ||
74 | result = v->counter; \ | ||
75 | fence(); \ | ||
76 | v->counter c_op i; \ | ||
77 | __global_unlock1(flags); \ | ||
78 | \ | ||
79 | return result; \ | ||
80 | } | ||
81 | |||
82 | #define ATOMIC_OPS(op, c_op) \ | ||
83 | ATOMIC_OP(op, c_op) \ | ||
84 | ATOMIC_OP_RETURN(op, c_op) \ | ||
85 | ATOMIC_FETCH_OP(op, c_op) | ||
68 | 86 | ||
69 | ATOMIC_OPS(add, +=) | 87 | ATOMIC_OPS(add, +=) |
70 | ATOMIC_OPS(sub, -=) | 88 | ATOMIC_OPS(sub, -=) |
71 | ATOMIC_OP(and, &=) | ||
72 | ATOMIC_OP(or, |=) | ||
73 | ATOMIC_OP(xor, ^=) | ||
74 | 89 | ||
75 | #undef ATOMIC_OPS | 90 | #undef ATOMIC_OPS |
91 | #define ATOMIC_OPS(op, c_op) \ | ||
92 | ATOMIC_OP(op, c_op) \ | ||
93 | ATOMIC_FETCH_OP(op, c_op) | ||
94 | |||
95 | ATOMIC_OPS(and, &=) | ||
96 | ATOMIC_OPS(or, |=) | ||
97 | ATOMIC_OPS(xor, ^=) | ||
98 | |||
99 | #undef ATOMIC_OPS | ||
100 | #undef ATOMIC_FETCH_OP | ||
76 | #undef ATOMIC_OP_RETURN | 101 | #undef ATOMIC_OP_RETURN |
77 | #undef ATOMIC_OP | 102 | #undef ATOMIC_OP |
78 | 103 | ||
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index 835b402e4574..0ab176bdb8e8 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h | |||
@@ -66,7 +66,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \ | |||
66 | " " #asm_op " %0, %2 \n" \ | 66 | " " #asm_op " %0, %2 \n" \ |
67 | " sc %0, %1 \n" \ | 67 | " sc %0, %1 \n" \ |
68 | " .set mips0 \n" \ | 68 | " .set mips0 \n" \ |
69 | : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ | 69 | : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ |
70 | : "Ir" (i)); \ | 70 | : "Ir" (i)); \ |
71 | } while (unlikely(!temp)); \ | 71 | } while (unlikely(!temp)); \ |
72 | } else { \ | 72 | } else { \ |
@@ -79,12 +79,10 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \ | |||
79 | } | 79 | } |
80 | 80 | ||
81 | #define ATOMIC_OP_RETURN(op, c_op, asm_op) \ | 81 | #define ATOMIC_OP_RETURN(op, c_op, asm_op) \ |
82 | static __inline__ int atomic_##op##_return(int i, atomic_t * v) \ | 82 | static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \ |
83 | { \ | 83 | { \ |
84 | int result; \ | 84 | int result; \ |
85 | \ | 85 | \ |
86 | smp_mb__before_llsc(); \ | ||
87 | \ | ||
88 | if (kernel_uses_llsc && R10000_LLSC_WAR) { \ | 86 | if (kernel_uses_llsc && R10000_LLSC_WAR) { \ |
89 | int temp; \ | 87 | int temp; \ |
90 | \ | 88 | \ |
@@ -125,23 +123,84 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \ | |||
125 | raw_local_irq_restore(flags); \ | 123 | raw_local_irq_restore(flags); \ |
126 | } \ | 124 | } \ |
127 | \ | 125 | \ |
128 | smp_llsc_mb(); \ | 126 | return result; \ |
127 | } | ||
128 | |||
129 | #define ATOMIC_FETCH_OP(op, c_op, asm_op) \ | ||
130 | static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \ | ||
131 | { \ | ||
132 | int result; \ | ||
133 | \ | ||
134 | if (kernel_uses_llsc && R10000_LLSC_WAR) { \ | ||
135 | int temp; \ | ||
136 | \ | ||
137 | __asm__ __volatile__( \ | ||
138 | " .set arch=r4000 \n" \ | ||
139 | "1: ll %1, %2 # atomic_fetch_" #op " \n" \ | ||
140 | " " #asm_op " %0, %1, %3 \n" \ | ||
141 | " sc %0, %2 \n" \ | ||
142 | " beqzl %0, 1b \n" \ | ||
143 | " move %0, %1 \n" \ | ||
144 | " .set mips0 \n" \ | ||
145 | : "=&r" (result), "=&r" (temp), \ | ||
146 | "+" GCC_OFF_SMALL_ASM() (v->counter) \ | ||
147 | : "Ir" (i)); \ | ||
148 | } else if (kernel_uses_llsc) { \ | ||
149 | int temp; \ | ||
150 | \ | ||
151 | do { \ | ||
152 | __asm__ __volatile__( \ | ||
153 | " .set "MIPS_ISA_LEVEL" \n" \ | ||
154 | " ll %1, %2 # atomic_fetch_" #op " \n" \ | ||
155 | " " #asm_op " %0, %1, %3 \n" \ | ||
156 | " sc %0, %2 \n" \ | ||
157 | " .set mips0 \n" \ | ||
158 | : "=&r" (result), "=&r" (temp), \ | ||
159 | "+" GCC_OFF_SMALL_ASM() (v->counter) \ | ||
160 | : "Ir" (i)); \ | ||
161 | } while (unlikely(!result)); \ | ||
162 | \ | ||
163 | result = temp; \ | ||
164 | } else { \ | ||
165 | unsigned long flags; \ | ||
166 | \ | ||
167 | raw_local_irq_save(flags); \ | ||
168 | result = v->counter; \ | ||
169 | v->counter c_op i; \ | ||
170 | raw_local_irq_restore(flags); \ | ||
171 | } \ | ||
129 | \ | 172 | \ |
130 | return result; \ | 173 | return result; \ |
131 | } | 174 | } |
132 | 175 | ||
133 | #define ATOMIC_OPS(op, c_op, asm_op) \ | 176 | #define ATOMIC_OPS(op, c_op, asm_op) \ |
134 | ATOMIC_OP(op, c_op, asm_op) \ | 177 | ATOMIC_OP(op, c_op, asm_op) \ |
135 | ATOMIC_OP_RETURN(op, c_op, asm_op) | 178 | ATOMIC_OP_RETURN(op, c_op, asm_op) \ |
179 | ATOMIC_FETCH_OP(op, c_op, asm_op) | ||
136 | 180 | ||
137 | ATOMIC_OPS(add, +=, addu) | 181 | ATOMIC_OPS(add, +=, addu) |
138 | ATOMIC_OPS(sub, -=, subu) | 182 | ATOMIC_OPS(sub, -=, subu) |
139 | 183 | ||
140 | ATOMIC_OP(and, &=, and) | 184 | #define atomic_add_return_relaxed atomic_add_return_relaxed |
141 | ATOMIC_OP(or, |=, or) | 185 | #define atomic_sub_return_relaxed atomic_sub_return_relaxed |
142 | ATOMIC_OP(xor, ^=, xor) | 186 | #define atomic_fetch_add_relaxed atomic_fetch_add_relaxed |
187 | #define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed | ||
188 | |||
189 | #undef ATOMIC_OPS | ||
190 | #define ATOMIC_OPS(op, c_op, asm_op) \ | ||
191 | ATOMIC_OP(op, c_op, asm_op) \ | ||
192 | ATOMIC_FETCH_OP(op, c_op, asm_op) | ||
193 | |||
194 | ATOMIC_OPS(and, &=, and) | ||
195 | ATOMIC_OPS(or, |=, or) | ||
196 | ATOMIC_OPS(xor, ^=, xor) | ||
197 | |||
198 | #define atomic_fetch_and_relaxed atomic_fetch_and_relaxed | ||
199 | #define atomic_fetch_or_relaxed atomic_fetch_or_relaxed | ||
200 | #define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed | ||
143 | 201 | ||
144 | #undef ATOMIC_OPS | 202 | #undef ATOMIC_OPS |
203 | #undef ATOMIC_FETCH_OP | ||
145 | #undef ATOMIC_OP_RETURN | 204 | #undef ATOMIC_OP_RETURN |
146 | #undef ATOMIC_OP | 205 | #undef ATOMIC_OP |
147 | 206 | ||
@@ -362,12 +421,10 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \ | |||
362 | } | 421 | } |
363 | 422 | ||
364 | #define ATOMIC64_OP_RETURN(op, c_op, asm_op) \ | 423 | #define ATOMIC64_OP_RETURN(op, c_op, asm_op) \ |
365 | static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \ | 424 | static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \ |
366 | { \ | 425 | { \ |
367 | long result; \ | 426 | long result; \ |
368 | \ | 427 | \ |
369 | smp_mb__before_llsc(); \ | ||
370 | \ | ||
371 | if (kernel_uses_llsc && R10000_LLSC_WAR) { \ | 428 | if (kernel_uses_llsc && R10000_LLSC_WAR) { \ |
372 | long temp; \ | 429 | long temp; \ |
373 | \ | 430 | \ |
@@ -409,22 +466,85 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \ | |||
409 | raw_local_irq_restore(flags); \ | 466 | raw_local_irq_restore(flags); \ |
410 | } \ | 467 | } \ |
411 | \ | 468 | \ |
412 | smp_llsc_mb(); \ | 469 | return result; \ |
470 | } | ||
471 | |||
472 | #define ATOMIC64_FETCH_OP(op, c_op, asm_op) \ | ||
473 | static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \ | ||
474 | { \ | ||
475 | long result; \ | ||
476 | \ | ||
477 | if (kernel_uses_llsc && R10000_LLSC_WAR) { \ | ||
478 | long temp; \ | ||
479 | \ | ||
480 | __asm__ __volatile__( \ | ||
481 | " .set arch=r4000 \n" \ | ||
482 | "1: lld %1, %2 # atomic64_fetch_" #op "\n" \ | ||
483 | " " #asm_op " %0, %1, %3 \n" \ | ||
484 | " scd %0, %2 \n" \ | ||
485 | " beqzl %0, 1b \n" \ | ||
486 | " move %0, %1 \n" \ | ||
487 | " .set mips0 \n" \ | ||
488 | : "=&r" (result), "=&r" (temp), \ | ||
489 | "+" GCC_OFF_SMALL_ASM() (v->counter) \ | ||
490 | : "Ir" (i)); \ | ||
491 | } else if (kernel_uses_llsc) { \ | ||
492 | long temp; \ | ||
493 | \ | ||
494 | do { \ | ||
495 | __asm__ __volatile__( \ | ||
496 | " .set "MIPS_ISA_LEVEL" \n" \ | ||
497 | " lld %1, %2 # atomic64_fetch_" #op "\n" \ | ||
498 | " " #asm_op " %0, %1, %3 \n" \ | ||
499 | " scd %0, %2 \n" \ | ||
500 | " .set mips0 \n" \ | ||
501 | : "=&r" (result), "=&r" (temp), \ | ||
502 | "=" GCC_OFF_SMALL_ASM() (v->counter) \ | ||
503 | : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \ | ||
504 | : "memory"); \ | ||
505 | } while (unlikely(!result)); \ | ||
506 | \ | ||
507 | result = temp; \ | ||
508 | } else { \ | ||
509 | unsigned long flags; \ | ||
510 | \ | ||
511 | raw_local_irq_save(flags); \ | ||
512 | result = v->counter; \ | ||
513 | v->counter c_op i; \ | ||
514 | raw_local_irq_restore(flags); \ | ||
515 | } \ | ||
413 | \ | 516 | \ |
414 | return result; \ | 517 | return result; \ |
415 | } | 518 | } |
416 | 519 | ||
417 | #define ATOMIC64_OPS(op, c_op, asm_op) \ | 520 | #define ATOMIC64_OPS(op, c_op, asm_op) \ |
418 | ATOMIC64_OP(op, c_op, asm_op) \ | 521 | ATOMIC64_OP(op, c_op, asm_op) \ |
419 | ATOMIC64_OP_RETURN(op, c_op, asm_op) | 522 | ATOMIC64_OP_RETURN(op, c_op, asm_op) \ |
523 | ATOMIC64_FETCH_OP(op, c_op, asm_op) | ||
420 | 524 | ||
421 | ATOMIC64_OPS(add, +=, daddu) | 525 | ATOMIC64_OPS(add, +=, daddu) |
422 | ATOMIC64_OPS(sub, -=, dsubu) | 526 | ATOMIC64_OPS(sub, -=, dsubu) |
423 | ATOMIC64_OP(and, &=, and) | 527 | |
424 | ATOMIC64_OP(or, |=, or) | 528 | #define atomic64_add_return_relaxed atomic64_add_return_relaxed |
425 | ATOMIC64_OP(xor, ^=, xor) | 529 | #define atomic64_sub_return_relaxed atomic64_sub_return_relaxed |
530 | #define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed | ||
531 | #define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed | ||
532 | |||
533 | #undef ATOMIC64_OPS | ||
534 | #define ATOMIC64_OPS(op, c_op, asm_op) \ | ||
535 | ATOMIC64_OP(op, c_op, asm_op) \ | ||
536 | ATOMIC64_FETCH_OP(op, c_op, asm_op) | ||
537 | |||
538 | ATOMIC64_OPS(and, &=, and) | ||
539 | ATOMIC64_OPS(or, |=, or) | ||
540 | ATOMIC64_OPS(xor, ^=, xor) | ||
541 | |||
542 | #define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed | ||
543 | #define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed | ||
544 | #define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed | ||
426 | 545 | ||
427 | #undef ATOMIC64_OPS | 546 | #undef ATOMIC64_OPS |
547 | #undef ATOMIC64_FETCH_OP | ||
428 | #undef ATOMIC64_OP_RETURN | 548 | #undef ATOMIC64_OP_RETURN |
429 | #undef ATOMIC64_OP | 549 | #undef ATOMIC64_OP |
430 | 550 | ||
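Unlike the frv/h8300/metag conversions above, MIPS now provides only the _relaxed flavours: the smp_mb__before_llsc()/smp_llsc_mb() pairs are dropped from the generated functions, and the "#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed" style self-defines tell <linux/atomic.h> that only the relaxed forms exist, so the generic layer synthesizes the fully ordered variants by wrapping them in barriers. A rough sketch of that fallback, with the macro shape approximated from the 4.7-era include/linux/atomic.h (illustrative, not part of this patch):

    /* Approximate shape of the generic ordered-from-relaxed fallback. */
    #ifndef atomic_fetch_add
    #define atomic_fetch_add(...)                                          \
    ({                                                                     \
            typeof(atomic_fetch_add_relaxed(__VA_ARGS__)) __ret;           \
            smp_mb__before_atomic();                                       \
            __ret = atomic_fetch_add_relaxed(__VA_ARGS__);                 \
            smp_mb__after_atomic();                                        \
            __ret;                                                         \
    })
    #endif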
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h index ce318d5ab23b..36389efd45e8 100644 --- a/arch/mn10300/include/asm/atomic.h +++ b/arch/mn10300/include/asm/atomic.h | |||
@@ -84,16 +84,41 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ | |||
84 | return retval; \ | 84 | return retval; \ |
85 | } | 85 | } |
86 | 86 | ||
87 | #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) | 87 | #define ATOMIC_FETCH_OP(op) \ |
88 | static inline int atomic_fetch_##op(int i, atomic_t *v) \ | ||
89 | { \ | ||
90 | int retval, status; \ | ||
91 | \ | ||
92 | asm volatile( \ | ||
93 | "1: mov %4,(_AAR,%3) \n" \ | ||
94 | " mov (_ADR,%3),%1 \n" \ | ||
95 | " mov %1,%0 \n" \ | ||
96 | " " #op " %5,%0 \n" \ | ||
97 | " mov %0,(_ADR,%3) \n" \ | ||
98 | " mov (_ADR,%3),%0 \n" /* flush */ \ | ||
99 | " mov (_ASR,%3),%0 \n" \ | ||
100 | " or %0,%0 \n" \ | ||
101 | " bne 1b \n" \ | ||
102 | : "=&r"(status), "=&r"(retval), "=m"(v->counter) \ | ||
103 | : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i) \ | ||
104 | : "memory", "cc"); \ | ||
105 | return retval; \ | ||
106 | } | ||
107 | |||
108 | #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op) | ||
88 | 109 | ||
89 | ATOMIC_OPS(add) | 110 | ATOMIC_OPS(add) |
90 | ATOMIC_OPS(sub) | 111 | ATOMIC_OPS(sub) |
91 | 112 | ||
92 | ATOMIC_OP(and) | 113 | #undef ATOMIC_OPS |
93 | ATOMIC_OP(or) | 114 | #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) |
94 | ATOMIC_OP(xor) | 115 | |
116 | ATOMIC_OPS(and) | ||
117 | ATOMIC_OPS(or) | ||
118 | ATOMIC_OPS(xor) | ||
95 | 119 | ||
96 | #undef ATOMIC_OPS | 120 | #undef ATOMIC_OPS |
121 | #undef ATOMIC_FETCH_OP | ||
97 | #undef ATOMIC_OP_RETURN | 122 | #undef ATOMIC_OP_RETURN |
98 | #undef ATOMIC_OP | 123 | #undef ATOMIC_OP |
99 | 124 | ||
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index 1d109990a022..5394b9c5f914 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h | |||
@@ -121,16 +121,39 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \ | |||
121 | return ret; \ | 121 | return ret; \ |
122 | } | 122 | } |
123 | 123 | ||
124 | #define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op) | 124 | #define ATOMIC_FETCH_OP(op, c_op) \ |
125 | static __inline__ int atomic_fetch_##op(int i, atomic_t *v) \ | ||
126 | { \ | ||
127 | unsigned long flags; \ | ||
128 | int ret; \ | ||
129 | \ | ||
130 | _atomic_spin_lock_irqsave(v, flags); \ | ||
131 | ret = v->counter; \ | ||
132 | v->counter c_op i; \ | ||
133 | _atomic_spin_unlock_irqrestore(v, flags); \ | ||
134 | \ | ||
135 | return ret; \ | ||
136 | } | ||
137 | |||
138 | #define ATOMIC_OPS(op, c_op) \ | ||
139 | ATOMIC_OP(op, c_op) \ | ||
140 | ATOMIC_OP_RETURN(op, c_op) \ | ||
141 | ATOMIC_FETCH_OP(op, c_op) | ||
125 | 142 | ||
126 | ATOMIC_OPS(add, +=) | 143 | ATOMIC_OPS(add, +=) |
127 | ATOMIC_OPS(sub, -=) | 144 | ATOMIC_OPS(sub, -=) |
128 | 145 | ||
129 | ATOMIC_OP(and, &=) | 146 | #undef ATOMIC_OPS |
130 | ATOMIC_OP(or, |=) | 147 | #define ATOMIC_OPS(op, c_op) \ |
131 | ATOMIC_OP(xor, ^=) | 148 | ATOMIC_OP(op, c_op) \ |
149 | ATOMIC_FETCH_OP(op, c_op) | ||
150 | |||
151 | ATOMIC_OPS(and, &=) | ||
152 | ATOMIC_OPS(or, |=) | ||
153 | ATOMIC_OPS(xor, ^=) | ||
132 | 154 | ||
133 | #undef ATOMIC_OPS | 155 | #undef ATOMIC_OPS |
156 | #undef ATOMIC_FETCH_OP | ||
134 | #undef ATOMIC_OP_RETURN | 157 | #undef ATOMIC_OP_RETURN |
135 | #undef ATOMIC_OP | 158 | #undef ATOMIC_OP |
136 | 159 | ||
@@ -185,15 +208,39 @@ static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \ | |||
185 | return ret; \ | 208 | return ret; \ |
186 | } | 209 | } |
187 | 210 | ||
188 | #define ATOMIC64_OPS(op, c_op) ATOMIC64_OP(op, c_op) ATOMIC64_OP_RETURN(op, c_op) | 211 | #define ATOMIC64_FETCH_OP(op, c_op) \ |
212 | static __inline__ s64 atomic64_fetch_##op(s64 i, atomic64_t *v) \ | ||
213 | { \ | ||
214 | unsigned long flags; \ | ||
215 | s64 ret; \ | ||
216 | \ | ||
217 | _atomic_spin_lock_irqsave(v, flags); \ | ||
218 | ret = v->counter; \ | ||
219 | v->counter c_op i; \ | ||
220 | _atomic_spin_unlock_irqrestore(v, flags); \ | ||
221 | \ | ||
222 | return ret; \ | ||
223 | } | ||
224 | |||
225 | #define ATOMIC64_OPS(op, c_op) \ | ||
226 | ATOMIC64_OP(op, c_op) \ | ||
227 | ATOMIC64_OP_RETURN(op, c_op) \ | ||
228 | ATOMIC64_FETCH_OP(op, c_op) | ||
189 | 229 | ||
190 | ATOMIC64_OPS(add, +=) | 230 | ATOMIC64_OPS(add, +=) |
191 | ATOMIC64_OPS(sub, -=) | 231 | ATOMIC64_OPS(sub, -=) |
192 | ATOMIC64_OP(and, &=) | ||
193 | ATOMIC64_OP(or, |=) | ||
194 | ATOMIC64_OP(xor, ^=) | ||
195 | 232 | ||
196 | #undef ATOMIC64_OPS | 233 | #undef ATOMIC64_OPS |
234 | #define ATOMIC64_OPS(op, c_op) \ | ||
235 | ATOMIC64_OP(op, c_op) \ | ||
236 | ATOMIC64_FETCH_OP(op, c_op) | ||
237 | |||
238 | ATOMIC64_OPS(and, &=) | ||
239 | ATOMIC64_OPS(or, |=) | ||
240 | ATOMIC64_OPS(xor, ^=) | ||
241 | |||
242 | #undef ATOMIC64_OPS | ||
243 | #undef ATOMIC64_FETCH_OP | ||
197 | #undef ATOMIC64_OP_RETURN | 244 | #undef ATOMIC64_OP_RETURN |
198 | #undef ATOMIC64_OP | 245 | #undef ATOMIC64_OP |
199 | 246 | ||
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index ae0751ef8788..f08d567e0ca4 100644 --- a/arch/powerpc/include/asm/atomic.h +++ b/arch/powerpc/include/asm/atomic.h | |||
@@ -78,21 +78,53 @@ static inline int atomic_##op##_return_relaxed(int a, atomic_t *v) \ | |||
78 | return t; \ | 78 | return t; \ |
79 | } | 79 | } |
80 | 80 | ||
81 | #define ATOMIC_FETCH_OP_RELAXED(op, asm_op) \ | ||
82 | static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v) \ | ||
83 | { \ | ||
84 | int res, t; \ | ||
85 | \ | ||
86 | __asm__ __volatile__( \ | ||
87 | "1: lwarx %0,0,%4 # atomic_fetch_" #op "_relaxed\n" \ | ||
88 | #asm_op " %1,%3,%0\n" \ | ||
89 | PPC405_ERR77(0, %4) \ | ||
90 | " stwcx. %1,0,%4\n" \ | ||
91 | " bne- 1b\n" \ | ||
92 | : "=&r" (res), "=&r" (t), "+m" (v->counter) \ | ||
93 | : "r" (a), "r" (&v->counter) \ | ||
94 | : "cc"); \ | ||
95 | \ | ||
96 | return res; \ | ||
97 | } | ||
98 | |||
81 | #define ATOMIC_OPS(op, asm_op) \ | 99 | #define ATOMIC_OPS(op, asm_op) \ |
82 | ATOMIC_OP(op, asm_op) \ | 100 | ATOMIC_OP(op, asm_op) \ |
83 | ATOMIC_OP_RETURN_RELAXED(op, asm_op) | 101 | ATOMIC_OP_RETURN_RELAXED(op, asm_op) \ |
102 | ATOMIC_FETCH_OP_RELAXED(op, asm_op) | ||
84 | 103 | ||
85 | ATOMIC_OPS(add, add) | 104 | ATOMIC_OPS(add, add) |
86 | ATOMIC_OPS(sub, subf) | 105 | ATOMIC_OPS(sub, subf) |
87 | 106 | ||
88 | ATOMIC_OP(and, and) | ||
89 | ATOMIC_OP(or, or) | ||
90 | ATOMIC_OP(xor, xor) | ||
91 | |||
92 | #define atomic_add_return_relaxed atomic_add_return_relaxed | 107 | #define atomic_add_return_relaxed atomic_add_return_relaxed |
93 | #define atomic_sub_return_relaxed atomic_sub_return_relaxed | 108 | #define atomic_sub_return_relaxed atomic_sub_return_relaxed |
94 | 109 | ||
110 | #define atomic_fetch_add_relaxed atomic_fetch_add_relaxed | ||
111 | #define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed | ||
112 | |||
113 | #undef ATOMIC_OPS | ||
114 | #define ATOMIC_OPS(op, asm_op) \ | ||
115 | ATOMIC_OP(op, asm_op) \ | ||
116 | ATOMIC_FETCH_OP_RELAXED(op, asm_op) | ||
117 | |||
118 | ATOMIC_OPS(and, and) | ||
119 | ATOMIC_OPS(or, or) | ||
120 | ATOMIC_OPS(xor, xor) | ||
121 | |||
122 | #define atomic_fetch_and_relaxed atomic_fetch_and_relaxed | ||
123 | #define atomic_fetch_or_relaxed atomic_fetch_or_relaxed | ||
124 | #define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed | ||
125 | |||
95 | #undef ATOMIC_OPS | 126 | #undef ATOMIC_OPS |
127 | #undef ATOMIC_FETCH_OP_RELAXED | ||
96 | #undef ATOMIC_OP_RETURN_RELAXED | 128 | #undef ATOMIC_OP_RETURN_RELAXED |
97 | #undef ATOMIC_OP | 129 | #undef ATOMIC_OP |
98 | 130 | ||
@@ -329,20 +361,53 @@ atomic64_##op##_return_relaxed(long a, atomic64_t *v) \ | |||
329 | return t; \ | 361 | return t; \ |
330 | } | 362 | } |
331 | 363 | ||
364 | #define ATOMIC64_FETCH_OP_RELAXED(op, asm_op) \ | ||
365 | static inline long \ | ||
366 | atomic64_fetch_##op##_relaxed(long a, atomic64_t *v) \ | ||
367 | { \ | ||
368 | long res, t; \ | ||
369 | \ | ||
370 | __asm__ __volatile__( \ | ||
371 | "1: ldarx %0,0,%4 # atomic64_fetch_" #op "_relaxed\n" \ | ||
372 | #asm_op " %1,%3,%0\n" \ | ||
373 | " stdcx. %1,0,%4\n" \ | ||
374 | " bne- 1b\n" \ | ||
375 | : "=&r" (res), "=&r" (t), "+m" (v->counter) \ | ||
376 | : "r" (a), "r" (&v->counter) \ | ||
377 | : "cc"); \ | ||
378 | \ | ||
379 | return res; \ | ||
380 | } | ||
381 | |||
332 | #define ATOMIC64_OPS(op, asm_op) \ | 382 | #define ATOMIC64_OPS(op, asm_op) \ |
333 | ATOMIC64_OP(op, asm_op) \ | 383 | ATOMIC64_OP(op, asm_op) \ |
334 | ATOMIC64_OP_RETURN_RELAXED(op, asm_op) | 384 | ATOMIC64_OP_RETURN_RELAXED(op, asm_op) \ |
385 | ATOMIC64_FETCH_OP_RELAXED(op, asm_op) | ||
335 | 386 | ||
336 | ATOMIC64_OPS(add, add) | 387 | ATOMIC64_OPS(add, add) |
337 | ATOMIC64_OPS(sub, subf) | 388 | ATOMIC64_OPS(sub, subf) |
338 | ATOMIC64_OP(and, and) | ||
339 | ATOMIC64_OP(or, or) | ||
340 | ATOMIC64_OP(xor, xor) | ||
341 | 389 | ||
342 | #define atomic64_add_return_relaxed atomic64_add_return_relaxed | 390 | #define atomic64_add_return_relaxed atomic64_add_return_relaxed |
343 | #define atomic64_sub_return_relaxed atomic64_sub_return_relaxed | 391 | #define atomic64_sub_return_relaxed atomic64_sub_return_relaxed |
344 | 392 | ||
393 | #define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed | ||
394 | #define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed | ||
395 | |||
396 | #undef ATOMIC64_OPS | ||
397 | #define ATOMIC64_OPS(op, asm_op) \ | ||
398 | ATOMIC64_OP(op, asm_op) \ | ||
399 | ATOMIC64_FETCH_OP_RELAXED(op, asm_op) | ||
400 | |||
401 | ATOMIC64_OPS(and, and) | ||
402 | ATOMIC64_OPS(or, or) | ||
403 | ATOMIC64_OPS(xor, xor) | ||
404 | |||
405 | #define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed | ||
406 | #define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed | ||
407 | #define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed | ||
408 | |||
345 | #undef ATOPIC64_OPS | 409 | #undef ATOPIC64_OPS |
410 | #undef ATOMIC64_FETCH_OP_RELAXED | ||
346 | #undef ATOMIC64_OP_RETURN_RELAXED | 411 | #undef ATOMIC64_OP_RETURN_RELAXED |
347 | #undef ATOMIC64_OP | 412 | #undef ATOMIC64_OP |
348 | 413 | ||
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index 911064aa59b2..d28cc2f5b7b2 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h | |||
@@ -93,6 +93,11 @@ static inline int atomic_add_return(int i, atomic_t *v) | |||
93 | return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i; | 93 | return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i; |
94 | } | 94 | } |
95 | 95 | ||
96 | static inline int atomic_fetch_add(int i, atomic_t *v) | ||
97 | { | ||
98 | return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER); | ||
99 | } | ||
100 | |||
96 | static inline void atomic_add(int i, atomic_t *v) | 101 | static inline void atomic_add(int i, atomic_t *v) |
97 | { | 102 | { |
98 | #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES | 103 | #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES |
@@ -114,22 +119,27 @@ static inline void atomic_add(int i, atomic_t *v) | |||
114 | #define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0) | 119 | #define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0) |
115 | #define atomic_sub(_i, _v) atomic_add(-(int)(_i), _v) | 120 | #define atomic_sub(_i, _v) atomic_add(-(int)(_i), _v) |
116 | #define atomic_sub_return(_i, _v) atomic_add_return(-(int)(_i), _v) | 121 | #define atomic_sub_return(_i, _v) atomic_add_return(-(int)(_i), _v) |
122 | #define atomic_fetch_sub(_i, _v) atomic_fetch_add(-(int)(_i), _v) | ||
117 | #define atomic_sub_and_test(_i, _v) (atomic_sub_return(_i, _v) == 0) | 123 | #define atomic_sub_and_test(_i, _v) (atomic_sub_return(_i, _v) == 0) |
118 | #define atomic_dec(_v) atomic_sub(1, _v) | 124 | #define atomic_dec(_v) atomic_sub(1, _v) |
119 | #define atomic_dec_return(_v) atomic_sub_return(1, _v) | 125 | #define atomic_dec_return(_v) atomic_sub_return(1, _v) |
120 | #define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0) | 126 | #define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0) |
121 | 127 | ||
122 | #define ATOMIC_OP(op, OP) \ | 128 | #define ATOMIC_OPS(op, OP) \ |
123 | static inline void atomic_##op(int i, atomic_t *v) \ | 129 | static inline void atomic_##op(int i, atomic_t *v) \ |
124 | { \ | 130 | { \ |
125 | __ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER); \ | 131 | __ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER); \ |
132 | } \ | ||
133 | static inline int atomic_fetch_##op(int i, atomic_t *v) \ | ||
134 | { \ | ||
135 | return __ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_BARRIER); \ | ||
126 | } | 136 | } |
127 | 137 | ||
128 | ATOMIC_OP(and, AND) | 138 | ATOMIC_OPS(and, AND) |
129 | ATOMIC_OP(or, OR) | 139 | ATOMIC_OPS(or, OR) |
130 | ATOMIC_OP(xor, XOR) | 140 | ATOMIC_OPS(xor, XOR) |
131 | 141 | ||
132 | #undef ATOMIC_OP | 142 | #undef ATOMIC_OPS |
133 | 143 | ||
134 | #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) | 144 | #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) |
135 | 145 | ||
@@ -236,6 +246,11 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v) | |||
236 | return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i; | 246 | return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i; |
237 | } | 247 | } |
238 | 248 | ||
249 | static inline long long atomic64_fetch_add(long long i, atomic64_t *v) | ||
250 | { | ||
251 | return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER); | ||
252 | } | ||
253 | |||
239 | static inline void atomic64_add(long long i, atomic64_t *v) | 254 | static inline void atomic64_add(long long i, atomic64_t *v) |
240 | { | 255 | { |
241 | #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES | 256 | #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES |
@@ -264,17 +279,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, | |||
264 | return old; | 279 | return old; |
265 | } | 280 | } |
266 | 281 | ||
267 | #define ATOMIC64_OP(op, OP) \ | 282 | #define ATOMIC64_OPS(op, OP) \ |
268 | static inline void atomic64_##op(long i, atomic64_t *v) \ | 283 | static inline void atomic64_##op(long i, atomic64_t *v) \ |
269 | { \ | 284 | { \ |
270 | __ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER); \ | 285 | __ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER); \ |
286 | } \ | ||
287 | static inline long atomic64_fetch_##op(long i, atomic64_t *v) \ | ||
288 | { \ | ||
289 | return __ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_BARRIER); \ | ||
271 | } | 290 | } |
272 | 291 | ||
273 | ATOMIC64_OP(and, AND) | 292 | ATOMIC64_OPS(and, AND) |
274 | ATOMIC64_OP(or, OR) | 293 | ATOMIC64_OPS(or, OR) |
275 | ATOMIC64_OP(xor, XOR) | 294 | ATOMIC64_OPS(xor, XOR) |
276 | 295 | ||
277 | #undef ATOMIC64_OP | 296 | #undef ATOMIC64_OPS |
278 | #undef __ATOMIC64_LOOP | 297 | #undef __ATOMIC64_LOOP |
279 | 298 | ||
280 | static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u) | 299 | static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u) |
@@ -315,6 +334,7 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v) | |||
315 | #define atomic64_inc_return(_v) atomic64_add_return(1, _v) | 334 | #define atomic64_inc_return(_v) atomic64_add_return(1, _v) |
316 | #define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0) | 335 | #define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0) |
317 | #define atomic64_sub_return(_i, _v) atomic64_add_return(-(long long)(_i), _v) | 336 | #define atomic64_sub_return(_i, _v) atomic64_add_return(-(long long)(_i), _v) |
337 | #define atomic64_fetch_sub(_i, _v) atomic64_fetch_add(-(long long)(_i), _v) | ||
318 | #define atomic64_sub(_i, _v) atomic64_add(-(long long)(_i), _v) | 338 | #define atomic64_sub(_i, _v) atomic64_add(-(long long)(_i), _v) |
319 | #define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0) | 339 | #define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0) |
320 | #define atomic64_dec(_v) atomic64_sub(1, _v) | 340 | #define atomic64_dec(_v) atomic64_sub(1, _v) |
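On s390 the __ATOMIC_LOOP()/__ATOMIC64_LOOP() helpers already yield the value the counter held before the operation, which is why atomic_add_return() is the loop result plus i while the new atomic_fetch_add() returns the loop result unchanged, and atomic_fetch_sub() is simply an add of the negated operand. A hedged usage sketch of the resulting API semantics (generic demo code, not from the patch):

    /* fetch_* returns the value before the op, *_return the value after it. */
    static void atomic_semantics_demo(void)
    {
            atomic_t cnt = ATOMIC_INIT(5);
            int old, new;

            old = atomic_fetch_add(3, &cnt);        /* old == 5,  counter now 8  */
            new = atomic_add_return(3, &cnt);       /* new == 11, counter now 11 */
            old = atomic_fetch_sub(1, &cnt);        /* old == 11, counter now 10 */
            (void)old; (void)new;
    }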
diff --git a/arch/sh/include/asm/atomic-grb.h b/arch/sh/include/asm/atomic-grb.h index b94df40e5f2d..d755e96c3064 100644 --- a/arch/sh/include/asm/atomic-grb.h +++ b/arch/sh/include/asm/atomic-grb.h | |||
@@ -43,16 +43,42 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ | |||
43 | return tmp; \ | 43 | return tmp; \ |
44 | } | 44 | } |
45 | 45 | ||
46 | #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) | 46 | #define ATOMIC_FETCH_OP(op) \ |
47 | static inline int atomic_fetch_##op(int i, atomic_t *v) \ | ||
48 | { \ | ||
49 | int res, tmp; \ | ||
50 | \ | ||
51 | __asm__ __volatile__ ( \ | ||
52 | " .align 2 \n\t" \ | ||
53 | " mova 1f, r0 \n\t" /* r0 = end point */ \ | ||
54 | " mov r15, r1 \n\t" /* r1 = saved sp */ \ | ||
55 | " mov #-6, r15 \n\t" /* LOGIN: r15 = size */ \ | ||
56 | " mov.l @%2, %0 \n\t" /* load old value */ \ | ||
57 | " mov %0, %1 \n\t" /* save old value */ \ | ||
58 | " " #op " %3, %0 \n\t" /* $op */ \ | ||
59 | " mov.l %0, @%2 \n\t" /* store new value */ \ | ||
60 | "1: mov r1, r15 \n\t" /* LOGOUT */ \ | ||
61 | : "=&r" (tmp), "=&r" (res), "+r" (v) \ | ||
62 | : "r" (i) \ | ||
63 | : "memory" , "r0", "r1"); \ | ||
64 | \ | ||
65 | return res; \ | ||
66 | } | ||
67 | |||
68 | #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op) | ||
47 | 69 | ||
48 | ATOMIC_OPS(add) | 70 | ATOMIC_OPS(add) |
49 | ATOMIC_OPS(sub) | 71 | ATOMIC_OPS(sub) |
50 | 72 | ||
51 | ATOMIC_OP(and) | 73 | #undef ATOMIC_OPS |
52 | ATOMIC_OP(or) | 74 | #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) |
53 | ATOMIC_OP(xor) | 75 | |
76 | ATOMIC_OPS(and) | ||
77 | ATOMIC_OPS(or) | ||
78 | ATOMIC_OPS(xor) | ||
54 | 79 | ||
55 | #undef ATOMIC_OPS | 80 | #undef ATOMIC_OPS |
81 | #undef ATOMIC_FETCH_OP | ||
56 | #undef ATOMIC_OP_RETURN | 82 | #undef ATOMIC_OP_RETURN |
57 | #undef ATOMIC_OP | 83 | #undef ATOMIC_OP |
58 | 84 | ||
diff --git a/arch/sh/include/asm/atomic-irq.h b/arch/sh/include/asm/atomic-irq.h index 23fcdad5773e..8e2da5fa0178 100644 --- a/arch/sh/include/asm/atomic-irq.h +++ b/arch/sh/include/asm/atomic-irq.h | |||
@@ -33,15 +33,38 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ | |||
33 | return temp; \ | 33 | return temp; \ |
34 | } | 34 | } |
35 | 35 | ||
36 | #define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op) | 36 | #define ATOMIC_FETCH_OP(op, c_op) \ |
37 | static inline int atomic_fetch_##op(int i, atomic_t *v) \ | ||
38 | { \ | ||
39 | unsigned long temp, flags; \ | ||
40 | \ | ||
41 | raw_local_irq_save(flags); \ | ||
42 | temp = v->counter; \ | ||
43 | v->counter c_op i; \ | ||
44 | raw_local_irq_restore(flags); \ | ||
45 | \ | ||
46 | return temp; \ | ||
47 | } | ||
48 | |||
49 | #define ATOMIC_OPS(op, c_op) \ | ||
50 | ATOMIC_OP(op, c_op) \ | ||
51 | ATOMIC_OP_RETURN(op, c_op) \ | ||
52 | ATOMIC_FETCH_OP(op, c_op) | ||
37 | 53 | ||
38 | ATOMIC_OPS(add, +=) | 54 | ATOMIC_OPS(add, +=) |
39 | ATOMIC_OPS(sub, -=) | 55 | ATOMIC_OPS(sub, -=) |
40 | ATOMIC_OP(and, &=) | ||
41 | ATOMIC_OP(or, |=) | ||
42 | ATOMIC_OP(xor, ^=) | ||
43 | 56 | ||
44 | #undef ATOMIC_OPS | 57 | #undef ATOMIC_OPS |
58 | #define ATOMIC_OPS(op, c_op) \ | ||
59 | ATOMIC_OP(op, c_op) \ | ||
60 | ATOMIC_FETCH_OP(op, c_op) | ||
61 | |||
62 | ATOMIC_OPS(and, &=) | ||
63 | ATOMIC_OPS(or, |=) | ||
64 | ATOMIC_OPS(xor, ^=) | ||
65 | |||
66 | #undef ATOMIC_OPS | ||
67 | #undef ATOMIC_FETCH_OP | ||
45 | #undef ATOMIC_OP_RETURN | 68 | #undef ATOMIC_OP_RETURN |
46 | #undef ATOMIC_OP | 69 | #undef ATOMIC_OP |
47 | 70 | ||
diff --git a/arch/sh/include/asm/atomic-llsc.h b/arch/sh/include/asm/atomic-llsc.h index 33d34b16d4d6..caea2c45f6c2 100644 --- a/arch/sh/include/asm/atomic-llsc.h +++ b/arch/sh/include/asm/atomic-llsc.h | |||
@@ -48,15 +48,39 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ | |||
48 | return temp; \ | 48 | return temp; \ |
49 | } | 49 | } |
50 | 50 | ||
51 | #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) | 51 | #define ATOMIC_FETCH_OP(op) \ |
52 | static inline int atomic_fetch_##op(int i, atomic_t *v) \ | ||
53 | { \ | ||
54 | unsigned long res, temp; \ | ||
55 | \ | ||
56 | __asm__ __volatile__ ( \ | ||
57 | "1: movli.l @%3, %0 ! atomic_fetch_" #op " \n" \ | ||
58 | " mov %0, %1 \n" \ | ||
59 | " " #op " %2, %0 \n" \ | ||
60 | " movco.l %0, @%3 \n" \ | ||
61 | " bf 1b \n" \ | ||
62 | " synco \n" \ | ||
63 | : "=&z" (temp), "=&z" (res) \ | ||
64 | : "r" (i), "r" (&v->counter) \ | ||
65 | : "t"); \ | ||
66 | \ | ||
67 | return res; \ | ||
68 | } | ||
69 | |||
70 | #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op) | ||
52 | 71 | ||
53 | ATOMIC_OPS(add) | 72 | ATOMIC_OPS(add) |
54 | ATOMIC_OPS(sub) | 73 | ATOMIC_OPS(sub) |
55 | ATOMIC_OP(and) | ||
56 | ATOMIC_OP(or) | ||
57 | ATOMIC_OP(xor) | ||
58 | 74 | ||
59 | #undef ATOMIC_OPS | 75 | #undef ATOMIC_OPS |
76 | #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) | ||
77 | |||
78 | ATOMIC_OPS(and) | ||
79 | ATOMIC_OPS(or) | ||
80 | ATOMIC_OPS(xor) | ||
81 | |||
82 | #undef ATOMIC_OPS | ||
83 | #undef ATOMIC_FETCH_OP | ||
60 | #undef ATOMIC_OP_RETURN | 84 | #undef ATOMIC_OP_RETURN |
61 | #undef ATOMIC_OP | 85 | #undef ATOMIC_OP |
62 | 86 | ||
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h index 7dcbebbcaec6..ee3f11c43cda 100644 --- a/arch/sparc/include/asm/atomic_32.h +++ b/arch/sparc/include/asm/atomic_32.h | |||
@@ -20,9 +20,10 @@ | |||
20 | #define ATOMIC_INIT(i) { (i) } | 20 | #define ATOMIC_INIT(i) { (i) } |
21 | 21 | ||
22 | int atomic_add_return(int, atomic_t *); | 22 | int atomic_add_return(int, atomic_t *); |
23 | void atomic_and(int, atomic_t *); | 23 | int atomic_fetch_add(int, atomic_t *); |
24 | void atomic_or(int, atomic_t *); | 24 | int atomic_fetch_and(int, atomic_t *); |
25 | void atomic_xor(int, atomic_t *); | 25 | int atomic_fetch_or(int, atomic_t *); |
26 | int atomic_fetch_xor(int, atomic_t *); | ||
26 | int atomic_cmpxchg(atomic_t *, int, int); | 27 | int atomic_cmpxchg(atomic_t *, int, int); |
27 | int atomic_xchg(atomic_t *, int); | 28 | int atomic_xchg(atomic_t *, int); |
28 | int __atomic_add_unless(atomic_t *, int, int); | 29 | int __atomic_add_unless(atomic_t *, int, int); |
@@ -35,7 +36,13 @@ void atomic_set(atomic_t *, int); | |||
35 | #define atomic_inc(v) ((void)atomic_add_return( 1, (v))) | 36 | #define atomic_inc(v) ((void)atomic_add_return( 1, (v))) |
36 | #define atomic_dec(v) ((void)atomic_add_return( -1, (v))) | 37 | #define atomic_dec(v) ((void)atomic_add_return( -1, (v))) |
37 | 38 | ||
39 | #define atomic_and(i, v) ((void)atomic_fetch_and((i), (v))) | ||
40 | #define atomic_or(i, v) ((void)atomic_fetch_or((i), (v))) | ||
41 | #define atomic_xor(i, v) ((void)atomic_fetch_xor((i), (v))) | ||
42 | |||
38 | #define atomic_sub_return(i, v) (atomic_add_return(-(int)(i), (v))) | 43 | #define atomic_sub_return(i, v) (atomic_add_return(-(int)(i), (v))) |
44 | #define atomic_fetch_sub(i, v) (atomic_fetch_add (-(int)(i), (v))) | ||
45 | |||
39 | #define atomic_inc_return(v) (atomic_add_return( 1, (v))) | 46 | #define atomic_inc_return(v) (atomic_add_return( 1, (v))) |
40 | #define atomic_dec_return(v) (atomic_add_return( -1, (v))) | 47 | #define atomic_dec_return(v) (atomic_add_return( -1, (v))) |
41 | 48 | ||
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h index f2fbf9e16faf..24827a3f733a 100644 --- a/arch/sparc/include/asm/atomic_64.h +++ b/arch/sparc/include/asm/atomic_64.h | |||
@@ -28,16 +28,24 @@ void atomic64_##op(long, atomic64_t *); | |||
28 | int atomic_##op##_return(int, atomic_t *); \ | 28 | int atomic_##op##_return(int, atomic_t *); \ |
29 | long atomic64_##op##_return(long, atomic64_t *); | 29 | long atomic64_##op##_return(long, atomic64_t *); |
30 | 30 | ||
31 | #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) | 31 | #define ATOMIC_FETCH_OP(op) \ |
32 | int atomic_fetch_##op(int, atomic_t *); \ | ||
33 | long atomic64_fetch_##op(long, atomic64_t *); | ||
34 | |||
35 | #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op) | ||
32 | 36 | ||
33 | ATOMIC_OPS(add) | 37 | ATOMIC_OPS(add) |
34 | ATOMIC_OPS(sub) | 38 | ATOMIC_OPS(sub) |
35 | 39 | ||
36 | ATOMIC_OP(and) | 40 | #undef ATOMIC_OPS |
37 | ATOMIC_OP(or) | 41 | #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) |
38 | ATOMIC_OP(xor) | 42 | |
43 | ATOMIC_OPS(and) | ||
44 | ATOMIC_OPS(or) | ||
45 | ATOMIC_OPS(xor) | ||
39 | 46 | ||
40 | #undef ATOMIC_OPS | 47 | #undef ATOMIC_OPS |
48 | #undef ATOMIC_FETCH_OP | ||
41 | #undef ATOMIC_OP_RETURN | 49 | #undef ATOMIC_OP_RETURN |
42 | #undef ATOMIC_OP | 50 | #undef ATOMIC_OP |
43 | 51 | ||
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c index b9d63c0a7aab..2c373329d5cb 100644 --- a/arch/sparc/lib/atomic32.c +++ b/arch/sparc/lib/atomic32.c | |||
@@ -27,39 +27,44 @@ static DEFINE_SPINLOCK(dummy); | |||
27 | 27 | ||
28 | #endif /* SMP */ | 28 | #endif /* SMP */ |
29 | 29 | ||
30 | #define ATOMIC_OP_RETURN(op, c_op) \ | 30 | #define ATOMIC_FETCH_OP(op, c_op) \ |
31 | int atomic_##op##_return(int i, atomic_t *v) \ | 31 | int atomic_fetch_##op(int i, atomic_t *v) \ |
32 | { \ | 32 | { \ |
33 | int ret; \ | 33 | int ret; \ |
34 | unsigned long flags; \ | 34 | unsigned long flags; \ |
35 | spin_lock_irqsave(ATOMIC_HASH(v), flags); \ | 35 | spin_lock_irqsave(ATOMIC_HASH(v), flags); \ |
36 | \ | 36 | \ |
37 | ret = (v->counter c_op i); \ | 37 | ret = v->counter; \ |
38 | v->counter c_op i; \ | ||
38 | \ | 39 | \ |
39 | spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \ | 40 | spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \ |
40 | return ret; \ | 41 | return ret; \ |
41 | } \ | 42 | } \ |
42 | EXPORT_SYMBOL(atomic_##op##_return); | 43 | EXPORT_SYMBOL(atomic_fetch_##op); |
43 | 44 | ||
44 | #define ATOMIC_OP(op, c_op) \ | 45 | #define ATOMIC_OP_RETURN(op, c_op) \ |
45 | void atomic_##op(int i, atomic_t *v) \ | 46 | int atomic_##op##_return(int i, atomic_t *v) \ |
46 | { \ | 47 | { \ |
48 | int ret; \ | ||
47 | unsigned long flags; \ | 49 | unsigned long flags; \ |
48 | spin_lock_irqsave(ATOMIC_HASH(v), flags); \ | 50 | spin_lock_irqsave(ATOMIC_HASH(v), flags); \ |
49 | \ | 51 | \ |
50 | v->counter c_op i; \ | 52 | ret = (v->counter c_op i); \ |
51 | \ | 53 | \ |
52 | spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \ | 54 | spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \ |
55 | return ret; \ | ||
53 | } \ | 56 | } \ |
54 | EXPORT_SYMBOL(atomic_##op); | 57 | EXPORT_SYMBOL(atomic_##op##_return); |
55 | 58 | ||
56 | ATOMIC_OP_RETURN(add, +=) | 59 | ATOMIC_OP_RETURN(add, +=) |
57 | ATOMIC_OP(and, &=) | ||
58 | ATOMIC_OP(or, |=) | ||
59 | ATOMIC_OP(xor, ^=) | ||
60 | 60 | ||
61 | ATOMIC_FETCH_OP(add, +=) | ||
62 | ATOMIC_FETCH_OP(and, &=) | ||
63 | ATOMIC_FETCH_OP(or, |=) | ||
64 | ATOMIC_FETCH_OP(xor, ^=) | ||
65 | |||
66 | #undef ATOMIC_FETCH_OP | ||
61 | #undef ATOMIC_OP_RETURN | 67 | #undef ATOMIC_OP_RETURN |
62 | #undef ATOMIC_OP | ||
63 | 68 | ||
64 | int atomic_xchg(atomic_t *v, int new) | 69 | int atomic_xchg(atomic_t *v, int new) |
65 | { | 70 | { |
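In sparc's atomic32.c the void ATOMIC_OP() bodies for and/or/xor disappear because atomic_32.h now defines atomic_and/or/xor as (void)atomic_fetch_*(), and the file instead generates hashed-spinlock fetch variants alongside atomic_add_return(). Hand-expanding the two macros for add shows the only real difference, namely whether the counter is read before or after the update (illustrative sketch, EXPORT_SYMBOL lines omitted):

    int atomic_add_return(int i, atomic_t *v)
    {
            int ret;
            unsigned long flags;

            spin_lock_irqsave(ATOMIC_HASH(v), flags);
            ret = (v->counter += i);                /* new value */
            spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
            return ret;
    }

    int atomic_fetch_add(int i, atomic_t *v)
    {
            int ret;
            unsigned long flags;

            spin_lock_irqsave(ATOMIC_HASH(v), flags);
            ret = v->counter;                       /* old value */
            v->counter += i;
            spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
            return ret;
    }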
diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S index d6b0363f345b..a5c5a0279ccc 100644 --- a/arch/sparc/lib/atomic_64.S +++ b/arch/sparc/lib/atomic_64.S | |||
@@ -9,10 +9,11 @@ | |||
9 | 9 | ||
10 | .text | 10 | .text |
11 | 11 | ||
12 | /* Two versions of the atomic routines, one that | 12 | /* Three versions of the atomic routines, one that |
13 | * does not return a value and does not perform | 13 | * does not return a value and does not perform |
14 | * memory barriers, and a second which returns | 14 | * memory barriers, and a two which return |
15 | * a value and does the barriers. | 15 | * a value, the new and old value resp. and does the |
16 | * barriers. | ||
16 | */ | 17 | */ |
17 | 18 | ||
18 | #define ATOMIC_OP(op) \ | 19 | #define ATOMIC_OP(op) \ |
@@ -43,15 +44,34 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \ | |||
43 | 2: BACKOFF_SPIN(%o2, %o3, 1b); \ | 44 | 2: BACKOFF_SPIN(%o2, %o3, 1b); \ |
44 | ENDPROC(atomic_##op##_return); | 45 | ENDPROC(atomic_##op##_return); |
45 | 46 | ||
46 | #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) | 47 | #define ATOMIC_FETCH_OP(op) \ |
48 | ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ | ||
49 | BACKOFF_SETUP(%o2); \ | ||
50 | 1: lduw [%o1], %g1; \ | ||
51 | op %g1, %o0, %g7; \ | ||
52 | cas [%o1], %g1, %g7; \ | ||
53 | cmp %g1, %g7; \ | ||
54 | bne,pn %icc, BACKOFF_LABEL(2f, 1b); \ | ||
55 | nop; \ | ||
56 | retl; \ | ||
57 | sra %g1, 0, %o0; \ | ||
58 | 2: BACKOFF_SPIN(%o2, %o3, 1b); \ | ||
59 | ENDPROC(atomic_fetch_##op); | ||
60 | |||
61 | #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op) | ||
47 | 62 | ||
48 | ATOMIC_OPS(add) | 63 | ATOMIC_OPS(add) |
49 | ATOMIC_OPS(sub) | 64 | ATOMIC_OPS(sub) |
50 | ATOMIC_OP(and) | ||
51 | ATOMIC_OP(or) | ||
52 | ATOMIC_OP(xor) | ||
53 | 65 | ||
54 | #undef ATOMIC_OPS | 66 | #undef ATOMIC_OPS |
67 | #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) | ||
68 | |||
69 | ATOMIC_OPS(and) | ||
70 | ATOMIC_OPS(or) | ||
71 | ATOMIC_OPS(xor) | ||
72 | |||
73 | #undef ATOMIC_OPS | ||
74 | #undef ATOMIC_FETCH_OP | ||
55 | #undef ATOMIC_OP_RETURN | 75 | #undef ATOMIC_OP_RETURN |
56 | #undef ATOMIC_OP | 76 | #undef ATOMIC_OP |
57 | 77 | ||
@@ -83,15 +103,34 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \ | |||
83 | 2: BACKOFF_SPIN(%o2, %o3, 1b); \ | 103 | 2: BACKOFF_SPIN(%o2, %o3, 1b); \ |
84 | ENDPROC(atomic64_##op##_return); | 104 | ENDPROC(atomic64_##op##_return); |
85 | 105 | ||
86 | #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) | 106 | #define ATOMIC64_FETCH_OP(op) \ |
107 | ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ | ||
108 | BACKOFF_SETUP(%o2); \ | ||
109 | 1: ldx [%o1], %g1; \ | ||
110 | op %g1, %o0, %g7; \ | ||
111 | casx [%o1], %g1, %g7; \ | ||
112 | cmp %g1, %g7; \ | ||
113 | bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \ | ||
114 | nop; \ | ||
115 | retl; \ | ||
116 | mov %g1, %o0; \ | ||
117 | 2: BACKOFF_SPIN(%o2, %o3, 1b); \ | ||
118 | ENDPROC(atomic64_fetch_##op); | ||
119 | |||
120 | #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op) | ||
87 | 121 | ||
88 | ATOMIC64_OPS(add) | 122 | ATOMIC64_OPS(add) |
89 | ATOMIC64_OPS(sub) | 123 | ATOMIC64_OPS(sub) |
90 | ATOMIC64_OP(and) | ||
91 | ATOMIC64_OP(or) | ||
92 | ATOMIC64_OP(xor) | ||
93 | 124 | ||
94 | #undef ATOMIC64_OPS | 125 | #undef ATOMIC64_OPS |
126 | #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_FETCH_OP(op) | ||
127 | |||
128 | ATOMIC64_OPS(and) | ||
129 | ATOMIC64_OPS(or) | ||
130 | ATOMIC64_OPS(xor) | ||
131 | |||
132 | #undef ATOMIC64_OPS | ||
133 | #undef ATOMIC64_FETCH_OP | ||
95 | #undef ATOMIC64_OP_RETURN | 134 | #undef ATOMIC64_OP_RETURN |
96 | #undef ATOMIC64_OP | 135 | #undef ATOMIC64_OP |
97 | 136 | ||
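Editor's note: the sparc64 assembly above implements each fetch variant as a cas retry loop: load the current value, compute value op i into a scratch register, cas it in, and spin (with backoff) until the cas observes the value that was loaded; that loaded value is what gets returned. A hedged C-level sketch of the same shape, written against the generic atomic64_cmpxchg() rather than the raw cas/casx instructions:

#include <linux/atomic.h>

static long demo_atomic64_fetch_or(long i, atomic64_t *v)
{
        long prev = atomic64_read(v);
        long old;

        for (;;) {
                old = atomic64_cmpxchg(v, prev, prev | i);  /* try to install prev | i */
                if (old == prev)
                        return old;                         /* won the race: old value out */
                prev = old;                                 /* lost it: retry with what we saw */
        }
}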
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c index 8eb454cfe05c..de5e97817bdb 100644 --- a/arch/sparc/lib/ksyms.c +++ b/arch/sparc/lib/ksyms.c | |||
@@ -107,15 +107,24 @@ EXPORT_SYMBOL(atomic64_##op); | |||
107 | EXPORT_SYMBOL(atomic_##op##_return); \ | 107 | EXPORT_SYMBOL(atomic_##op##_return); \ |
108 | EXPORT_SYMBOL(atomic64_##op##_return); | 108 | EXPORT_SYMBOL(atomic64_##op##_return); |
109 | 109 | ||
110 | #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) | 110 | #define ATOMIC_FETCH_OP(op) \ |
111 | EXPORT_SYMBOL(atomic_fetch_##op); \ | ||
112 | EXPORT_SYMBOL(atomic64_fetch_##op); | ||
113 | |||
114 | #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op) | ||
111 | 115 | ||
112 | ATOMIC_OPS(add) | 116 | ATOMIC_OPS(add) |
113 | ATOMIC_OPS(sub) | 117 | ATOMIC_OPS(sub) |
114 | ATOMIC_OP(and) | ||
115 | ATOMIC_OP(or) | ||
116 | ATOMIC_OP(xor) | ||
117 | 118 | ||
118 | #undef ATOMIC_OPS | 119 | #undef ATOMIC_OPS |
120 | #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) | ||
121 | |||
122 | ATOMIC_OPS(and) | ||
123 | ATOMIC_OPS(or) | ||
124 | ATOMIC_OPS(xor) | ||
125 | |||
126 | #undef ATOMIC_OPS | ||
127 | #undef ATOMIC_FETCH_OP | ||
119 | #undef ATOMIC_OP_RETURN | 128 | #undef ATOMIC_OP_RETURN |
120 | #undef ATOMIC_OP | 129 | #undef ATOMIC_OP |
121 | 130 | ||
diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h index 9fc0107a9c5e..8dda3c8ff5ab 100644 --- a/arch/tile/include/asm/atomic.h +++ b/arch/tile/include/asm/atomic.h | |||
@@ -46,6 +46,8 @@ static inline int atomic_read(const atomic_t *v) | |||
46 | */ | 46 | */ |
47 | #define atomic_sub_return(i, v) atomic_add_return((int)(-(i)), (v)) | 47 | #define atomic_sub_return(i, v) atomic_add_return((int)(-(i)), (v)) |
48 | 48 | ||
49 | #define atomic_fetch_sub(i, v) atomic_fetch_add(-(int)(i), (v)) | ||
50 | |||
49 | /** | 51 | /** |
50 | * atomic_sub - subtract integer from atomic variable | 52 | * atomic_sub - subtract integer from atomic variable |
51 | * @i: integer value to subtract | 53 | * @i: integer value to subtract |
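Editor's note: tile derives atomic_fetch_sub() from atomic_fetch_add() by negating the operand, so both hand back the counter's value from before the update. A small usage sketch of that fetch value in a refcount-style pattern (demo code, not from the tree):

#include <linux/atomic.h>
#include <linux/types.h>

static bool demo_put_ref(atomic_t *refs)
{
        /* atomic_fetch_sub(i, v) expands to atomic_fetch_add(-(int)(i), v) and
         * returns the pre-decrement value, so "was 1" means this caller
         * dropped the last reference. */
        return atomic_fetch_sub(1, refs) == 1;
}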
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h index d320ce253d86..a93774255136 100644 --- a/arch/tile/include/asm/atomic_32.h +++ b/arch/tile/include/asm/atomic_32.h | |||
@@ -34,18 +34,29 @@ static inline void atomic_add(int i, atomic_t *v) | |||
34 | _atomic_xchg_add(&v->counter, i); | 34 | _atomic_xchg_add(&v->counter, i); |
35 | } | 35 | } |
36 | 36 | ||
37 | #define ATOMIC_OP(op) \ | 37 | #define ATOMIC_OPS(op) \ |
38 | unsigned long _atomic_##op(volatile unsigned long *p, unsigned long mask); \ | 38 | unsigned long _atomic_fetch_##op(volatile unsigned long *p, unsigned long mask); \ |
39 | static inline void atomic_##op(int i, atomic_t *v) \ | 39 | static inline void atomic_##op(int i, atomic_t *v) \ |
40 | { \ | 40 | { \ |
41 | _atomic_##op((unsigned long *)&v->counter, i); \ | 41 | _atomic_fetch_##op((unsigned long *)&v->counter, i); \ |
42 | } \ | ||
43 | static inline int atomic_fetch_##op(int i, atomic_t *v) \ | ||
44 | { \ | ||
45 | smp_mb(); \ | ||
46 | return _atomic_fetch_##op((unsigned long *)&v->counter, i); \ | ||
42 | } | 47 | } |
43 | 48 | ||
44 | ATOMIC_OP(and) | 49 | ATOMIC_OPS(and) |
45 | ATOMIC_OP(or) | 50 | ATOMIC_OPS(or) |
46 | ATOMIC_OP(xor) | 51 | ATOMIC_OPS(xor) |
47 | 52 | ||
48 | #undef ATOMIC_OP | 53 | #undef ATOMIC_OPS |
54 | |||
55 | static inline int atomic_fetch_add(int i, atomic_t *v) | ||
56 | { | ||
57 | smp_mb(); | ||
58 | return _atomic_xchg_add(&v->counter, i); | ||
59 | } | ||
49 | 60 | ||
50 | /** | 61 | /** |
51 | * atomic_add_return - add integer and return | 62 | * atomic_add_return - add integer and return |
@@ -126,16 +137,29 @@ static inline void atomic64_add(long long i, atomic64_t *v) | |||
126 | _atomic64_xchg_add(&v->counter, i); | 137 | _atomic64_xchg_add(&v->counter, i); |
127 | } | 138 | } |
128 | 139 | ||
129 | #define ATOMIC64_OP(op) \ | 140 | #define ATOMIC64_OPS(op) \ |
130 | long long _atomic64_##op(long long *v, long long n); \ | 141 | long long _atomic64_fetch_##op(long long *v, long long n); \ |
131 | static inline void atomic64_##op(long long i, atomic64_t *v) \ | 142 | static inline void atomic64_##op(long long i, atomic64_t *v) \ |
132 | { \ | 143 | { \ |
133 | _atomic64_##op(&v->counter, i); \ | 144 | _atomic64_fetch_##op(&v->counter, i); \ |
145 | } \ | ||
146 | static inline long long atomic64_fetch_##op(long long i, atomic64_t *v) \ | ||
147 | { \ | ||
148 | smp_mb(); \ | ||
149 | return _atomic64_fetch_##op(&v->counter, i); \ | ||
134 | } | 150 | } |
135 | 151 | ||
136 | ATOMIC64_OP(and) | 152 | ATOMIC64_OPS(and) |
137 | ATOMIC64_OP(or) | 153 | ATOMIC64_OPS(or) |
138 | ATOMIC64_OP(xor) | 154 | ATOMIC64_OPS(xor) |
155 | |||
156 | #undef ATOMIC64_OPS | ||
157 | |||
158 | static inline long long atomic64_fetch_add(long long i, atomic64_t *v) | ||
159 | { | ||
160 | smp_mb(); | ||
161 | return _atomic64_xchg_add(&v->counter, i); | ||
162 | } | ||
139 | 163 | ||
140 | /** | 164 | /** |
141 | * atomic64_add_return - add integer and return | 165 | * atomic64_add_return - add integer and return |
@@ -186,6 +210,7 @@ static inline void atomic64_set(atomic64_t *v, long long n) | |||
186 | #define atomic64_inc_return(v) atomic64_add_return(1LL, (v)) | 210 | #define atomic64_inc_return(v) atomic64_add_return(1LL, (v)) |
187 | #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) | 211 | #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) |
188 | #define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v)) | 212 | #define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v)) |
213 | #define atomic64_fetch_sub(i, v) atomic64_fetch_add(-(i), (v)) | ||
189 | #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0) | 214 | #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0) |
190 | #define atomic64_sub(i, v) atomic64_add(-(i), (v)) | 215 | #define atomic64_sub(i, v) atomic64_add(-(i), (v)) |
191 | #define atomic64_dec(v) atomic64_sub(1LL, (v)) | 216 | #define atomic64_dec(v) atomic64_sub(1LL, (v)) |
@@ -193,7 +218,6 @@ static inline void atomic64_set(atomic64_t *v, long long n) | |||
193 | #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) | 218 | #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) |
194 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL) | 219 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL) |
195 | 220 | ||
196 | |||
197 | #endif /* !__ASSEMBLY__ */ | 221 | #endif /* !__ASSEMBLY__ */ |
198 | 222 | ||
199 | /* | 223 | /* |
@@ -242,16 +266,16 @@ struct __get_user { | |||
242 | unsigned long val; | 266 | unsigned long val; |
243 | int err; | 267 | int err; |
244 | }; | 268 | }; |
245 | extern struct __get_user __atomic_cmpxchg(volatile int *p, | 269 | extern struct __get_user __atomic32_cmpxchg(volatile int *p, |
246 | int *lock, int o, int n); | 270 | int *lock, int o, int n); |
247 | extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n); | 271 | extern struct __get_user __atomic32_xchg(volatile int *p, int *lock, int n); |
248 | extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n); | 272 | extern struct __get_user __atomic32_xchg_add(volatile int *p, int *lock, int n); |
249 | extern struct __get_user __atomic_xchg_add_unless(volatile int *p, | 273 | extern struct __get_user __atomic32_xchg_add_unless(volatile int *p, |
250 | int *lock, int o, int n); | 274 | int *lock, int o, int n); |
251 | extern struct __get_user __atomic_or(volatile int *p, int *lock, int n); | 275 | extern struct __get_user __atomic32_fetch_or(volatile int *p, int *lock, int n); |
252 | extern struct __get_user __atomic_and(volatile int *p, int *lock, int n); | 276 | extern struct __get_user __atomic32_fetch_and(volatile int *p, int *lock, int n); |
253 | extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n); | 277 | extern struct __get_user __atomic32_fetch_andn(volatile int *p, int *lock, int n); |
254 | extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n); | 278 | extern struct __get_user __atomic32_fetch_xor(volatile int *p, int *lock, int n); |
255 | extern long long __atomic64_cmpxchg(volatile long long *p, int *lock, | 279 | extern long long __atomic64_cmpxchg(volatile long long *p, int *lock, |
256 | long long o, long long n); | 280 | long long o, long long n); |
257 | extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n); | 281 | extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n); |
@@ -259,9 +283,9 @@ extern long long __atomic64_xchg_add(volatile long long *p, int *lock, | |||
259 | long long n); | 283 | long long n); |
260 | extern long long __atomic64_xchg_add_unless(volatile long long *p, | 284 | extern long long __atomic64_xchg_add_unless(volatile long long *p, |
261 | int *lock, long long o, long long n); | 285 | int *lock, long long o, long long n); |
262 | extern long long __atomic64_and(volatile long long *p, int *lock, long long n); | 286 | extern long long __atomic64_fetch_and(volatile long long *p, int *lock, long long n); |
263 | extern long long __atomic64_or(volatile long long *p, int *lock, long long n); | 287 | extern long long __atomic64_fetch_or(volatile long long *p, int *lock, long long n); |
264 | extern long long __atomic64_xor(volatile long long *p, int *lock, long long n); | 288 | extern long long __atomic64_fetch_xor(volatile long long *p, int *lock, long long n); |
265 | 289 | ||
266 | /* Return failure from the atomic wrappers. */ | 290 | /* Return failure from the atomic wrappers. */ |
267 | struct __get_user __atomic_bad_address(int __user *addr); | 291 | struct __get_user __atomic_bad_address(int __user *addr); |
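Editor's note: the 32-bit tile wrappers above stay thin. The void op calls the out-of-line _atomic_fetch_* helper and discards its result, while the fetch variant issues smp_mb() first and returns whatever the helper read before updating memory. A sketch of that wrapper shape with an illustrative helper name (demo_fetch_or_word is an assumption, not a real symbol):

#include <linux/atomic.h>

/* Assumed out-of-line helper: applies the OR and returns the old word. */
extern unsigned long demo_fetch_or_word(volatile unsigned long *p, unsigned long mask);

static inline int demo_atomic_fetch_or(int i, atomic_t *v)
{
        smp_mb();       /* full barrier before the update, as in the wrappers above */
        return demo_fetch_or_word((unsigned long *)&v->counter, i);
}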
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h index b0531a623653..4cefa0c9fd81 100644 --- a/arch/tile/include/asm/atomic_64.h +++ b/arch/tile/include/asm/atomic_64.h | |||
@@ -32,11 +32,6 @@ | |||
32 | * on any routine which updates memory and returns a value. | 32 | * on any routine which updates memory and returns a value. |
33 | */ | 33 | */ |
34 | 34 | ||
35 | static inline void atomic_add(int i, atomic_t *v) | ||
36 | { | ||
37 | __insn_fetchadd4((void *)&v->counter, i); | ||
38 | } | ||
39 | |||
40 | /* | 35 | /* |
41 | * Note a subtlety of the locking here. We are required to provide a | 36 | * Note a subtlety of the locking here. We are required to provide a |
42 | * full memory barrier before and after the operation. However, we | 37 | * full memory barrier before and after the operation. However, we |
@@ -59,28 +54,39 @@ static inline int atomic_add_return(int i, atomic_t *v) | |||
59 | return val; | 54 | return val; |
60 | } | 55 | } |
61 | 56 | ||
62 | static inline int __atomic_add_unless(atomic_t *v, int a, int u) | 57 | #define ATOMIC_OPS(op) \ |
58 | static inline int atomic_fetch_##op(int i, atomic_t *v) \ | ||
59 | { \ | ||
60 | int val; \ | ||
61 | smp_mb(); \ | ||
62 | val = __insn_fetch##op##4((void *)&v->counter, i); \ | ||
63 | smp_mb(); \ | ||
64 | return val; \ | ||
65 | } \ | ||
66 | static inline void atomic_##op(int i, atomic_t *v) \ | ||
67 | { \ | ||
68 | __insn_fetch##op##4((void *)&v->counter, i); \ | ||
69 | } | ||
70 | |||
71 | ATOMIC_OPS(add) | ||
72 | ATOMIC_OPS(and) | ||
73 | ATOMIC_OPS(or) | ||
74 | |||
75 | #undef ATOMIC_OPS | ||
76 | |||
77 | static inline int atomic_fetch_xor(int i, atomic_t *v) | ||
63 | { | 78 | { |
64 | int guess, oldval = v->counter; | 79 | int guess, oldval = v->counter; |
80 | smp_mb(); | ||
65 | do { | 81 | do { |
66 | if (oldval == u) | ||
67 | break; | ||
68 | guess = oldval; | 82 | guess = oldval; |
69 | oldval = cmpxchg(&v->counter, guess, guess + a); | 83 | __insn_mtspr(SPR_CMPEXCH_VALUE, guess); |
84 | oldval = __insn_cmpexch4(&v->counter, guess ^ i); | ||
70 | } while (guess != oldval); | 85 | } while (guess != oldval); |
86 | smp_mb(); | ||
71 | return oldval; | 87 | return oldval; |
72 | } | 88 | } |
73 | 89 | ||
74 | static inline void atomic_and(int i, atomic_t *v) | ||
75 | { | ||
76 | __insn_fetchand4((void *)&v->counter, i); | ||
77 | } | ||
78 | |||
79 | static inline void atomic_or(int i, atomic_t *v) | ||
80 | { | ||
81 | __insn_fetchor4((void *)&v->counter, i); | ||
82 | } | ||
83 | |||
84 | static inline void atomic_xor(int i, atomic_t *v) | 90 | static inline void atomic_xor(int i, atomic_t *v) |
85 | { | 91 | { |
86 | int guess, oldval = v->counter; | 92 | int guess, oldval = v->counter; |
@@ -91,6 +97,18 @@ static inline void atomic_xor(int i, atomic_t *v) | |||
91 | } while (guess != oldval); | 97 | } while (guess != oldval); |
92 | } | 98 | } |
93 | 99 | ||
100 | static inline int __atomic_add_unless(atomic_t *v, int a, int u) | ||
101 | { | ||
102 | int guess, oldval = v->counter; | ||
103 | do { | ||
104 | if (oldval == u) | ||
105 | break; | ||
106 | guess = oldval; | ||
107 | oldval = cmpxchg(&v->counter, guess, guess + a); | ||
108 | } while (guess != oldval); | ||
109 | return oldval; | ||
110 | } | ||
111 | |||
94 | /* Now the true 64-bit operations. */ | 112 | /* Now the true 64-bit operations. */ |
95 | 113 | ||
96 | #define ATOMIC64_INIT(i) { (i) } | 114 | #define ATOMIC64_INIT(i) { (i) } |
@@ -98,11 +116,6 @@ static inline void atomic_xor(int i, atomic_t *v) | |||
98 | #define atomic64_read(v) READ_ONCE((v)->counter) | 116 | #define atomic64_read(v) READ_ONCE((v)->counter) |
99 | #define atomic64_set(v, i) WRITE_ONCE((v)->counter, (i)) | 117 | #define atomic64_set(v, i) WRITE_ONCE((v)->counter, (i)) |
100 | 118 | ||
101 | static inline void atomic64_add(long i, atomic64_t *v) | ||
102 | { | ||
103 | __insn_fetchadd((void *)&v->counter, i); | ||
104 | } | ||
105 | |||
106 | static inline long atomic64_add_return(long i, atomic64_t *v) | 119 | static inline long atomic64_add_return(long i, atomic64_t *v) |
107 | { | 120 | { |
108 | int val; | 121 | int val; |
@@ -112,26 +125,37 @@ static inline long atomic64_add_return(long i, atomic64_t *v) | |||
112 | return val; | 125 | return val; |
113 | } | 126 | } |
114 | 127 | ||
115 | static inline long atomic64_add_unless(atomic64_t *v, long a, long u) | 128 | #define ATOMIC64_OPS(op) \ |
129 | static inline long atomic64_fetch_##op(long i, atomic64_t *v) \ | ||
130 | { \ | ||
131 | long val; \ | ||
132 | smp_mb(); \ | ||
133 | val = __insn_fetch##op((void *)&v->counter, i); \ | ||
134 | smp_mb(); \ | ||
135 | return val; \ | ||
136 | } \ | ||
137 | static inline void atomic64_##op(long i, atomic64_t *v) \ | ||
138 | { \ | ||
139 | __insn_fetch##op((void *)&v->counter, i); \ | ||
140 | } | ||
141 | |||
142 | ATOMIC64_OPS(add) | ||
143 | ATOMIC64_OPS(and) | ||
144 | ATOMIC64_OPS(or) | ||
145 | |||
146 | #undef ATOMIC64_OPS | ||
147 | |||
148 | static inline long atomic64_fetch_xor(long i, atomic64_t *v) | ||
116 | { | 149 | { |
117 | long guess, oldval = v->counter; | 150 | long guess, oldval = v->counter; |
151 | smp_mb(); | ||
118 | do { | 152 | do { |
119 | if (oldval == u) | ||
120 | break; | ||
121 | guess = oldval; | 153 | guess = oldval; |
122 | oldval = cmpxchg(&v->counter, guess, guess + a); | 154 | __insn_mtspr(SPR_CMPEXCH_VALUE, guess); |
155 | oldval = __insn_cmpexch(&v->counter, guess ^ i); | ||
123 | } while (guess != oldval); | 156 | } while (guess != oldval); |
124 | return oldval != u; | 157 | smp_mb(); |
125 | } | 158 | return oldval; |
126 | |||
127 | static inline void atomic64_and(long i, atomic64_t *v) | ||
128 | { | ||
129 | __insn_fetchand((void *)&v->counter, i); | ||
130 | } | ||
131 | |||
132 | static inline void atomic64_or(long i, atomic64_t *v) | ||
133 | { | ||
134 | __insn_fetchor((void *)&v->counter, i); | ||
135 | } | 159 | } |
136 | 160 | ||
137 | static inline void atomic64_xor(long i, atomic64_t *v) | 161 | static inline void atomic64_xor(long i, atomic64_t *v) |
@@ -144,7 +168,20 @@ static inline void atomic64_xor(long i, atomic64_t *v) | |||
144 | } while (guess != oldval); | 168 | } while (guess != oldval); |
145 | } | 169 | } |
146 | 170 | ||
171 | static inline long atomic64_add_unless(atomic64_t *v, long a, long u) | ||
172 | { | ||
173 | long guess, oldval = v->counter; | ||
174 | do { | ||
175 | if (oldval == u) | ||
176 | break; | ||
177 | guess = oldval; | ||
178 | oldval = cmpxchg(&v->counter, guess, guess + a); | ||
179 | } while (guess != oldval); | ||
180 | return oldval != u; | ||
181 | } | ||
182 | |||
147 | #define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v)) | 183 | #define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v)) |
184 | #define atomic64_fetch_sub(i, v) atomic64_fetch_add(-(i), (v)) | ||
148 | #define atomic64_sub(i, v) atomic64_add(-(i), (v)) | 185 | #define atomic64_sub(i, v) atomic64_add(-(i), (v)) |
149 | #define atomic64_inc_return(v) atomic64_add_return(1, (v)) | 186 | #define atomic64_inc_return(v) atomic64_add_return(1, (v)) |
150 | #define atomic64_dec_return(v) atomic64_sub_return(1, (v)) | 187 | #define atomic64_dec_return(v) atomic64_sub_return(1, (v)) |
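Editor's note: tilegx has fetchadd/fetchand/fetchor instructions but no fetch-xor, so atomic_fetch_xor() above loops on cmpexch: load a guess, ask the hardware to swap in guess ^ i only if the word still equals the guess, and retry until it does, with full barriers on both sides. The same loop expressed over the generic atomic_cmpxchg(), as a sketch rather than the __insn_* intrinsics:

#include <linux/atomic.h>

static inline int demo_atomic_fetch_xor(int i, atomic_t *v)
{
        int guess, oldval = atomic_read(v);

        smp_mb();                               /* order earlier accesses before the update */
        do {
                guess = oldval;
                oldval = atomic_cmpxchg(v, guess, guess ^ i);
        } while (guess != oldval);              /* retry until nobody raced us */
        smp_mb();                               /* order the update before later accesses */
        return oldval;                          /* the pre-xor value */
}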
diff --git a/arch/tile/include/asm/bitops_32.h b/arch/tile/include/asm/bitops_32.h index bbf7b666f21d..d1406a95f6b7 100644 --- a/arch/tile/include/asm/bitops_32.h +++ b/arch/tile/include/asm/bitops_32.h | |||
@@ -19,9 +19,9 @@ | |||
19 | #include <asm/barrier.h> | 19 | #include <asm/barrier.h> |
20 | 20 | ||
21 | /* Tile-specific routines to support <asm/bitops.h>. */ | 21 | /* Tile-specific routines to support <asm/bitops.h>. */ |
22 | unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask); | 22 | unsigned long _atomic_fetch_or(volatile unsigned long *p, unsigned long mask); |
23 | unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask); | 23 | unsigned long _atomic_fetch_andn(volatile unsigned long *p, unsigned long mask); |
24 | unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask); | 24 | unsigned long _atomic_fetch_xor(volatile unsigned long *p, unsigned long mask); |
25 | 25 | ||
26 | /** | 26 | /** |
27 | * set_bit - Atomically set a bit in memory | 27 | * set_bit - Atomically set a bit in memory |
@@ -35,7 +35,7 @@ unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask); | |||
35 | */ | 35 | */ |
36 | static inline void set_bit(unsigned nr, volatile unsigned long *addr) | 36 | static inline void set_bit(unsigned nr, volatile unsigned long *addr) |
37 | { | 37 | { |
38 | _atomic_or(addr + BIT_WORD(nr), BIT_MASK(nr)); | 38 | _atomic_fetch_or(addr + BIT_WORD(nr), BIT_MASK(nr)); |
39 | } | 39 | } |
40 | 40 | ||
41 | /** | 41 | /** |
@@ -54,7 +54,7 @@ static inline void set_bit(unsigned nr, volatile unsigned long *addr) | |||
54 | */ | 54 | */ |
55 | static inline void clear_bit(unsigned nr, volatile unsigned long *addr) | 55 | static inline void clear_bit(unsigned nr, volatile unsigned long *addr) |
56 | { | 56 | { |
57 | _atomic_andn(addr + BIT_WORD(nr), BIT_MASK(nr)); | 57 | _atomic_fetch_andn(addr + BIT_WORD(nr), BIT_MASK(nr)); |
58 | } | 58 | } |
59 | 59 | ||
60 | /** | 60 | /** |
@@ -69,7 +69,7 @@ static inline void clear_bit(unsigned nr, volatile unsigned long *addr) | |||
69 | */ | 69 | */ |
70 | static inline void change_bit(unsigned nr, volatile unsigned long *addr) | 70 | static inline void change_bit(unsigned nr, volatile unsigned long *addr) |
71 | { | 71 | { |
72 | _atomic_xor(addr + BIT_WORD(nr), BIT_MASK(nr)); | 72 | _atomic_fetch_xor(addr + BIT_WORD(nr), BIT_MASK(nr)); |
73 | } | 73 | } |
74 | 74 | ||
75 | /** | 75 | /** |
@@ -85,7 +85,7 @@ static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr) | |||
85 | unsigned long mask = BIT_MASK(nr); | 85 | unsigned long mask = BIT_MASK(nr); |
86 | addr += BIT_WORD(nr); | 86 | addr += BIT_WORD(nr); |
87 | smp_mb(); /* barrier for proper semantics */ | 87 | smp_mb(); /* barrier for proper semantics */ |
88 | return (_atomic_or(addr, mask) & mask) != 0; | 88 | return (_atomic_fetch_or(addr, mask) & mask) != 0; |
89 | } | 89 | } |
90 | 90 | ||
91 | /** | 91 | /** |
@@ -101,7 +101,7 @@ static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr) | |||
101 | unsigned long mask = BIT_MASK(nr); | 101 | unsigned long mask = BIT_MASK(nr); |
102 | addr += BIT_WORD(nr); | 102 | addr += BIT_WORD(nr); |
103 | smp_mb(); /* barrier for proper semantics */ | 103 | smp_mb(); /* barrier for proper semantics */ |
104 | return (_atomic_andn(addr, mask) & mask) != 0; | 104 | return (_atomic_fetch_andn(addr, mask) & mask) != 0; |
105 | } | 105 | } |
106 | 106 | ||
107 | /** | 107 | /** |
@@ -118,7 +118,7 @@ static inline int test_and_change_bit(unsigned nr, | |||
118 | unsigned long mask = BIT_MASK(nr); | 118 | unsigned long mask = BIT_MASK(nr); |
119 | addr += BIT_WORD(nr); | 119 | addr += BIT_WORD(nr); |
120 | smp_mb(); /* barrier for proper semantics */ | 120 | smp_mb(); /* barrier for proper semantics */ |
121 | return (_atomic_xor(addr, mask) & mask) != 0; | 121 | return (_atomic_fetch_xor(addr, mask) & mask) != 0; |
122 | } | 122 | } |
123 | 123 | ||
124 | #include <asm-generic/bitops/ext2-atomic.h> | 124 | #include <asm-generic/bitops/ext2-atomic.h> |
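Editor's note: with the rename, the bitops above are thin users of the fetch helpers: the helper returns the whole old word, and masking that word with the bit of interest says whether it was already set. A sketch of the test_and_set_bit() shape over an assumed generic fetch-or helper (demo_fetch_or_word is illustrative):

#include <linux/atomic.h>
#include <linux/bitops.h>

extern unsigned long demo_fetch_or_word(volatile unsigned long *p, unsigned long mask);

static inline int demo_test_and_set_bit(unsigned nr, volatile unsigned long *addr)
{
        unsigned long mask = BIT_MASK(nr);

        addr += BIT_WORD(nr);                   /* word that holds the bit */
        smp_mb();                               /* barrier for proper semantics */
        return (demo_fetch_or_word(addr, mask) & mask) != 0;
}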
diff --git a/arch/tile/include/asm/futex.h b/arch/tile/include/asm/futex.h index 1a6ef1b69cb1..e64a1b75fc38 100644 --- a/arch/tile/include/asm/futex.h +++ b/arch/tile/include/asm/futex.h | |||
@@ -80,16 +80,16 @@ | |||
80 | ret = gu.err; \ | 80 | ret = gu.err; \ |
81 | } | 81 | } |
82 | 82 | ||
83 | #define __futex_set() __futex_call(__atomic_xchg) | 83 | #define __futex_set() __futex_call(__atomic32_xchg) |
84 | #define __futex_add() __futex_call(__atomic_xchg_add) | 84 | #define __futex_add() __futex_call(__atomic32_xchg_add) |
85 | #define __futex_or() __futex_call(__atomic_or) | 85 | #define __futex_or() __futex_call(__atomic32_fetch_or) |
86 | #define __futex_andn() __futex_call(__atomic_andn) | 86 | #define __futex_andn() __futex_call(__atomic32_fetch_andn) |
87 | #define __futex_xor() __futex_call(__atomic_xor) | 87 | #define __futex_xor() __futex_call(__atomic32_fetch_xor) |
88 | 88 | ||
89 | #define __futex_cmpxchg() \ | 89 | #define __futex_cmpxchg() \ |
90 | { \ | 90 | { \ |
91 | struct __get_user gu = __atomic_cmpxchg((u32 __force *)uaddr, \ | 91 | struct __get_user gu = __atomic32_cmpxchg((u32 __force *)uaddr, \ |
92 | lock, oldval, oparg); \ | 92 | lock, oldval, oparg); \ |
93 | val = gu.val; \ | 93 | val = gu.val; \ |
94 | ret = gu.err; \ | 94 | ret = gu.err; \ |
95 | } | 95 | } |
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c index 298df1e9912a..f8128800dbf5 100644 --- a/arch/tile/lib/atomic_32.c +++ b/arch/tile/lib/atomic_32.c | |||
@@ -61,13 +61,13 @@ static inline int *__atomic_setup(volatile void *v) | |||
61 | 61 | ||
62 | int _atomic_xchg(int *v, int n) | 62 | int _atomic_xchg(int *v, int n) |
63 | { | 63 | { |
64 | return __atomic_xchg(v, __atomic_setup(v), n).val; | 64 | return __atomic32_xchg(v, __atomic_setup(v), n).val; |
65 | } | 65 | } |
66 | EXPORT_SYMBOL(_atomic_xchg); | 66 | EXPORT_SYMBOL(_atomic_xchg); |
67 | 67 | ||
68 | int _atomic_xchg_add(int *v, int i) | 68 | int _atomic_xchg_add(int *v, int i) |
69 | { | 69 | { |
70 | return __atomic_xchg_add(v, __atomic_setup(v), i).val; | 70 | return __atomic32_xchg_add(v, __atomic_setup(v), i).val; |
71 | } | 71 | } |
72 | EXPORT_SYMBOL(_atomic_xchg_add); | 72 | EXPORT_SYMBOL(_atomic_xchg_add); |
73 | 73 | ||
@@ -78,39 +78,39 @@ int _atomic_xchg_add_unless(int *v, int a, int u) | |||
78 | * to use the first argument consistently as the "old value" | 78 | * to use the first argument consistently as the "old value" |
79 | * in the assembly, as is done for _atomic_cmpxchg(). | 79 | * in the assembly, as is done for _atomic_cmpxchg(). |
80 | */ | 80 | */ |
81 | return __atomic_xchg_add_unless(v, __atomic_setup(v), u, a).val; | 81 | return __atomic32_xchg_add_unless(v, __atomic_setup(v), u, a).val; |
82 | } | 82 | } |
83 | EXPORT_SYMBOL(_atomic_xchg_add_unless); | 83 | EXPORT_SYMBOL(_atomic_xchg_add_unless); |
84 | 84 | ||
85 | int _atomic_cmpxchg(int *v, int o, int n) | 85 | int _atomic_cmpxchg(int *v, int o, int n) |
86 | { | 86 | { |
87 | return __atomic_cmpxchg(v, __atomic_setup(v), o, n).val; | 87 | return __atomic32_cmpxchg(v, __atomic_setup(v), o, n).val; |
88 | } | 88 | } |
89 | EXPORT_SYMBOL(_atomic_cmpxchg); | 89 | EXPORT_SYMBOL(_atomic_cmpxchg); |
90 | 90 | ||
91 | unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask) | 91 | unsigned long _atomic_fetch_or(volatile unsigned long *p, unsigned long mask) |
92 | { | 92 | { |
93 | return __atomic_or((int *)p, __atomic_setup(p), mask).val; | 93 | return __atomic32_fetch_or((int *)p, __atomic_setup(p), mask).val; |
94 | } | 94 | } |
95 | EXPORT_SYMBOL(_atomic_or); | 95 | EXPORT_SYMBOL(_atomic_fetch_or); |
96 | 96 | ||
97 | unsigned long _atomic_and(volatile unsigned long *p, unsigned long mask) | 97 | unsigned long _atomic_fetch_and(volatile unsigned long *p, unsigned long mask) |
98 | { | 98 | { |
99 | return __atomic_and((int *)p, __atomic_setup(p), mask).val; | 99 | return __atomic32_fetch_and((int *)p, __atomic_setup(p), mask).val; |
100 | } | 100 | } |
101 | EXPORT_SYMBOL(_atomic_and); | 101 | EXPORT_SYMBOL(_atomic_fetch_and); |
102 | 102 | ||
103 | unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask) | 103 | unsigned long _atomic_fetch_andn(volatile unsigned long *p, unsigned long mask) |
104 | { | 104 | { |
105 | return __atomic_andn((int *)p, __atomic_setup(p), mask).val; | 105 | return __atomic32_fetch_andn((int *)p, __atomic_setup(p), mask).val; |
106 | } | 106 | } |
107 | EXPORT_SYMBOL(_atomic_andn); | 107 | EXPORT_SYMBOL(_atomic_fetch_andn); |
108 | 108 | ||
109 | unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask) | 109 | unsigned long _atomic_fetch_xor(volatile unsigned long *p, unsigned long mask) |
110 | { | 110 | { |
111 | return __atomic_xor((int *)p, __atomic_setup(p), mask).val; | 111 | return __atomic32_fetch_xor((int *)p, __atomic_setup(p), mask).val; |
112 | } | 112 | } |
113 | EXPORT_SYMBOL(_atomic_xor); | 113 | EXPORT_SYMBOL(_atomic_fetch_xor); |
114 | 114 | ||
115 | 115 | ||
116 | long long _atomic64_xchg(long long *v, long long n) | 116 | long long _atomic64_xchg(long long *v, long long n) |
@@ -142,23 +142,23 @@ long long _atomic64_cmpxchg(long long *v, long long o, long long n) | |||
142 | } | 142 | } |
143 | EXPORT_SYMBOL(_atomic64_cmpxchg); | 143 | EXPORT_SYMBOL(_atomic64_cmpxchg); |
144 | 144 | ||
145 | long long _atomic64_and(long long *v, long long n) | 145 | long long _atomic64_fetch_and(long long *v, long long n) |
146 | { | 146 | { |
147 | return __atomic64_and(v, __atomic_setup(v), n); | 147 | return __atomic64_fetch_and(v, __atomic_setup(v), n); |
148 | } | 148 | } |
149 | EXPORT_SYMBOL(_atomic64_and); | 149 | EXPORT_SYMBOL(_atomic64_fetch_and); |
150 | 150 | ||
151 | long long _atomic64_or(long long *v, long long n) | 151 | long long _atomic64_fetch_or(long long *v, long long n) |
152 | { | 152 | { |
153 | return __atomic64_or(v, __atomic_setup(v), n); | 153 | return __atomic64_fetch_or(v, __atomic_setup(v), n); |
154 | } | 154 | } |
155 | EXPORT_SYMBOL(_atomic64_or); | 155 | EXPORT_SYMBOL(_atomic64_fetch_or); |
156 | 156 | ||
157 | long long _atomic64_xor(long long *v, long long n) | 157 | long long _atomic64_fetch_xor(long long *v, long long n) |
158 | { | 158 | { |
159 | return __atomic64_xor(v, __atomic_setup(v), n); | 159 | return __atomic64_fetch_xor(v, __atomic_setup(v), n); |
160 | } | 160 | } |
161 | EXPORT_SYMBOL(_atomic64_xor); | 161 | EXPORT_SYMBOL(_atomic64_fetch_xor); |
162 | 162 | ||
163 | /* | 163 | /* |
164 | * If any of the atomic or futex routines hit a bad address (not in | 164 | * If any of the atomic or futex routines hit a bad address (not in |
diff --git a/arch/tile/lib/atomic_asm_32.S b/arch/tile/lib/atomic_asm_32.S index f611265633d6..1a70e6c0f259 100644 --- a/arch/tile/lib/atomic_asm_32.S +++ b/arch/tile/lib/atomic_asm_32.S | |||
@@ -172,15 +172,20 @@ STD_ENTRY_SECTION(__atomic\name, .text.atomic) | |||
172 | .endif | 172 | .endif |
173 | .endm | 173 | .endm |
174 | 174 | ||
175 | atomic_op _cmpxchg, 32, "seq r26, r22, r2; { bbns r26, 3f; move r24, r3 }" | 175 | |
176 | atomic_op _xchg, 32, "move r24, r2" | 176 | /* |
177 | atomic_op _xchg_add, 32, "add r24, r22, r2" | 177 | * Use __atomic32 prefix to avoid collisions with GCC builtin __atomic functions. |
178 | atomic_op _xchg_add_unless, 32, \ | 178 | */ |
179 | |||
180 | atomic_op 32_cmpxchg, 32, "seq r26, r22, r2; { bbns r26, 3f; move r24, r3 }" | ||
181 | atomic_op 32_xchg, 32, "move r24, r2" | ||
182 | atomic_op 32_xchg_add, 32, "add r24, r22, r2" | ||
183 | atomic_op 32_xchg_add_unless, 32, \ | ||
179 | "sne r26, r22, r2; { bbns r26, 3f; add r24, r22, r3 }" | 184 | "sne r26, r22, r2; { bbns r26, 3f; add r24, r22, r3 }" |
180 | atomic_op _or, 32, "or r24, r22, r2" | 185 | atomic_op 32_fetch_or, 32, "or r24, r22, r2" |
181 | atomic_op _and, 32, "and r24, r22, r2" | 186 | atomic_op 32_fetch_and, 32, "and r24, r22, r2" |
182 | atomic_op _andn, 32, "nor r2, r2, zero; and r24, r22, r2" | 187 | atomic_op 32_fetch_andn, 32, "nor r2, r2, zero; and r24, r22, r2" |
183 | atomic_op _xor, 32, "xor r24, r22, r2" | 188 | atomic_op 32_fetch_xor, 32, "xor r24, r22, r2" |
184 | 189 | ||
185 | atomic_op 64_cmpxchg, 64, "{ seq r26, r22, r2; seq r27, r23, r3 }; \ | 190 | atomic_op 64_cmpxchg, 64, "{ seq r26, r22, r2; seq r27, r23, r3 }; \ |
186 | { bbns r26, 3f; move r24, r4 }; { bbns r27, 3f; move r25, r5 }" | 191 | { bbns r26, 3f; move r24, r4 }; { bbns r27, 3f; move r25, r5 }" |
@@ -192,9 +197,9 @@ atomic_op 64_xchg_add_unless, 64, \ | |||
192 | { bbns r26, 3f; add r24, r22, r4 }; \ | 197 | { bbns r26, 3f; add r24, r22, r4 }; \ |
193 | { bbns r27, 3f; add r25, r23, r5 }; \ | 198 | { bbns r27, 3f; add r25, r23, r5 }; \ |
194 | slt_u r26, r24, r22; add r25, r25, r26" | 199 | slt_u r26, r24, r22; add r25, r25, r26" |
195 | atomic_op 64_or, 64, "{ or r24, r22, r2; or r25, r23, r3 }" | 200 | atomic_op 64_fetch_or, 64, "{ or r24, r22, r2; or r25, r23, r3 }" |
196 | atomic_op 64_and, 64, "{ and r24, r22, r2; and r25, r23, r3 }" | 201 | atomic_op 64_fetch_and, 64, "{ and r24, r22, r2; and r25, r23, r3 }" |
197 | atomic_op 64_xor, 64, "{ xor r24, r22, r2; xor r25, r23, r3 }" | 202 | atomic_op 64_fetch_xor, 64, "{ xor r24, r22, r2; xor r25, r23, r3 }" |
198 | 203 | ||
199 | jrp lr /* happy backtracer */ | 204 | jrp lr /* happy backtracer */ |
200 | 205 | ||
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h index 3e8674288198..a58b99811105 100644 --- a/arch/x86/include/asm/atomic.h +++ b/arch/x86/include/asm/atomic.h | |||
@@ -171,6 +171,16 @@ static __always_inline int atomic_sub_return(int i, atomic_t *v) | |||
171 | #define atomic_inc_return(v) (atomic_add_return(1, v)) | 171 | #define atomic_inc_return(v) (atomic_add_return(1, v)) |
172 | #define atomic_dec_return(v) (atomic_sub_return(1, v)) | 172 | #define atomic_dec_return(v) (atomic_sub_return(1, v)) |
173 | 173 | ||
174 | static __always_inline int atomic_fetch_add(int i, atomic_t *v) | ||
175 | { | ||
176 | return xadd(&v->counter, i); | ||
177 | } | ||
178 | |||
179 | static __always_inline int atomic_fetch_sub(int i, atomic_t *v) | ||
180 | { | ||
181 | return xadd(&v->counter, -i); | ||
182 | } | ||
183 | |||
174 | static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new) | 184 | static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new) |
175 | { | 185 | { |
176 | return cmpxchg(&v->counter, old, new); | 186 | return cmpxchg(&v->counter, old, new); |
@@ -190,10 +200,29 @@ static inline void atomic_##op(int i, atomic_t *v) \ | |||
190 | : "memory"); \ | 200 | : "memory"); \ |
191 | } | 201 | } |
192 | 202 | ||
193 | ATOMIC_OP(and) | 203 | #define ATOMIC_FETCH_OP(op, c_op) \ |
194 | ATOMIC_OP(or) | 204 | static inline int atomic_fetch_##op(int i, atomic_t *v) \ |
195 | ATOMIC_OP(xor) | 205 | { \ |
206 | int old, val = atomic_read(v); \ | ||
207 | for (;;) { \ | ||
208 | old = atomic_cmpxchg(v, val, val c_op i); \ | ||
209 | if (old == val) \ | ||
210 | break; \ | ||
211 | val = old; \ | ||
212 | } \ | ||
213 | return old; \ | ||
214 | } | ||
215 | |||
216 | #define ATOMIC_OPS(op, c_op) \ | ||
217 | ATOMIC_OP(op) \ | ||
218 | ATOMIC_FETCH_OP(op, c_op) | ||
219 | |||
220 | ATOMIC_OPS(and, &) | ||
221 | ATOMIC_OPS(or , |) | ||
222 | ATOMIC_OPS(xor, ^) | ||
196 | 223 | ||
224 | #undef ATOMIC_OPS | ||
225 | #undef ATOMIC_FETCH_OP | ||
197 | #undef ATOMIC_OP | 226 | #undef ATOMIC_OP |
198 | 227 | ||
199 | /** | 228 | /** |
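Editor's note: on x86, atomic_fetch_add()/atomic_fetch_sub() map straight onto lock xadd, which already returns the previous value; the bitwise ops have no such instruction, so ATOMIC_FETCH_OP() spins on atomic_cmpxchg() and returns the value the successful compare observed. A usage sketch of the fetch value doing real work (the flag name and helper are made up for the example):

#include <linux/atomic.h>
#include <linux/types.h>

#define DEMO_FLAG_INIT 0x1

static bool demo_claim_init(atomic_t *flags)
{
        /* atomic_fetch_or() returns the old flags word, so exactly one
         * caller sees the bit clear and wins the right to initialise. */
        return !(atomic_fetch_or(DEMO_FLAG_INIT, flags) & DEMO_FLAG_INIT);
}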
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h index a984111135b1..71d7705fb303 100644 --- a/arch/x86/include/asm/atomic64_32.h +++ b/arch/x86/include/asm/atomic64_32.h | |||
@@ -320,10 +320,29 @@ static inline void atomic64_##op(long long i, atomic64_t *v) \ | |||
320 | c = old; \ | 320 | c = old; \ |
321 | } | 321 | } |
322 | 322 | ||
323 | ATOMIC64_OP(and, &) | 323 | #define ATOMIC64_FETCH_OP(op, c_op) \ |
324 | ATOMIC64_OP(or, |) | 324 | static inline long long atomic64_fetch_##op(long long i, atomic64_t *v) \ |
325 | ATOMIC64_OP(xor, ^) | 325 | { \ |
326 | long long old, c = 0; \ | ||
327 | while ((old = atomic64_cmpxchg(v, c, c c_op i)) != c) \ | ||
328 | c = old; \ | ||
329 | return old; \ | ||
330 | } | ||
331 | |||
332 | ATOMIC64_FETCH_OP(add, +) | ||
333 | |||
334 | #define atomic64_fetch_sub(i, v) atomic64_fetch_add(-(i), (v)) | ||
335 | |||
336 | #define ATOMIC64_OPS(op, c_op) \ | ||
337 | ATOMIC64_OP(op, c_op) \ | ||
338 | ATOMIC64_FETCH_OP(op, c_op) | ||
339 | |||
340 | ATOMIC64_OPS(and, &) | ||
341 | ATOMIC64_OPS(or, |) | ||
342 | ATOMIC64_OPS(xor, ^) | ||
326 | 343 | ||
344 | #undef ATOMIC64_OPS | ||
345 | #undef ATOMIC64_FETCH_OP | ||
327 | #undef ATOMIC64_OP | 346 | #undef ATOMIC64_OP |
328 | 347 | ||
329 | #endif /* _ASM_X86_ATOMIC64_32_H */ | 348 | #endif /* _ASM_X86_ATOMIC64_32_H */ |
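Editor's note: on 32-bit x86 the 64-bit fetch ops above loop on atomic64_cmpxchg() (cmpxchg8b underneath), starting from a guess of 0 and letting each failed compare hand back the real current value; atomic64_fetch_sub() is again fetch_add of the negated operand. Roughly that loop shape, as a sketch:

#include <linux/atomic.h>

static long long demo_atomic64_fetch_and(long long i, atomic64_t *v)
{
        long long old, c = 0;                   /* initial guess; corrected by the first failure */

        while ((old = atomic64_cmpxchg(v, c, c & i)) != c)
                c = old;                        /* cmpxchg reported the live value: retry with it */
        return old;                             /* the pre-operation value */
}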
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h index 037351022f54..70eed0e14553 100644 --- a/arch/x86/include/asm/atomic64_64.h +++ b/arch/x86/include/asm/atomic64_64.h | |||
@@ -158,6 +158,16 @@ static inline long atomic64_sub_return(long i, atomic64_t *v) | |||
158 | return atomic64_add_return(-i, v); | 158 | return atomic64_add_return(-i, v); |
159 | } | 159 | } |
160 | 160 | ||
161 | static inline long atomic64_fetch_add(long i, atomic64_t *v) | ||
162 | { | ||
163 | return xadd(&v->counter, i); | ||
164 | } | ||
165 | |||
166 | static inline long atomic64_fetch_sub(long i, atomic64_t *v) | ||
167 | { | ||
168 | return xadd(&v->counter, -i); | ||
169 | } | ||
170 | |||
161 | #define atomic64_inc_return(v) (atomic64_add_return(1, (v))) | 171 | #define atomic64_inc_return(v) (atomic64_add_return(1, (v))) |
162 | #define atomic64_dec_return(v) (atomic64_sub_return(1, (v))) | 172 | #define atomic64_dec_return(v) (atomic64_sub_return(1, (v))) |
163 | 173 | ||
@@ -229,10 +239,29 @@ static inline void atomic64_##op(long i, atomic64_t *v) \ | |||
229 | : "memory"); \ | 239 | : "memory"); \ |
230 | } | 240 | } |
231 | 241 | ||
232 | ATOMIC64_OP(and) | 242 | #define ATOMIC64_FETCH_OP(op, c_op) \ |
233 | ATOMIC64_OP(or) | 243 | static inline long atomic64_fetch_##op(long i, atomic64_t *v) \ |
234 | ATOMIC64_OP(xor) | 244 | { \ |
245 | long old, val = atomic64_read(v); \ | ||
246 | for (;;) { \ | ||
247 | old = atomic64_cmpxchg(v, val, val c_op i); \ | ||
248 | if (old == val) \ | ||
249 | break; \ | ||
250 | val = old; \ | ||
251 | } \ | ||
252 | return old; \ | ||
253 | } | ||
254 | |||
255 | #define ATOMIC64_OPS(op, c_op) \ | ||
256 | ATOMIC64_OP(op) \ | ||
257 | ATOMIC64_FETCH_OP(op, c_op) | ||
258 | |||
259 | ATOMIC64_OPS(and, &) | ||
260 | ATOMIC64_OPS(or, |) | ||
261 | ATOMIC64_OPS(xor, ^) | ||
235 | 262 | ||
263 | #undef ATOMIC64_OPS | ||
264 | #undef ATOMIC64_FETCH_OP | ||
236 | #undef ATOMIC64_OP | 265 | #undef ATOMIC64_OP |
237 | 266 | ||
238 | #endif /* _ASM_X86_ATOMIC64_64_H */ | 267 | #endif /* _ASM_X86_ATOMIC64_64_H */ |
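Editor's note: 64-bit x86 mirrors the 32-bit int case: xadd for the additive fetch ops, a cmpxchg loop for and/or/xor. A short usage sketch of the additive form (demo code):

#include <linux/atomic.h>

static long demo_next_ticket(atomic64_t *ctr)
{
        /* xadd returns the counter's value from before the increment, so each
         * caller gets a unique, monotonically assigned ticket. */
        return atomic64_fetch_add(1, ctr);
}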
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h index fd8017ce298a..e7a23f2a519a 100644 --- a/arch/xtensa/include/asm/atomic.h +++ b/arch/xtensa/include/asm/atomic.h | |||
@@ -98,6 +98,26 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \ | |||
98 | return result; \ | 98 | return result; \ |
99 | } | 99 | } |
100 | 100 | ||
101 | #define ATOMIC_FETCH_OP(op) \ | ||
102 | static inline int atomic_fetch_##op(int i, atomic_t * v) \ | ||
103 | { \ | ||
104 | unsigned long tmp; \ | ||
105 | int result; \ | ||
106 | \ | ||
107 | __asm__ __volatile__( \ | ||
108 | "1: l32i %1, %3, 0\n" \ | ||
109 | " wsr %1, scompare1\n" \ | ||
110 | " " #op " %0, %1, %2\n" \ | ||
111 | " s32c1i %0, %3, 0\n" \ | ||
112 | " bne %0, %1, 1b\n" \ | ||
113 | : "=&a" (result), "=&a" (tmp) \ | ||
114 | : "a" (i), "a" (v) \ | ||
115 | : "memory" \ | ||
116 | ); \ | ||
117 | \ | ||
118 | return result; \ | ||
119 | } | ||
120 | |||
101 | #else /* XCHAL_HAVE_S32C1I */ | 121 | #else /* XCHAL_HAVE_S32C1I */ |
102 | 122 | ||
103 | #define ATOMIC_OP(op) \ | 123 | #define ATOMIC_OP(op) \ |
@@ -138,18 +158,42 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \ | |||
138 | return vval; \ | 158 | return vval; \ |
139 | } | 159 | } |
140 | 160 | ||
161 | #define ATOMIC_FETCH_OP(op) \ | ||
162 | static inline int atomic_fetch_##op(int i, atomic_t * v) \ | ||
163 | { \ | ||
164 | unsigned int tmp, vval; \ | ||
165 | \ | ||
166 | __asm__ __volatile__( \ | ||
167 | " rsil a15,"__stringify(TOPLEVEL)"\n" \ | ||
168 | " l32i %0, %3, 0\n" \ | ||
169 | " " #op " %1, %0, %2\n" \ | ||
170 | " s32i %1, %3, 0\n" \ | ||
171 | " wsr a15, ps\n" \ | ||
172 | " rsync\n" \ | ||
173 | : "=&a" (vval), "=&a" (tmp) \ | ||
174 | : "a" (i), "a" (v) \ | ||
175 | : "a15", "memory" \ | ||
176 | ); \ | ||
177 | \ | ||
178 | return vval; \ | ||
179 | } | ||
180 | |||
141 | #endif /* XCHAL_HAVE_S32C1I */ | 181 | #endif /* XCHAL_HAVE_S32C1I */ |
142 | 182 | ||
143 | #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) | 183 | #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) ATOMIC_OP_RETURN(op) |
144 | 184 | ||
145 | ATOMIC_OPS(add) | 185 | ATOMIC_OPS(add) |
146 | ATOMIC_OPS(sub) | 186 | ATOMIC_OPS(sub) |
147 | 187 | ||
148 | ATOMIC_OP(and) | 188 | #undef ATOMIC_OPS |
149 | ATOMIC_OP(or) | 189 | #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) |
150 | ATOMIC_OP(xor) | 190 | |
191 | ATOMIC_OPS(and) | ||
192 | ATOMIC_OPS(or) | ||
193 | ATOMIC_OPS(xor) | ||
151 | 194 | ||
152 | #undef ATOMIC_OPS | 195 | #undef ATOMIC_OPS |
196 | #undef ATOMIC_FETCH_OP | ||
153 | #undef ATOMIC_OP_RETURN | 197 | #undef ATOMIC_OP_RETURN |
154 | #undef ATOMIC_OP | 198 | #undef ATOMIC_OP |
155 | 199 | ||
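Editor's note: with S32C1I, the xtensa fetch variant above keeps reloading and retrying s32c1i until the conditional store wins, then returns the value it loaded (the pre-op value); without it, the op runs with interrupts masked via rsil and returns the value read before the store. A sketch of that uniprocessor fallback using the generic irq helpers instead of the rsil/wsr sequence:

#include <linux/atomic.h>
#include <linux/irqflags.h>

static inline int demo_atomic_fetch_add(int i, atomic_t *v)
{
        unsigned long flags;
        int old;

        local_irq_save(flags);                  /* UP-only: exclude interrupt handlers */
        old = v->counter;
        v->counter = old + i;
        local_irq_restore(flags);
        return old;
}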
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h index 5e1f345b58dd..2d0d3cf791ab 100644 --- a/include/asm-generic/atomic-long.h +++ b/include/asm-generic/atomic-long.h | |||
@@ -112,6 +112,40 @@ static __always_inline void atomic_long_dec(atomic_long_t *l) | |||
112 | ATOMIC_LONG_PFX(_dec)(v); | 112 | ATOMIC_LONG_PFX(_dec)(v); |
113 | } | 113 | } |
114 | 114 | ||
115 | #define ATOMIC_LONG_FETCH_OP(op, mo) \ | ||
116 | static inline long \ | ||
117 | atomic_long_fetch_##op##mo(long i, atomic_long_t *l) \ | ||
118 | { \ | ||
119 | ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ | ||
120 | \ | ||
121 | return (long)ATOMIC_LONG_PFX(_fetch_##op##mo)(i, v); \ | ||
122 | } | ||
123 | |||
124 | ATOMIC_LONG_FETCH_OP(add, ) | ||
125 | ATOMIC_LONG_FETCH_OP(add, _relaxed) | ||
126 | ATOMIC_LONG_FETCH_OP(add, _acquire) | ||
127 | ATOMIC_LONG_FETCH_OP(add, _release) | ||
128 | ATOMIC_LONG_FETCH_OP(sub, ) | ||
129 | ATOMIC_LONG_FETCH_OP(sub, _relaxed) | ||
130 | ATOMIC_LONG_FETCH_OP(sub, _acquire) | ||
131 | ATOMIC_LONG_FETCH_OP(sub, _release) | ||
132 | ATOMIC_LONG_FETCH_OP(and, ) | ||
133 | ATOMIC_LONG_FETCH_OP(and, _relaxed) | ||
134 | ATOMIC_LONG_FETCH_OP(and, _acquire) | ||
135 | ATOMIC_LONG_FETCH_OP(and, _release) | ||
136 | ATOMIC_LONG_FETCH_OP(andnot, ) | ||
137 | ATOMIC_LONG_FETCH_OP(andnot, _relaxed) | ||
138 | ATOMIC_LONG_FETCH_OP(andnot, _acquire) | ||
139 | ATOMIC_LONG_FETCH_OP(andnot, _release) | ||
140 | ATOMIC_LONG_FETCH_OP(or, ) | ||
141 | ATOMIC_LONG_FETCH_OP(or, _relaxed) | ||
142 | ATOMIC_LONG_FETCH_OP(or, _acquire) | ||
143 | ATOMIC_LONG_FETCH_OP(or, _release) | ||
144 | ATOMIC_LONG_FETCH_OP(xor, ) | ||
145 | ATOMIC_LONG_FETCH_OP(xor, _relaxed) | ||
146 | ATOMIC_LONG_FETCH_OP(xor, _acquire) | ||
147 | ATOMIC_LONG_FETCH_OP(xor, _release) | ||
148 | |||
115 | #define ATOMIC_LONG_OP(op) \ | 149 | #define ATOMIC_LONG_OP(op) \ |
116 | static __always_inline void \ | 150 | static __always_inline void \ |
117 | atomic_long_##op(long i, atomic_long_t *l) \ | 151 | atomic_long_##op(long i, atomic_long_t *l) \ |
@@ -124,9 +158,9 @@ atomic_long_##op(long i, atomic_long_t *l) \ | |||
124 | ATOMIC_LONG_OP(add) | 158 | ATOMIC_LONG_OP(add) |
125 | ATOMIC_LONG_OP(sub) | 159 | ATOMIC_LONG_OP(sub) |
126 | ATOMIC_LONG_OP(and) | 160 | ATOMIC_LONG_OP(and) |
161 | ATOMIC_LONG_OP(andnot) | ||
127 | ATOMIC_LONG_OP(or) | 162 | ATOMIC_LONG_OP(or) |
128 | ATOMIC_LONG_OP(xor) | 163 | ATOMIC_LONG_OP(xor) |
129 | ATOMIC_LONG_OP(andnot) | ||
130 | 164 | ||
131 | #undef ATOMIC_LONG_OP | 165 | #undef ATOMIC_LONG_OP |
132 | 166 | ||
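Editor's note: ATOMIC_LONG_FETCH_OP() above stamps out a long-typed wrapper for every op/ordering pair by pasting the ordering suffix onto the underlying atomic_ or atomic64_ name, with ATOMIC_LONG_PFX() picking the variant that matches the word size. Roughly what one instantiation expands to on a 64-bit kernel (a sketch of the generated code, not a copy of it):

#include <linux/atomic.h>

/* ~ ATOMIC_LONG_FETCH_OP(add, _acquire) with ATOMIC_LONG_PFX(x) pasting atomic64##x */
static inline long demo_atomic_long_fetch_add_acquire(long i, atomic_long_t *l)
{
        atomic64_t *v = (atomic64_t *)l;        /* atomic_long_t aliases atomic64_t here */

        return (long)atomic64_fetch_add_acquire(i, v);
}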
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h index 74f1a3704d7a..9ed8b987185b 100644 --- a/include/asm-generic/atomic.h +++ b/include/asm-generic/atomic.h | |||
@@ -61,6 +61,18 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ | |||
61 | return c c_op i; \ | 61 | return c c_op i; \ |
62 | } | 62 | } |
63 | 63 | ||
64 | #define ATOMIC_FETCH_OP(op, c_op) \ | ||
65 | static inline int atomic_fetch_##op(int i, atomic_t *v) \ | ||
66 | { \ | ||
67 | int c, old; \ | ||
68 | \ | ||
69 | c = v->counter; \ | ||
70 | while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \ | ||
71 | c = old; \ | ||
72 | \ | ||
73 | return c; \ | ||
74 | } | ||
75 | |||
64 | #else | 76 | #else |
65 | 77 | ||
66 | #include <linux/irqflags.h> | 78 | #include <linux/irqflags.h> |
@@ -88,6 +100,20 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ | |||
88 | return ret; \ | 100 | return ret; \ |
89 | } | 101 | } |
90 | 102 | ||
103 | #define ATOMIC_FETCH_OP(op, c_op) \ | ||
104 | static inline int atomic_fetch_##op(int i, atomic_t *v) \ | ||
105 | { \ | ||
106 | unsigned long flags; \ | ||
107 | int ret; \ | ||
108 | \ | ||
109 | raw_local_irq_save(flags); \ | ||
110 | ret = v->counter; \ | ||
111 | v->counter = v->counter c_op i; \ | ||
112 | raw_local_irq_restore(flags); \ | ||
113 | \ | ||
114 | return ret; \ | ||
115 | } | ||
116 | |||
91 | #endif /* CONFIG_SMP */ | 117 | #endif /* CONFIG_SMP */ |
92 | 118 | ||
93 | #ifndef atomic_add_return | 119 | #ifndef atomic_add_return |
@@ -98,6 +124,26 @@ ATOMIC_OP_RETURN(add, +) | |||
98 | ATOMIC_OP_RETURN(sub, -) | 124 | ATOMIC_OP_RETURN(sub, -) |
99 | #endif | 125 | #endif |
100 | 126 | ||
127 | #ifndef atomic_fetch_add | ||
128 | ATOMIC_FETCH_OP(add, +) | ||
129 | #endif | ||
130 | |||
131 | #ifndef atomic_fetch_sub | ||
132 | ATOMIC_FETCH_OP(sub, -) | ||
133 | #endif | ||
134 | |||
135 | #ifndef atomic_fetch_and | ||
136 | ATOMIC_FETCH_OP(and, &) | ||
137 | #endif | ||
138 | |||
139 | #ifndef atomic_fetch_or | ||
140 | ATOMIC_FETCH_OP(or, |) | ||
141 | #endif | ||
142 | |||
143 | #ifndef atomic_fetch_xor | ||
144 | ATOMIC_FETCH_OP(xor, ^) | ||
145 | #endif | ||
146 | |||
101 | #ifndef atomic_and | 147 | #ifndef atomic_and |
102 | ATOMIC_OP(and, &) | 148 | ATOMIC_OP(and, &) |
103 | #endif | 149 | #endif |
@@ -110,6 +156,7 @@ ATOMIC_OP(or, |) | |||
110 | ATOMIC_OP(xor, ^) | 156 | ATOMIC_OP(xor, ^) |
111 | #endif | 157 | #endif |
112 | 158 | ||
159 | #undef ATOMIC_FETCH_OP | ||
113 | #undef ATOMIC_OP_RETURN | 160 | #undef ATOMIC_OP_RETURN |
114 | #undef ATOMIC_OP | 161 | #undef ATOMIC_OP |
115 | 162 | ||
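Editor's note: the generic header provides two fallbacks: on SMP a cmpxchg() loop that reports the value it observed before the successful swap, and on UP an irq-save critical section that samples the counter before applying c_op. Roughly what ATOMIC_FETCH_OP(and, &) expands to in the SMP branch, as a sketch:

#include <linux/atomic.h>

static inline int demo_generic_fetch_and(int i, atomic_t *v)
{
        int c, old;

        c = v->counter;
        while ((old = cmpxchg(&v->counter, c, c & i)) != c)
                c = old;                        /* failed compare returned the live value */
        return c;                               /* equals the pre-"and" value at loop exit */
}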
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h index d48e78ccad3d..dad68bf46c77 100644 --- a/include/asm-generic/atomic64.h +++ b/include/asm-generic/atomic64.h | |||
@@ -27,16 +27,23 @@ extern void atomic64_##op(long long a, atomic64_t *v); | |||
27 | #define ATOMIC64_OP_RETURN(op) \ | 27 | #define ATOMIC64_OP_RETURN(op) \ |
28 | extern long long atomic64_##op##_return(long long a, atomic64_t *v); | 28 | extern long long atomic64_##op##_return(long long a, atomic64_t *v); |
29 | 29 | ||
30 | #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) | 30 | #define ATOMIC64_FETCH_OP(op) \ |
31 | extern long long atomic64_fetch_##op(long long a, atomic64_t *v); | ||
32 | |||
33 | #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op) | ||
31 | 34 | ||
32 | ATOMIC64_OPS(add) | 35 | ATOMIC64_OPS(add) |
33 | ATOMIC64_OPS(sub) | 36 | ATOMIC64_OPS(sub) |
34 | 37 | ||
35 | ATOMIC64_OP(and) | 38 | #undef ATOMIC64_OPS |
36 | ATOMIC64_OP(or) | 39 | #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_FETCH_OP(op) |
37 | ATOMIC64_OP(xor) | 40 | |
41 | ATOMIC64_OPS(and) | ||
42 | ATOMIC64_OPS(or) | ||
43 | ATOMIC64_OPS(xor) | ||
38 | 44 | ||
39 | #undef ATOMIC64_OPS | 45 | #undef ATOMIC64_OPS |
46 | #undef ATOMIC64_FETCH_OP | ||
40 | #undef ATOMIC64_OP_RETURN | 47 | #undef ATOMIC64_OP_RETURN |
41 | #undef ATOMIC64_OP | 48 | #undef ATOMIC64_OP |
42 | 49 | ||
diff --git a/include/linux/atomic.h b/include/linux/atomic.h index e451534fe54d..12d910d61b83 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h | |||
@@ -163,206 +163,201 @@ | |||
163 | #endif | 163 | #endif |
164 | #endif /* atomic_dec_return_relaxed */ | 164 | #endif /* atomic_dec_return_relaxed */ |
165 | 165 | ||
166 | /* atomic_xchg_relaxed */ | ||
167 | #ifndef atomic_xchg_relaxed | ||
168 | #define atomic_xchg_relaxed atomic_xchg | ||
169 | #define atomic_xchg_acquire atomic_xchg | ||
170 | #define atomic_xchg_release atomic_xchg | ||
171 | 166 | ||
172 | #else /* atomic_xchg_relaxed */ | 167 | /* atomic_fetch_add_relaxed */ |
168 | #ifndef atomic_fetch_add_relaxed | ||
169 | #define atomic_fetch_add_relaxed atomic_fetch_add | ||
170 | #define atomic_fetch_add_acquire atomic_fetch_add | ||
171 | #define atomic_fetch_add_release atomic_fetch_add | ||
173 | 172 | ||
174 | #ifndef atomic_xchg_acquire | 173 | #else /* atomic_fetch_add_relaxed */ |
175 | #define atomic_xchg_acquire(...) \ | ||
176 | __atomic_op_acquire(atomic_xchg, __VA_ARGS__) | ||
177 | #endif | ||
178 | 174 | ||
179 | #ifndef atomic_xchg_release | 175 | #ifndef atomic_fetch_add_acquire |
180 | #define atomic_xchg_release(...) \ | 176 | #define atomic_fetch_add_acquire(...) \ |
181 | __atomic_op_release(atomic_xchg, __VA_ARGS__) | 177 | __atomic_op_acquire(atomic_fetch_add, __VA_ARGS__) |
182 | #endif | 178 | #endif |
183 | 179 | ||
184 | #ifndef atomic_xchg | 180 | #ifndef atomic_fetch_add_release |
185 | #define atomic_xchg(...) \ | 181 | #define atomic_fetch_add_release(...) \ |
186 | __atomic_op_fence(atomic_xchg, __VA_ARGS__) | 182 | __atomic_op_release(atomic_fetch_add, __VA_ARGS__) |
187 | #endif | 183 | #endif |
188 | #endif /* atomic_xchg_relaxed */ | ||
189 | 184 | ||
190 | /* atomic_cmpxchg_relaxed */ | 185 | #ifndef atomic_fetch_add |
191 | #ifndef atomic_cmpxchg_relaxed | 186 | #define atomic_fetch_add(...) \ |
192 | #define atomic_cmpxchg_relaxed atomic_cmpxchg | 187 | __atomic_op_fence(atomic_fetch_add, __VA_ARGS__) |
193 | #define atomic_cmpxchg_acquire atomic_cmpxchg | ||
194 | #define atomic_cmpxchg_release atomic_cmpxchg | ||
195 | |||
196 | #else /* atomic_cmpxchg_relaxed */ | ||
197 | |||
198 | #ifndef atomic_cmpxchg_acquire | ||
199 | #define atomic_cmpxchg_acquire(...) \ | ||
200 | __atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__) | ||
201 | #endif | 188 | #endif |
189 | #endif /* atomic_fetch_add_relaxed */ | ||
202 | 190 | ||
203 | #ifndef atomic_cmpxchg_release | 191 | /* atomic_fetch_sub_relaxed */ |
204 | #define atomic_cmpxchg_release(...) \ | 192 | #ifndef atomic_fetch_sub_relaxed |
205 | __atomic_op_release(atomic_cmpxchg, __VA_ARGS__) | 193 | #define atomic_fetch_sub_relaxed atomic_fetch_sub |
206 | #endif | 194 | #define atomic_fetch_sub_acquire atomic_fetch_sub |
195 | #define atomic_fetch_sub_release atomic_fetch_sub | ||
207 | 196 | ||
208 | #ifndef atomic_cmpxchg | 197 | #else /* atomic_fetch_sub_relaxed */ |
209 | #define atomic_cmpxchg(...) \ | 198 | |
210 | __atomic_op_fence(atomic_cmpxchg, __VA_ARGS__) | 199 | #ifndef atomic_fetch_sub_acquire |
200 | #define atomic_fetch_sub_acquire(...) \ | ||
201 | __atomic_op_acquire(atomic_fetch_sub, __VA_ARGS__) | ||
211 | #endif | 202 | #endif |
212 | #endif /* atomic_cmpxchg_relaxed */ | ||
213 | 203 | ||
214 | #ifndef atomic64_read_acquire | 204 | #ifndef atomic_fetch_sub_release |
215 | #define atomic64_read_acquire(v) smp_load_acquire(&(v)->counter) | 205 | #define atomic_fetch_sub_release(...) \ |
206 | __atomic_op_release(atomic_fetch_sub, __VA_ARGS__) | ||
216 | #endif | 207 | #endif |
217 | 208 | ||
218 | #ifndef atomic64_set_release | 209 | #ifndef atomic_fetch_sub |
219 | #define atomic64_set_release(v, i) smp_store_release(&(v)->counter, (i)) | 210 | #define atomic_fetch_sub(...) \ |
211 | __atomic_op_fence(atomic_fetch_sub, __VA_ARGS__) | ||
220 | #endif | 212 | #endif |
213 | #endif /* atomic_fetch_sub_relaxed */ | ||
221 | 214 | ||
222 | /* atomic64_add_return_relaxed */ | 215 | /* atomic_fetch_or_relaxed */ |
223 | #ifndef atomic64_add_return_relaxed | 216 | #ifndef atomic_fetch_or_relaxed |
224 | #define atomic64_add_return_relaxed atomic64_add_return | 217 | #define atomic_fetch_or_relaxed atomic_fetch_or |
225 | #define atomic64_add_return_acquire atomic64_add_return | 218 | #define atomic_fetch_or_acquire atomic_fetch_or |
226 | #define atomic64_add_return_release atomic64_add_return | 219 | #define atomic_fetch_or_release atomic_fetch_or |
227 | 220 | ||
228 | #else /* atomic64_add_return_relaxed */ | 221 | #else /* atomic_fetch_or_relaxed */ |
229 | 222 | ||
230 | #ifndef atomic64_add_return_acquire | 223 | #ifndef atomic_fetch_or_acquire |
231 | #define atomic64_add_return_acquire(...) \ | 224 | #define atomic_fetch_or_acquire(...) \ |
232 | __atomic_op_acquire(atomic64_add_return, __VA_ARGS__) | 225 | __atomic_op_acquire(atomic_fetch_or, __VA_ARGS__) |
233 | #endif | 226 | #endif |
234 | 227 | ||
235 | #ifndef atomic64_add_return_release | 228 | #ifndef atomic_fetch_or_release |
236 | #define atomic64_add_return_release(...) \ | 229 | #define atomic_fetch_or_release(...) \ |
237 | __atomic_op_release(atomic64_add_return, __VA_ARGS__) | 230 | __atomic_op_release(atomic_fetch_or, __VA_ARGS__) |
238 | #endif | 231 | #endif |
239 | 232 | ||
240 | #ifndef atomic64_add_return | 233 | #ifndef atomic_fetch_or |
241 | #define atomic64_add_return(...) \ | 234 | #define atomic_fetch_or(...) \ |
242 | __atomic_op_fence(atomic64_add_return, __VA_ARGS__) | 235 | __atomic_op_fence(atomic_fetch_or, __VA_ARGS__) |
243 | #endif | 236 | #endif |
244 | #endif /* atomic64_add_return_relaxed */ | 237 | #endif /* atomic_fetch_or_relaxed */ |
245 | 238 | ||
246 | /* atomic64_inc_return_relaxed */ | 239 | /* atomic_fetch_and_relaxed */ |
247 | #ifndef atomic64_inc_return_relaxed | 240 | #ifndef atomic_fetch_and_relaxed |
248 | #define atomic64_inc_return_relaxed atomic64_inc_return | 241 | #define atomic_fetch_and_relaxed atomic_fetch_and |
249 | #define atomic64_inc_return_acquire atomic64_inc_return | 242 | #define atomic_fetch_and_acquire atomic_fetch_and |
250 | #define atomic64_inc_return_release atomic64_inc_return | 243 | #define atomic_fetch_and_release atomic_fetch_and |
251 | 244 | ||
252 | #else /* atomic64_inc_return_relaxed */ | 245 | #else /* atomic_fetch_and_relaxed */ |
253 | 246 | ||
254 | #ifndef atomic64_inc_return_acquire | 247 | #ifndef atomic_fetch_and_acquire |
255 | #define atomic64_inc_return_acquire(...) \ | 248 | #define atomic_fetch_and_acquire(...) \ |
256 | __atomic_op_acquire(atomic64_inc_return, __VA_ARGS__) | 249 | __atomic_op_acquire(atomic_fetch_and, __VA_ARGS__) |
257 | #endif | 250 | #endif |
258 | 251 | ||
259 | #ifndef atomic64_inc_return_release | 252 | #ifndef atomic_fetch_and_release |
260 | #define atomic64_inc_return_release(...) \ | 253 | #define atomic_fetch_and_release(...) \ |
261 | __atomic_op_release(atomic64_inc_return, __VA_ARGS__) | 254 | __atomic_op_release(atomic_fetch_and, __VA_ARGS__) |
262 | #endif | 255 | #endif |
263 | 256 | ||
264 | #ifndef atomic64_inc_return | 257 | #ifndef atomic_fetch_and |
265 | #define atomic64_inc_return(...) \ | 258 | #define atomic_fetch_and(...) \ |
266 | __atomic_op_fence(atomic64_inc_return, __VA_ARGS__) | 259 | __atomic_op_fence(atomic_fetch_and, __VA_ARGS__) |
267 | #endif | 260 | #endif |
268 | #endif /* atomic64_inc_return_relaxed */ | 261 | #endif /* atomic_fetch_and_relaxed */ |
269 | |||
270 | 262 | ||
271 | /* atomic64_sub_return_relaxed */ | 263 | #ifdef atomic_andnot |
272 | #ifndef atomic64_sub_return_relaxed | 264 | /* atomic_fetch_andnot_relaxed */ |
273 | #define atomic64_sub_return_relaxed atomic64_sub_return | 265 | #ifndef atomic_fetch_andnot_relaxed |
274 | #define atomic64_sub_return_acquire atomic64_sub_return | 266 | #define atomic_fetch_andnot_relaxed atomic_fetch_andnot |
275 | #define atomic64_sub_return_release atomic64_sub_return | 267 | #define atomic_fetch_andnot_acquire atomic_fetch_andnot |
268 | #define atomic_fetch_andnot_release atomic_fetch_andnot | ||
276 | 269 | ||
277 | #else /* atomic64_sub_return_relaxed */ | 270 | #else /* atomic_fetch_andnot_relaxed */ |
278 | 271 | ||
279 | #ifndef atomic64_sub_return_acquire | 272 | #ifndef atomic_fetch_andnot_acquire |
280 | #define atomic64_sub_return_acquire(...) \ | 273 | #define atomic_fetch_andnot_acquire(...) \ |
281 | __atomic_op_acquire(atomic64_sub_return, __VA_ARGS__) | 274 | __atomic_op_acquire(atomic_fetch_andnot, __VA_ARGS__) |
282 | #endif | 275 | #endif |
283 | 276 | ||
284 | #ifndef atomic64_sub_return_release | 277 | #ifndef atomic_fetch_andnot_release |
285 | #define atomic64_sub_return_release(...) \ | 278 | #define atomic_fetch_andnot_release(...) \ |
286 | __atomic_op_release(atomic64_sub_return, __VA_ARGS__) | 279 | __atomic_op_release(atomic_fetch_andnot, __VA_ARGS__) |
287 | #endif | 280 | #endif |
288 | 281 | ||
289 | #ifndef atomic64_sub_return | 282 | #ifndef atomic_fetch_andnot |
290 | #define atomic64_sub_return(...) \ | 283 | #define atomic_fetch_andnot(...) \ |
291 | __atomic_op_fence(atomic64_sub_return, __VA_ARGS__) | 284 | __atomic_op_fence(atomic_fetch_andnot, __VA_ARGS__) |
292 | #endif | 285 | #endif |
293 | #endif /* atomic64_sub_return_relaxed */ | 286 | #endif /* atomic_fetch_andnot_relaxed */ |
287 | #endif /* atomic_andnot */ | ||
294 | 288 | ||
295 | /* atomic64_dec_return_relaxed */ | 289 | /* atomic_fetch_xor_relaxed */ |
296 | #ifndef atomic64_dec_return_relaxed | 290 | #ifndef atomic_fetch_xor_relaxed |
297 | #define atomic64_dec_return_relaxed atomic64_dec_return | 291 | #define atomic_fetch_xor_relaxed atomic_fetch_xor |
298 | #define atomic64_dec_return_acquire atomic64_dec_return | 292 | #define atomic_fetch_xor_acquire atomic_fetch_xor |
299 | #define atomic64_dec_return_release atomic64_dec_return | 293 | #define atomic_fetch_xor_release atomic_fetch_xor |
300 | 294 | ||
301 | #else /* atomic64_dec_return_relaxed */ | 295 | #else /* atomic_fetch_xor_relaxed */ |
302 | 296 | ||
303 | #ifndef atomic64_dec_return_acquire | 297 | #ifndef atomic_fetch_xor_acquire |
304 | #define atomic64_dec_return_acquire(...) \ | 298 | #define atomic_fetch_xor_acquire(...) \ |
305 | __atomic_op_acquire(atomic64_dec_return, __VA_ARGS__) | 299 | __atomic_op_acquire(atomic_fetch_xor, __VA_ARGS__) |
306 | #endif | 300 | #endif |
307 | 301 | ||
308 | #ifndef atomic64_dec_return_release | 302 | #ifndef atomic_fetch_xor_release |
309 | #define atomic64_dec_return_release(...) \ | 303 | #define atomic_fetch_xor_release(...) \ |
310 | __atomic_op_release(atomic64_dec_return, __VA_ARGS__) | 304 | __atomic_op_release(atomic_fetch_xor, __VA_ARGS__) |
311 | #endif | 305 | #endif |
312 | 306 | ||
313 | #ifndef atomic64_dec_return | 307 | #ifndef atomic_fetch_xor |
314 | #define atomic64_dec_return(...) \ | 308 | #define atomic_fetch_xor(...) \ |
315 | __atomic_op_fence(atomic64_dec_return, __VA_ARGS__) | 309 | __atomic_op_fence(atomic_fetch_xor, __VA_ARGS__) |
316 | #endif | 310 | #endif |
317 | #endif /* atomic64_dec_return_relaxed */ | 311 | #endif /* atomic_fetch_xor_relaxed */ |
318 | 312 | ||
319 | /* atomic64_xchg_relaxed */ | ||
320 | #ifndef atomic64_xchg_relaxed | ||
321 | #define atomic64_xchg_relaxed atomic64_xchg | ||
322 | #define atomic64_xchg_acquire atomic64_xchg | ||
323 | #define atomic64_xchg_release atomic64_xchg | ||
324 | 313 | ||
325 | #else /* atomic64_xchg_relaxed */ | 314 | /* atomic_xchg_relaxed */ |
315 | #ifndef atomic_xchg_relaxed | ||
316 | #define atomic_xchg_relaxed atomic_xchg | ||
317 | #define atomic_xchg_acquire atomic_xchg | ||
318 | #define atomic_xchg_release atomic_xchg | ||
326 | 319 | ||
327 | #ifndef atomic64_xchg_acquire | 320 | #else /* atomic_xchg_relaxed */ |
328 | #define atomic64_xchg_acquire(...) \ | 321 | |
329 | __atomic_op_acquire(atomic64_xchg, __VA_ARGS__) | 322 | #ifndef atomic_xchg_acquire |
323 | #define atomic_xchg_acquire(...) \ | ||
324 | __atomic_op_acquire(atomic_xchg, __VA_ARGS__) | ||
330 | #endif | 325 | #endif |
331 | 326 | ||
332 | #ifndef atomic64_xchg_release | 327 | #ifndef atomic_xchg_release |
333 | #define atomic64_xchg_release(...) \ | 328 | #define atomic_xchg_release(...) \ |
334 | __atomic_op_release(atomic64_xchg, __VA_ARGS__) | 329 | __atomic_op_release(atomic_xchg, __VA_ARGS__) |
335 | #endif | 330 | #endif |
336 | 331 | ||
337 | #ifndef atomic64_xchg | 332 | #ifndef atomic_xchg |
338 | #define atomic64_xchg(...) \ | 333 | #define atomic_xchg(...) \ |
339 | __atomic_op_fence(atomic64_xchg, __VA_ARGS__) | 334 | __atomic_op_fence(atomic_xchg, __VA_ARGS__) |
340 | #endif | 335 | #endif |
341 | #endif /* atomic64_xchg_relaxed */ | 336 | #endif /* atomic_xchg_relaxed */ |
342 | 337 | ||
343 | /* atomic64_cmpxchg_relaxed */ | 338 | /* atomic_cmpxchg_relaxed */ |
344 | #ifndef atomic64_cmpxchg_relaxed | 339 | #ifndef atomic_cmpxchg_relaxed |
345 | #define atomic64_cmpxchg_relaxed atomic64_cmpxchg | 340 | #define atomic_cmpxchg_relaxed atomic_cmpxchg |
346 | #define atomic64_cmpxchg_acquire atomic64_cmpxchg | 341 | #define atomic_cmpxchg_acquire atomic_cmpxchg |
347 | #define atomic64_cmpxchg_release atomic64_cmpxchg | 342 | #define atomic_cmpxchg_release atomic_cmpxchg |
348 | 343 | ||
349 | #else /* atomic64_cmpxchg_relaxed */ | 344 | #else /* atomic_cmpxchg_relaxed */ |
350 | 345 | ||
351 | #ifndef atomic64_cmpxchg_acquire | 346 | #ifndef atomic_cmpxchg_acquire |
352 | #define atomic64_cmpxchg_acquire(...) \ | 347 | #define atomic_cmpxchg_acquire(...) \ |
353 | __atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__) | 348 | __atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__) |
354 | #endif | 349 | #endif |
355 | 350 | ||
356 | #ifndef atomic64_cmpxchg_release | 351 | #ifndef atomic_cmpxchg_release |
357 | #define atomic64_cmpxchg_release(...) \ | 352 | #define atomic_cmpxchg_release(...) \ |
358 | __atomic_op_release(atomic64_cmpxchg, __VA_ARGS__) | 353 | __atomic_op_release(atomic_cmpxchg, __VA_ARGS__) |
359 | #endif | 354 | #endif |
360 | 355 | ||
361 | #ifndef atomic64_cmpxchg | 356 | #ifndef atomic_cmpxchg |
362 | #define atomic64_cmpxchg(...) \ | 357 | #define atomic_cmpxchg(...) \ |
363 | __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__) | 358 | __atomic_op_fence(atomic_cmpxchg, __VA_ARGS__) |
364 | #endif | 359 | #endif |
365 | #endif /* atomic64_cmpxchg_relaxed */ | 360 | #endif /* atomic_cmpxchg_relaxed */ |
366 | 361 | ||
367 | /* cmpxchg_relaxed */ | 362 | /* cmpxchg_relaxed */ |
368 | #ifndef cmpxchg_relaxed | 363 | #ifndef cmpxchg_relaxed |
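The fallback ladders above all follow one pattern: an architecture that defines only the fully ordered operation gets the _relaxed/_acquire/_release names as plain aliases, while one that defines only the _relaxed form has the stronger variants built through the __atomic_op_acquire/_release/_fence() wrappers. A minimal sketch of that layering, assuming the wrappers bracket the relaxed call with smp_mb__before_atomic()/smp_mb__after_atomic() (the sketch_* names below are illustrative, not kernel symbols):

/*
 * Sketch only: ordered variants layered on a _relaxed primitive.
 * Approximates the kernel's __atomic_op_acquire/_release/_fence()
 * helpers; not the exact source.
 */
#define sketch_op_acquire(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	smp_mb__after_atomic();		/* order later accesses */	\
	__ret;								\
})

#define sketch_op_release(op, args...)					\
({									\
	smp_mb__before_atomic();	/* order earlier accesses */	\
	op##_relaxed(args);						\
})

#define sketch_op_fence(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	smp_mb__before_atomic();					\
	__ret = op##_relaxed(args);					\
	smp_mb__after_atomic();						\
	__ret;								\
})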
@@ -463,17 +458,27 @@ static inline void atomic_andnot(int i, atomic_t *v) | |||
463 | { | 458 | { |
464 | atomic_and(~i, v); | 459 | atomic_and(~i, v); |
465 | } | 460 | } |
466 | #endif | ||
467 | 461 | ||
468 | static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v) | 462 | static inline int atomic_fetch_andnot(int i, atomic_t *v) |
463 | { | ||
464 | return atomic_fetch_and(~i, v); | ||
465 | } | ||
466 | |||
467 | static inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v) | ||
468 | { | ||
469 | return atomic_fetch_and_relaxed(~i, v); | ||
470 | } | ||
471 | |||
472 | static inline int atomic_fetch_andnot_acquire(int i, atomic_t *v) | ||
469 | { | 473 | { |
470 | atomic_andnot(mask, v); | 474 | return atomic_fetch_and_acquire(~i, v); |
471 | } | 475 | } |
472 | 476 | ||
473 | static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v) | 477 | static inline int atomic_fetch_andnot_release(int i, atomic_t *v) |
474 | { | 478 | { |
475 | atomic_or(mask, v); | 479 | return atomic_fetch_and_release(~i, v); |
476 | } | 480 | } |
481 | #endif | ||
477 | 482 | ||
478 | /** | 483 | /** |
479 | * atomic_inc_not_zero_hint - increment if not null | 484 | * atomic_inc_not_zero_hint - increment if not null |
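The atomic_fetch_andnot*() fallbacks above simply forward to atomic_fetch_and*() with the mask inverted, so the caller still gets the old word back. A hedged usage sketch showing why that old value is useful; my_test_and_clear_pending() and MY_PENDING_FLAG are hypothetical names, not kernel symbols, and the snippet assumes the usual kernel atomic/type headers:

/* Illustration only: clear a flag bit and learn whether it was set. */
#define MY_PENDING_FLAG	0x01	/* hypothetical flag bit */

static inline bool my_test_and_clear_pending(atomic_t *state)
{
	/* fetch_andnot returns the pre-clear value, so test the old bit */
	return atomic_fetch_andnot(MY_PENDING_FLAG, state) & MY_PENDING_FLAG;
}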
@@ -558,36 +563,336 @@ static inline int atomic_dec_if_positive(atomic_t *v) | |||
558 | } | 563 | } |
559 | #endif | 564 | #endif |
560 | 565 | ||
561 | /** | 566 | #ifdef CONFIG_GENERIC_ATOMIC64 |
562 | * atomic_fetch_or - perform *p |= mask and return old value of *p | 567 | #include <asm-generic/atomic64.h> |
563 | * @mask: mask to OR on the atomic_t | 568 | #endif |
564 | * @p: pointer to atomic_t | ||
565 | */ | ||
566 | #ifndef atomic_fetch_or | ||
567 | static inline int atomic_fetch_or(int mask, atomic_t *p) | ||
568 | { | ||
569 | int old, val = atomic_read(p); | ||
570 | 569 | ||
571 | for (;;) { | 570 | #ifndef atomic64_read_acquire |
572 | old = atomic_cmpxchg(p, val, val | mask); | 571 | #define atomic64_read_acquire(v) smp_load_acquire(&(v)->counter) |
573 | if (old == val) | 572 | #endif |
574 | break; | ||
575 | val = old; | ||
576 | } | ||
577 | 573 | ||
578 | return old; | 574 | #ifndef atomic64_set_release |
579 | } | 575 | #define atomic64_set_release(v, i) smp_store_release(&(v)->counter, (i)) |
580 | #endif | 576 | #endif |
581 | 577 | ||
582 | #ifdef CONFIG_GENERIC_ATOMIC64 | 578 | /* atomic64_add_return_relaxed */ |
583 | #include <asm-generic/atomic64.h> | 579 | #ifndef atomic64_add_return_relaxed |
580 | #define atomic64_add_return_relaxed atomic64_add_return | ||
581 | #define atomic64_add_return_acquire atomic64_add_return | ||
582 | #define atomic64_add_return_release atomic64_add_return | ||
583 | |||
584 | #else /* atomic64_add_return_relaxed */ | ||
585 | |||
586 | #ifndef atomic64_add_return_acquire | ||
587 | #define atomic64_add_return_acquire(...) \ | ||
588 | __atomic_op_acquire(atomic64_add_return, __VA_ARGS__) | ||
589 | #endif | ||
590 | |||
591 | #ifndef atomic64_add_return_release | ||
592 | #define atomic64_add_return_release(...) \ | ||
593 | __atomic_op_release(atomic64_add_return, __VA_ARGS__) | ||
594 | #endif | ||
595 | |||
596 | #ifndef atomic64_add_return | ||
597 | #define atomic64_add_return(...) \ | ||
598 | __atomic_op_fence(atomic64_add_return, __VA_ARGS__) | ||
584 | #endif | 599 | #endif |
600 | #endif /* atomic64_add_return_relaxed */ | ||
601 | |||
602 | /* atomic64_inc_return_relaxed */ | ||
603 | #ifndef atomic64_inc_return_relaxed | ||
604 | #define atomic64_inc_return_relaxed atomic64_inc_return | ||
605 | #define atomic64_inc_return_acquire atomic64_inc_return | ||
606 | #define atomic64_inc_return_release atomic64_inc_return | ||
607 | |||
608 | #else /* atomic64_inc_return_relaxed */ | ||
609 | |||
610 | #ifndef atomic64_inc_return_acquire | ||
611 | #define atomic64_inc_return_acquire(...) \ | ||
612 | __atomic_op_acquire(atomic64_inc_return, __VA_ARGS__) | ||
613 | #endif | ||
614 | |||
615 | #ifndef atomic64_inc_return_release | ||
616 | #define atomic64_inc_return_release(...) \ | ||
617 | __atomic_op_release(atomic64_inc_return, __VA_ARGS__) | ||
618 | #endif | ||
619 | |||
620 | #ifndef atomic64_inc_return | ||
621 | #define atomic64_inc_return(...) \ | ||
622 | __atomic_op_fence(atomic64_inc_return, __VA_ARGS__) | ||
623 | #endif | ||
624 | #endif /* atomic64_inc_return_relaxed */ | ||
625 | |||
626 | |||
627 | /* atomic64_sub_return_relaxed */ | ||
628 | #ifndef atomic64_sub_return_relaxed | ||
629 | #define atomic64_sub_return_relaxed atomic64_sub_return | ||
630 | #define atomic64_sub_return_acquire atomic64_sub_return | ||
631 | #define atomic64_sub_return_release atomic64_sub_return | ||
632 | |||
633 | #else /* atomic64_sub_return_relaxed */ | ||
634 | |||
635 | #ifndef atomic64_sub_return_acquire | ||
636 | #define atomic64_sub_return_acquire(...) \ | ||
637 | __atomic_op_acquire(atomic64_sub_return, __VA_ARGS__) | ||
638 | #endif | ||
639 | |||
640 | #ifndef atomic64_sub_return_release | ||
641 | #define atomic64_sub_return_release(...) \ | ||
642 | __atomic_op_release(atomic64_sub_return, __VA_ARGS__) | ||
643 | #endif | ||
644 | |||
645 | #ifndef atomic64_sub_return | ||
646 | #define atomic64_sub_return(...) \ | ||
647 | __atomic_op_fence(atomic64_sub_return, __VA_ARGS__) | ||
648 | #endif | ||
649 | #endif /* atomic64_sub_return_relaxed */ | ||
650 | |||
651 | /* atomic64_dec_return_relaxed */ | ||
652 | #ifndef atomic64_dec_return_relaxed | ||
653 | #define atomic64_dec_return_relaxed atomic64_dec_return | ||
654 | #define atomic64_dec_return_acquire atomic64_dec_return | ||
655 | #define atomic64_dec_return_release atomic64_dec_return | ||
656 | |||
657 | #else /* atomic64_dec_return_relaxed */ | ||
658 | |||
659 | #ifndef atomic64_dec_return_acquire | ||
660 | #define atomic64_dec_return_acquire(...) \ | ||
661 | __atomic_op_acquire(atomic64_dec_return, __VA_ARGS__) | ||
662 | #endif | ||
663 | |||
664 | #ifndef atomic64_dec_return_release | ||
665 | #define atomic64_dec_return_release(...) \ | ||
666 | __atomic_op_release(atomic64_dec_return, __VA_ARGS__) | ||
667 | #endif | ||
668 | |||
669 | #ifndef atomic64_dec_return | ||
670 | #define atomic64_dec_return(...) \ | ||
671 | __atomic_op_fence(atomic64_dec_return, __VA_ARGS__) | ||
672 | #endif | ||
673 | #endif /* atomic64_dec_return_relaxed */ | ||
674 | |||
675 | |||
676 | /* atomic64_fetch_add_relaxed */ | ||
677 | #ifndef atomic64_fetch_add_relaxed | ||
678 | #define atomic64_fetch_add_relaxed atomic64_fetch_add | ||
679 | #define atomic64_fetch_add_acquire atomic64_fetch_add | ||
680 | #define atomic64_fetch_add_release atomic64_fetch_add | ||
681 | |||
682 | #else /* atomic64_fetch_add_relaxed */ | ||
683 | |||
684 | #ifndef atomic64_fetch_add_acquire | ||
685 | #define atomic64_fetch_add_acquire(...) \ | ||
686 | __atomic_op_acquire(atomic64_fetch_add, __VA_ARGS__) | ||
687 | #endif | ||
688 | |||
689 | #ifndef atomic64_fetch_add_release | ||
690 | #define atomic64_fetch_add_release(...) \ | ||
691 | __atomic_op_release(atomic64_fetch_add, __VA_ARGS__) | ||
692 | #endif | ||
693 | |||
694 | #ifndef atomic64_fetch_add | ||
695 | #define atomic64_fetch_add(...) \ | ||
696 | __atomic_op_fence(atomic64_fetch_add, __VA_ARGS__) | ||
697 | #endif | ||
698 | #endif /* atomic64_fetch_add_relaxed */ | ||
699 | |||
700 | /* atomic64_fetch_sub_relaxed */ | ||
701 | #ifndef atomic64_fetch_sub_relaxed | ||
702 | #define atomic64_fetch_sub_relaxed atomic64_fetch_sub | ||
703 | #define atomic64_fetch_sub_acquire atomic64_fetch_sub | ||
704 | #define atomic64_fetch_sub_release atomic64_fetch_sub | ||
705 | |||
706 | #else /* atomic64_fetch_sub_relaxed */ | ||
707 | |||
708 | #ifndef atomic64_fetch_sub_acquire | ||
709 | #define atomic64_fetch_sub_acquire(...) \ | ||
710 | __atomic_op_acquire(atomic64_fetch_sub, __VA_ARGS__) | ||
711 | #endif | ||
712 | |||
713 | #ifndef atomic64_fetch_sub_release | ||
714 | #define atomic64_fetch_sub_release(...) \ | ||
715 | __atomic_op_release(atomic64_fetch_sub, __VA_ARGS__) | ||
716 | #endif | ||
717 | |||
718 | #ifndef atomic64_fetch_sub | ||
719 | #define atomic64_fetch_sub(...) \ | ||
720 | __atomic_op_fence(atomic64_fetch_sub, __VA_ARGS__) | ||
721 | #endif | ||
722 | #endif /* atomic64_fetch_sub_relaxed */ | ||
723 | |||
724 | /* atomic64_fetch_or_relaxed */ | ||
725 | #ifndef atomic64_fetch_or_relaxed | ||
726 | #define atomic64_fetch_or_relaxed atomic64_fetch_or | ||
727 | #define atomic64_fetch_or_acquire atomic64_fetch_or | ||
728 | #define atomic64_fetch_or_release atomic64_fetch_or | ||
729 | |||
730 | #else /* atomic64_fetch_or_relaxed */ | ||
731 | |||
732 | #ifndef atomic64_fetch_or_acquire | ||
733 | #define atomic64_fetch_or_acquire(...) \ | ||
734 | __atomic_op_acquire(atomic64_fetch_or, __VA_ARGS__) | ||
735 | #endif | ||
736 | |||
737 | #ifndef atomic64_fetch_or_release | ||
738 | #define atomic64_fetch_or_release(...) \ | ||
739 | __atomic_op_release(atomic64_fetch_or, __VA_ARGS__) | ||
740 | #endif | ||
741 | |||
742 | #ifndef atomic64_fetch_or | ||
743 | #define atomic64_fetch_or(...) \ | ||
744 | __atomic_op_fence(atomic64_fetch_or, __VA_ARGS__) | ||
745 | #endif | ||
746 | #endif /* atomic64_fetch_or_relaxed */ | ||
747 | |||
748 | /* atomic64_fetch_and_relaxed */ | ||
749 | #ifndef atomic64_fetch_and_relaxed | ||
750 | #define atomic64_fetch_and_relaxed atomic64_fetch_and | ||
751 | #define atomic64_fetch_and_acquire atomic64_fetch_and | ||
752 | #define atomic64_fetch_and_release atomic64_fetch_and | ||
753 | |||
754 | #else /* atomic64_fetch_and_relaxed */ | ||
755 | |||
756 | #ifndef atomic64_fetch_and_acquire | ||
757 | #define atomic64_fetch_and_acquire(...) \ | ||
758 | __atomic_op_acquire(atomic64_fetch_and, __VA_ARGS__) | ||
759 | #endif | ||
760 | |||
761 | #ifndef atomic64_fetch_and_release | ||
762 | #define atomic64_fetch_and_release(...) \ | ||
763 | __atomic_op_release(atomic64_fetch_and, __VA_ARGS__) | ||
764 | #endif | ||
765 | |||
766 | #ifndef atomic64_fetch_and | ||
767 | #define atomic64_fetch_and(...) \ | ||
768 | __atomic_op_fence(atomic64_fetch_and, __VA_ARGS__) | ||
769 | #endif | ||
770 | #endif /* atomic64_fetch_and_relaxed */ | ||
771 | |||
772 | #ifdef atomic64_andnot | ||
773 | /* atomic64_fetch_andnot_relaxed */ | ||
774 | #ifndef atomic64_fetch_andnot_relaxed | ||
775 | #define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot | ||
776 | #define atomic64_fetch_andnot_acquire atomic64_fetch_andnot | ||
777 | #define atomic64_fetch_andnot_release atomic64_fetch_andnot | ||
778 | |||
779 | #else /* atomic64_fetch_andnot_relaxed */ | ||
780 | |||
781 | #ifndef atomic64_fetch_andnot_acquire | ||
782 | #define atomic64_fetch_andnot_acquire(...) \ | ||
783 | __atomic_op_acquire(atomic64_fetch_andnot, __VA_ARGS__) | ||
784 | #endif | ||
785 | |||
786 | #ifndef atomic64_fetch_andnot_release | ||
787 | #define atomic64_fetch_andnot_release(...) \ | ||
788 | __atomic_op_release(atomic64_fetch_andnot, __VA_ARGS__) | ||
789 | #endif | ||
790 | |||
791 | #ifndef atomic64_fetch_andnot | ||
792 | #define atomic64_fetch_andnot(...) \ | ||
793 | __atomic_op_fence(atomic64_fetch_andnot, __VA_ARGS__) | ||
794 | #endif | ||
795 | #endif /* atomic64_fetch_andnot_relaxed */ | ||
796 | #endif /* atomic64_andnot */ | ||
797 | |||
798 | /* atomic64_fetch_xor_relaxed */ | ||
799 | #ifndef atomic64_fetch_xor_relaxed | ||
800 | #define atomic64_fetch_xor_relaxed atomic64_fetch_xor | ||
801 | #define atomic64_fetch_xor_acquire atomic64_fetch_xor | ||
802 | #define atomic64_fetch_xor_release atomic64_fetch_xor | ||
803 | |||
804 | #else /* atomic64_fetch_xor_relaxed */ | ||
805 | |||
806 | #ifndef atomic64_fetch_xor_acquire | ||
807 | #define atomic64_fetch_xor_acquire(...) \ | ||
808 | __atomic_op_acquire(atomic64_fetch_xor, __VA_ARGS__) | ||
809 | #endif | ||
810 | |||
811 | #ifndef atomic64_fetch_xor_release | ||
812 | #define atomic64_fetch_xor_release(...) \ | ||
813 | __atomic_op_release(atomic64_fetch_xor, __VA_ARGS__) | ||
814 | #endif | ||
815 | |||
816 | #ifndef atomic64_fetch_xor | ||
817 | #define atomic64_fetch_xor(...) \ | ||
818 | __atomic_op_fence(atomic64_fetch_xor, __VA_ARGS__) | ||
819 | #endif | ||
820 | #endif /* atomic64_fetch_xor_relaxed */ | ||
821 | |||
822 | |||
823 | /* atomic64_xchg_relaxed */ | ||
824 | #ifndef atomic64_xchg_relaxed | ||
825 | #define atomic64_xchg_relaxed atomic64_xchg | ||
826 | #define atomic64_xchg_acquire atomic64_xchg | ||
827 | #define atomic64_xchg_release atomic64_xchg | ||
828 | |||
829 | #else /* atomic64_xchg_relaxed */ | ||
830 | |||
831 | #ifndef atomic64_xchg_acquire | ||
832 | #define atomic64_xchg_acquire(...) \ | ||
833 | __atomic_op_acquire(atomic64_xchg, __VA_ARGS__) | ||
834 | #endif | ||
835 | |||
836 | #ifndef atomic64_xchg_release | ||
837 | #define atomic64_xchg_release(...) \ | ||
838 | __atomic_op_release(atomic64_xchg, __VA_ARGS__) | ||
839 | #endif | ||
840 | |||
841 | #ifndef atomic64_xchg | ||
842 | #define atomic64_xchg(...) \ | ||
843 | __atomic_op_fence(atomic64_xchg, __VA_ARGS__) | ||
844 | #endif | ||
845 | #endif /* atomic64_xchg_relaxed */ | ||
846 | |||
847 | /* atomic64_cmpxchg_relaxed */ | ||
848 | #ifndef atomic64_cmpxchg_relaxed | ||
849 | #define atomic64_cmpxchg_relaxed atomic64_cmpxchg | ||
850 | #define atomic64_cmpxchg_acquire atomic64_cmpxchg | ||
851 | #define atomic64_cmpxchg_release atomic64_cmpxchg | ||
852 | |||
853 | #else /* atomic64_cmpxchg_relaxed */ | ||
854 | |||
855 | #ifndef atomic64_cmpxchg_acquire | ||
856 | #define atomic64_cmpxchg_acquire(...) \ | ||
857 | __atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__) | ||
858 | #endif | ||
859 | |||
860 | #ifndef atomic64_cmpxchg_release | ||
861 | #define atomic64_cmpxchg_release(...) \ | ||
862 | __atomic_op_release(atomic64_cmpxchg, __VA_ARGS__) | ||
863 | #endif | ||
864 | |||
865 | #ifndef atomic64_cmpxchg | ||
866 | #define atomic64_cmpxchg(...) \ | ||
867 | __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__) | ||
868 | #endif | ||
869 | #endif /* atomic64_cmpxchg_relaxed */ | ||
585 | 870 | ||
586 | #ifndef atomic64_andnot | 871 | #ifndef atomic64_andnot |
587 | static inline void atomic64_andnot(long long i, atomic64_t *v) | 872 | static inline void atomic64_andnot(long long i, atomic64_t *v) |
588 | { | 873 | { |
589 | atomic64_and(~i, v); | 874 | atomic64_and(~i, v); |
590 | } | 875 | } |
876 | |||
877 | static inline long long atomic64_fetch_andnot(long long i, atomic64_t *v) | ||
878 | { | ||
879 | return atomic64_fetch_and(~i, v); | ||
880 | } | ||
881 | |||
882 | static inline long long atomic64_fetch_andnot_relaxed(long long i, atomic64_t *v) | ||
883 | { | ||
884 | return atomic64_fetch_and_relaxed(~i, v); | ||
885 | } | ||
886 | |||
887 | static inline long long atomic64_fetch_andnot_acquire(long long i, atomic64_t *v) | ||
888 | { | ||
889 | return atomic64_fetch_and_acquire(~i, v); | ||
890 | } | ||
891 | |||
892 | static inline long long atomic64_fetch_andnot_release(long long i, atomic64_t *v) | ||
893 | { | ||
894 | return atomic64_fetch_and_release(~i, v); | ||
895 | } | ||
591 | #endif | 896 | #endif |
592 | 897 | ||
593 | #include <asm-generic/atomic-long.h> | 898 | #include <asm-generic/atomic-long.h> |
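The hunk above also retires the generic cmpxchg()-loop implementation of atomic_fetch_or() now that the fetch_or fallback ladder covers it, and relocates the atomic64_*() ordering fallbacks below the 32-bit ones. For reference, the retired loop shape written as a stand-alone sketch (sketch_fetch_or is an illustrative name):

/*
 * Sketch of the cmpxchg()-loop fallback this series removes for OR:
 * read the counter, try to install old|mask, retry if we raced.
 */
static inline int sketch_fetch_or(int mask, atomic_t *p)
{
	int old = atomic_read(p);

	for (;;) {
		int prev = atomic_cmpxchg(p, old, old | mask);
		if (prev == old)
			return old;	/* success: return pre-OR value */
		old = prev;		/* lost the race: retry with fresh value */
	}
}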
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c index fec082338668..19248ddf37ce 100644 --- a/kernel/locking/qrwlock.c +++ b/kernel/locking/qrwlock.c | |||
@@ -93,7 +93,7 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts) | |||
93 | * that accesses can't leak upwards out of our subsequent critical | 93 | * that accesses can't leak upwards out of our subsequent critical |
94 | * section in the case that the lock is currently held for write. | 94 | * section in the case that the lock is currently held for write. |
95 | */ | 95 | */ |
96 | cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts) - _QR_BIAS; | 96 | cnts = atomic_fetch_add_acquire(_QR_BIAS, &lock->cnts); |
97 | rspin_until_writer_unlock(lock, cnts); | 97 | rspin_until_writer_unlock(lock, cnts); |
98 | 98 | ||
99 | /* | 99 | /* |
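In the qrwlock slow path, atomic_fetch_add_acquire() returns the pre-add value directly, so the old "add_return minus bias" arithmetic goes away while the acquire ordering stays the same. A small equivalence sketch; the helper names are hypothetical, only _QR_BIAS and lock->cnts come from the code above:

/* Illustration: both helpers yield the reader count before the add. */
static inline u32 qr_old_count_via_return(struct qrwlock *lock)
{
	return atomic_add_return_acquire(_QR_BIAS, &lock->cnts) - _QR_BIAS;
}

static inline u32 qr_old_count_via_fetch(struct qrwlock *lock)
{
	/* same value, one fewer arithmetic op, same acquire ordering */
	return atomic_fetch_add_acquire(_QR_BIAS, &lock->cnts);
}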
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h index 21ede57f68b3..37649e69056c 100644 --- a/kernel/locking/qspinlock_paravirt.h +++ b/kernel/locking/qspinlock_paravirt.h | |||
@@ -112,12 +112,12 @@ static __always_inline int trylock_clear_pending(struct qspinlock *lock) | |||
112 | #else /* _Q_PENDING_BITS == 8 */ | 112 | #else /* _Q_PENDING_BITS == 8 */ |
113 | static __always_inline void set_pending(struct qspinlock *lock) | 113 | static __always_inline void set_pending(struct qspinlock *lock) |
114 | { | 114 | { |
115 | atomic_set_mask(_Q_PENDING_VAL, &lock->val); | 115 | atomic_or(_Q_PENDING_VAL, &lock->val); |
116 | } | 116 | } |
117 | 117 | ||
118 | static __always_inline void clear_pending(struct qspinlock *lock) | 118 | static __always_inline void clear_pending(struct qspinlock *lock) |
119 | { | 119 | { |
120 | atomic_clear_mask(_Q_PENDING_VAL, &lock->val); | 120 | atomic_andnot(_Q_PENDING_VAL, &lock->val); |
121 | } | 121 | } |
122 | 122 | ||
123 | static __always_inline int trylock_clear_pending(struct qspinlock *lock) | 123 | static __always_inline int trylock_clear_pending(struct qspinlock *lock) |
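set_pending()/clear_pending() now use the generic atomic_or()/atomic_andnot() bitwise ops instead of the deprecated atomic_set_mask()/atomic_clear_mask(). On an architecture without a native atomic_andnot(), the fallback shown earlier reduces clear_pending() to an atomic_and() on the inverted mask; a sketch of that reduction (sketch_clear_pending is an illustrative name):

/* Illustration: what clear_pending() becomes without a native andnot. */
static __always_inline void sketch_clear_pending(struct qspinlock *lock)
{
	atomic_and(~_Q_PENDING_VAL, &lock->val);	/* == atomic_andnot() */
}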
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c index 2031281bb940..447e08de1fab 100644 --- a/kernel/locking/rwsem-xadd.c +++ b/kernel/locking/rwsem-xadd.c | |||
@@ -153,7 +153,7 @@ __rwsem_mark_wake(struct rw_semaphore *sem, | |||
153 | if (wake_type != RWSEM_WAKE_READ_OWNED) { | 153 | if (wake_type != RWSEM_WAKE_READ_OWNED) { |
154 | adjustment = RWSEM_ACTIVE_READ_BIAS; | 154 | adjustment = RWSEM_ACTIVE_READ_BIAS; |
155 | try_reader_grant: | 155 | try_reader_grant: |
156 | oldcount = atomic_long_add_return(adjustment, &sem->count) - adjustment; | 156 | oldcount = atomic_long_fetch_add(adjustment, &sem->count); |
157 | 157 | ||
158 | if (unlikely(oldcount < RWSEM_WAITING_BIAS)) { | 158 | if (unlikely(oldcount < RWSEM_WAITING_BIAS)) { |
159 | /* | 159 | /* |
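The rwsem wake path applies the same transformation as the qrwlock hunk: the code wants the old count, so atomic_long_fetch_add() replaces the add_return-then-subtract idiom with identical full ordering. A sketch under that reading; rwsem_old_count() is a hypothetical helper, not a kernel symbol:

/* Illustration: fetch the pre-add count in one step. */
static inline long rwsem_old_count(struct rw_semaphore *sem, long adjustment)
{
	/* old value directly, instead of add_return(...) - adjustment */
	return atomic_long_fetch_add(adjustment, &sem->count);
}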
diff --git a/lib/atomic64.c b/lib/atomic64.c index 2886ebac6567..53c2d5edc826 100644 --- a/lib/atomic64.c +++ b/lib/atomic64.c | |||
@@ -96,17 +96,41 @@ long long atomic64_##op##_return(long long a, atomic64_t *v) \ | |||
96 | } \ | 96 | } \ |
97 | EXPORT_SYMBOL(atomic64_##op##_return); | 97 | EXPORT_SYMBOL(atomic64_##op##_return); |
98 | 98 | ||
99 | #define ATOMIC64_FETCH_OP(op, c_op) \ | ||
100 | long long atomic64_fetch_##op(long long a, atomic64_t *v) \ | ||
101 | { \ | ||
102 | unsigned long flags; \ | ||
103 | raw_spinlock_t *lock = lock_addr(v); \ | ||
104 | long long val; \ | ||
105 | \ | ||
106 | raw_spin_lock_irqsave(lock, flags); \ | ||
107 | val = v->counter; \ | ||
108 | v->counter c_op a; \ | ||
109 | raw_spin_unlock_irqrestore(lock, flags); \ | ||
110 | return val; \ | ||
111 | } \ | ||
112 | EXPORT_SYMBOL(atomic64_fetch_##op); | ||
113 | |||
99 | #define ATOMIC64_OPS(op, c_op) \ | 114 | #define ATOMIC64_OPS(op, c_op) \ |
100 | ATOMIC64_OP(op, c_op) \ | 115 | ATOMIC64_OP(op, c_op) \ |
101 | ATOMIC64_OP_RETURN(op, c_op) | 116 | ATOMIC64_OP_RETURN(op, c_op) \ |
117 | ATOMIC64_FETCH_OP(op, c_op) | ||
102 | 118 | ||
103 | ATOMIC64_OPS(add, +=) | 119 | ATOMIC64_OPS(add, +=) |
104 | ATOMIC64_OPS(sub, -=) | 120 | ATOMIC64_OPS(sub, -=) |
105 | ATOMIC64_OP(and, &=) | ||
106 | ATOMIC64_OP(or, |=) | ||
107 | ATOMIC64_OP(xor, ^=) | ||
108 | 121 | ||
109 | #undef ATOMIC64_OPS | 122 | #undef ATOMIC64_OPS |
123 | #define ATOMIC64_OPS(op, c_op) \ | ||
124 | ATOMIC64_OP(op, c_op) \ | ||
125 | ATOMIC64_OP_RETURN(op, c_op) \ | ||
126 | ATOMIC64_FETCH_OP(op, c_op) | ||
127 | |||
128 | ATOMIC64_OPS(and, &=) | ||
129 | ATOMIC64_OPS(or, |=) | ||
130 | ATOMIC64_OPS(xor, ^=) | ||
131 | |||
132 | #undef ATOMIC64_OPS | ||
133 | #undef ATOMIC64_FETCH_OP | ||
110 | #undef ATOMIC64_OP_RETURN | 134 | #undef ATOMIC64_OP_RETURN |
111 | #undef ATOMIC64_OP | 135 | #undef ATOMIC64_OP |
112 | 136 | ||
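In the spinlock-based generic atomic64 library, ATOMIC64_FETCH_OP() snapshots the counter under the hashed per-counter lock, applies the operation, and returns the snapshot. Expanding the macro by hand for "and" gives roughly the following (hand expansion for illustration; the real symbol is generated by the macro above):

/* Hand expansion of ATOMIC64_FETCH_OP(and, &=). */
long long atomic64_fetch_and(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);	/* per-counter hashed lock */
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;	/* snapshot the old value */
	v->counter &= a;	/* apply the operation */
	raw_spin_unlock_irqrestore(lock, flags);
	return val;		/* fetch_* returns the pre-op value */
}
EXPORT_SYMBOL(atomic64_fetch_and);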
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c index 123481814320..dbb369145dda 100644 --- a/lib/atomic64_test.c +++ b/lib/atomic64_test.c | |||
@@ -53,11 +53,25 @@ do { \ | |||
53 | BUG_ON(atomic##bit##_read(&v) != r); \ | 53 | BUG_ON(atomic##bit##_read(&v) != r); \ |
54 | } while (0) | 54 | } while (0) |
55 | 55 | ||
56 | #define TEST_FETCH(bit, op, c_op, val) \ | ||
57 | do { \ | ||
58 | atomic##bit##_set(&v, v0); \ | ||
59 | r = v0; \ | ||
60 | r c_op val; \ | ||
61 | BUG_ON(atomic##bit##_##op(val, &v) != v0); \ | ||
62 | BUG_ON(atomic##bit##_read(&v) != r); \ | ||
63 | } while (0) | ||
64 | |||
56 | #define RETURN_FAMILY_TEST(bit, op, c_op, val) \ | 65 | #define RETURN_FAMILY_TEST(bit, op, c_op, val) \ |
57 | do { \ | 66 | do { \ |
58 | FAMILY_TEST(TEST_RETURN, bit, op, c_op, val); \ | 67 | FAMILY_TEST(TEST_RETURN, bit, op, c_op, val); \ |
59 | } while (0) | 68 | } while (0) |
60 | 69 | ||
70 | #define FETCH_FAMILY_TEST(bit, op, c_op, val) \ | ||
71 | do { \ | ||
72 | FAMILY_TEST(TEST_FETCH, bit, op, c_op, val); \ | ||
73 | } while (0) | ||
74 | |||
61 | #define TEST_ARGS(bit, op, init, ret, expect, args...) \ | 75 | #define TEST_ARGS(bit, op, init, ret, expect, args...) \ |
62 | do { \ | 76 | do { \ |
63 | atomic##bit##_set(&v, init); \ | 77 | atomic##bit##_set(&v, init); \ |
@@ -114,6 +128,16 @@ static __init void test_atomic(void) | |||
114 | RETURN_FAMILY_TEST(, sub_return, -=, onestwos); | 128 | RETURN_FAMILY_TEST(, sub_return, -=, onestwos); |
115 | RETURN_FAMILY_TEST(, sub_return, -=, -one); | 129 | RETURN_FAMILY_TEST(, sub_return, -=, -one); |
116 | 130 | ||
131 | FETCH_FAMILY_TEST(, fetch_add, +=, onestwos); | ||
132 | FETCH_FAMILY_TEST(, fetch_add, +=, -one); | ||
133 | FETCH_FAMILY_TEST(, fetch_sub, -=, onestwos); | ||
134 | FETCH_FAMILY_TEST(, fetch_sub, -=, -one); | ||
135 | |||
136 | FETCH_FAMILY_TEST(, fetch_or, |=, v1); | ||
137 | FETCH_FAMILY_TEST(, fetch_and, &=, v1); | ||
138 | FETCH_FAMILY_TEST(, fetch_andnot, &= ~, v1); | ||
139 | FETCH_FAMILY_TEST(, fetch_xor, ^=, v1); | ||
140 | |||
117 | INC_RETURN_FAMILY_TEST(, v0); | 141 | INC_RETURN_FAMILY_TEST(, v0); |
118 | DEC_RETURN_FAMILY_TEST(, v0); | 142 | DEC_RETURN_FAMILY_TEST(, v0); |
119 | 143 | ||
@@ -154,6 +178,16 @@ static __init void test_atomic64(void) | |||
154 | RETURN_FAMILY_TEST(64, sub_return, -=, onestwos); | 178 | RETURN_FAMILY_TEST(64, sub_return, -=, onestwos); |
155 | RETURN_FAMILY_TEST(64, sub_return, -=, -one); | 179 | RETURN_FAMILY_TEST(64, sub_return, -=, -one); |
156 | 180 | ||
181 | FETCH_FAMILY_TEST(64, fetch_add, +=, onestwos); | ||
182 | FETCH_FAMILY_TEST(64, fetch_add, +=, -one); | ||
183 | FETCH_FAMILY_TEST(64, fetch_sub, -=, onestwos); | ||
184 | FETCH_FAMILY_TEST(64, fetch_sub, -=, -one); | ||
185 | |||
186 | FETCH_FAMILY_TEST(64, fetch_or, |=, v1); | ||
187 | FETCH_FAMILY_TEST(64, fetch_and, &=, v1); | ||
188 | FETCH_FAMILY_TEST(64, fetch_andnot, &= ~, v1); | ||
189 | FETCH_FAMILY_TEST(64, fetch_xor, ^=, v1); | ||
190 | |||
157 | INIT(v0); | 191 | INIT(v0); |
158 | atomic64_inc(&v); | 192 | atomic64_inc(&v); |
159 | r += one; | 193 | r += one; |
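TEST_FETCH() mirrors TEST_RETURN() but checks both that the old value comes back and that the new value lands in memory. Expanding one case by hand, TEST_FETCH(64, fetch_or, |=, v1) inside test_atomic64() amounts to roughly the following (hand expansion for illustration; v, r, v0 and v1 are the surrounding test's variables):

/* Hand expansion of TEST_FETCH(64, fetch_or, |=, v1). */
atomic64_set(&v, v0);				/* start from a known value     */
r = v0;
r |= v1;					/* model the expected new value */
BUG_ON(atomic64_fetch_or(v1, &v) != v0);	/* old value must come back     */
BUG_ON(atomic64_read(&v) != r);			/* new value must be stored     */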