Diffstat (limited to 'arch/arm/include/asm/atomic.h')
-rw-r--r--  arch/arm/include/asm/atomic.h  108
1 file changed, 46 insertions, 62 deletions
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index da1c77d39327..62d2cb53b069 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -12,6 +12,7 @@
 #define __ASM_ARM_ATOMIC_H
 
 #include <linux/compiler.h>
+#include <linux/prefetch.h>
 #include <linux/types.h>
 #include <linux/irqflags.h>
 #include <asm/barrier.h>
@@ -41,6 +42,7 @@ static inline void atomic_add(int i, atomic_t *v)
 	unsigned long tmp;
 	int result;
 
+	prefetchw(&v->counter);
 	__asm__ __volatile__("@ atomic_add\n"
 "1:	ldrex	%0, [%3]\n"
 "	add	%0, %0, %4\n"
@@ -79,6 +81,7 @@ static inline void atomic_sub(int i, atomic_t *v)
 	unsigned long tmp;
 	int result;
 
+	prefetchw(&v->counter);
 	__asm__ __volatile__("@ atomic_sub\n"
 "1:	ldrex	%0, [%3]\n"
 "	sub	%0, %0, %4\n"
@@ -114,7 +117,8 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 
 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 {
-	unsigned long oldval, res;
+	int oldval;
+	unsigned long res;
 
 	smp_mb();
 
@@ -134,21 +138,6 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 	return oldval;
 }
 
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
-	unsigned long tmp, tmp2;
-
-	__asm__ __volatile__("@ atomic_clear_mask\n"
-"1:	ldrex	%0, [%3]\n"
-"	bic	%0, %0, %4\n"
-"	strex	%1, %0, [%3]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
-	: "r" (addr), "Ir" (mask)
-	: "cc");
-}
-
 #else /* ARM_ARCH_6 */
 
 #ifdef CONFIG_SMP
@@ -197,15 +186,6 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return ret;
 }
 
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
-	unsigned long flags;
-
-	raw_local_irq_save(flags);
-	*addr &= ~mask;
-	raw_local_irq_restore(flags);
-}
-
 #endif /* __LINUX_ARM_ARCH__ */
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
@@ -238,15 +218,15 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 
 #ifndef CONFIG_GENERIC_ATOMIC64
 typedef struct {
-	u64 __aligned(8) counter;
+	long long counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(i)	{ (i) }
 
 #ifdef CONFIG_ARM_LPAE
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
-	u64 result;
+	long long result;
 
 	__asm__ __volatile__("@ atomic64_read\n"
 "	ldrd	%0, %H0, [%1]"
@@ -257,7 +237,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
 	return result;
 }
 
-static inline void atomic64_set(atomic64_t *v, u64 i)
+static inline void atomic64_set(atomic64_t *v, long long i)
 {
 	__asm__ __volatile__("@ atomic64_set\n"
 "	strd	%2, %H2, [%1]"
@@ -266,9 +246,9 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
 	);
 }
 #else
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
-	u64 result;
+	long long result;
 
 	__asm__ __volatile__("@ atomic64_read\n"
 "	ldrexd	%0, %H0, [%1]"
@@ -279,10 +259,11 @@ static inline u64 atomic64_read(const atomic64_t *v)
 	return result;
 }
 
-static inline void atomic64_set(atomic64_t *v, u64 i)
+static inline void atomic64_set(atomic64_t *v, long long i)
 {
-	u64 tmp;
+	long long tmp;
 
+	prefetchw(&v->counter);
 	__asm__ __volatile__("@ atomic64_set\n"
 "1:	ldrexd	%0, %H0, [%2]\n"
 "	strexd	%0, %3, %H3, [%2]\n"
@@ -294,15 +275,16 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
 }
 #endif
 
-static inline void atomic64_add(u64 i, atomic64_t *v)
+static inline void atomic64_add(long long i, atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;
 
+	prefetchw(&v->counter);
 	__asm__ __volatile__("@ atomic64_add\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
-"	adds	%0, %0, %4\n"
-"	adc	%H0, %H0, %H4\n"
+"	adds	%Q0, %Q0, %Q4\n"
+"	adc	%R0, %R0, %R4\n"
 "	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
@@ -311,17 +293,17 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
 	: "cc");
 }
 
-static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;
 
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic64_add_return\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
-"	adds	%0, %0, %4\n"
-"	adc	%H0, %H0, %H4\n"
+"	adds	%Q0, %Q0, %Q4\n"
+"	adc	%R0, %R0, %R4\n"
 "	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
@@ -334,15 +316,16 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
 	return result;
 }
 
-static inline void atomic64_sub(u64 i, atomic64_t *v)
+static inline void atomic64_sub(long long i, atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;
 
+	prefetchw(&v->counter);
 	__asm__ __volatile__("@ atomic64_sub\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
-"	subs	%0, %0, %4\n"
-"	sbc	%H0, %H0, %H4\n"
+"	subs	%Q0, %Q0, %Q4\n"
+"	sbc	%R0, %R0, %R4\n"
 "	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
@@ -351,17 +334,17 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
 	: "cc");
 }
 
-static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;
 
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic64_sub_return\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
-"	subs	%0, %0, %4\n"
-"	sbc	%H0, %H0, %H4\n"
+"	subs	%Q0, %Q0, %Q4\n"
+"	sbc	%R0, %R0, %R4\n"
 "	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
@@ -374,9 +357,10 @@ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
 	return result;
 }
 
-static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
+static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
+					 long long new)
 {
-	u64 oldval;
+	long long oldval;
 	unsigned long res;
 
 	smp_mb();
@@ -398,9 +382,9 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
 	return oldval;
 }
 
-static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
+static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;
 
 	smp_mb();
@@ -419,18 +403,18 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
 	return result;
 }
 
-static inline u64 atomic64_dec_if_positive(atomic64_t *v)
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;
 
 	smp_mb();
 
 	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
-"	subs	%0, %0, #1\n"
-"	sbc	%H0, %H0, #0\n"
-"	teq	%H0, #0\n"
+"	subs	%Q0, %Q0, #1\n"
+"	sbc	%R0, %R0, #0\n"
+"	teq	%R0, #0\n"
 "	bmi	2f\n"
 "	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
@@ -445,9 +429,9 @@ static inline u64 atomic64_dec_if_positive(atomic64_t *v)
 	return result;
 }
 
-static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
-	u64 val;
+	long long val;
 	unsigned long tmp;
 	int ret = 1;
 
@@ -459,8 +443,8 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
459" teqeq %H0, %H5\n" 443" teqeq %H0, %H5\n"
460" moveq %1, #0\n" 444" moveq %1, #0\n"
461" beq 2f\n" 445" beq 2f\n"
462" adds %0, %0, %6\n" 446" adds %Q0, %Q0, %Q6\n"
463" adc %H0, %H0, %H6\n" 447" adc %R0, %R0, %R6\n"
464" strexd %2, %0, %H0, [%4]\n" 448" strexd %2, %0, %H0, [%4]\n"
465" teq %2, #0\n" 449" teq %2, #0\n"
466" bne 1b\n" 450" bne 1b\n"