Diffstat (limited to 'arch/arc/include/asm/atomic.h')
 arch/arc/include/asm/atomic.h | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+), 0 deletions(-)
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 9917a45fc430..20b7dc17979e 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -43,6 +43,12 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 { \
         unsigned int temp; \
         \
+        /* \
+         * Explicit full memory barrier needed before/after as \
+         * LLOCK/SCOND themselves don't provide any such semantics \
+         */ \
+        smp_mb(); \
+        \
         __asm__ __volatile__( \
         "1:     llock   %0, [%1]        \n" \
         "       " #asm_op " %0, %0, %2  \n" \
@@ -52,6 +58,8 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
         : "r"(&v->counter), "ir"(i) \
         : "cc"); \
         \
+        smp_mb(); \
+        \
         return temp; \
 }
 
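For reference, a rough sketch of what the LLOCK/SCOND variant of this macro
expands to for op=add after the change; the scond/bnz retry loop and the
"=&r"(temp) output constraint are not visible in the hunks above and are
assumed from the usual LL/SC pattern, so treat this as illustrative rather
than the exact source:

/* Sketch only: assumed expansion of atomic_add_return() with LLOCK/SCOND */
static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned int temp;

        /* full barrier before: LLOCK/SCOND themselves give no ordering */
        smp_mb();

        __asm__ __volatile__(
        "1:     llock   %0, [%1]        \n"     /* load-locked v->counter  */
        "       add     %0, %0, %2      \n"     /* temp += i               */
        "       scond   %0, [%1]        \n"     /* conditional store back  */
        "       bnz     1b              \n"     /* reservation lost: retry */
        : "=&r"(temp)                           /* assumed constraint      */
        : "r"(&v->counter), "ir"(i)
        : "cc");

        /* full barrier after the successful read-modify-write */
        smp_mb();

        return temp;
}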
@@ -105,6 +113,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
         unsigned long flags; \
         unsigned long temp; \
         \
+        /* \
+         * spin lock/unlock provides the needed smp_mb() before/after \
+         */ \
         atomic_ops_lock(flags); \
         temp = v->counter; \
         temp c_op i; \
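Correspondingly, a sketch of the !CONFIG_ARC_HAS_LLSC fallback for op=add,
c_op=+=; the store back to v->counter and the atomic_ops_unlock() call sit
outside the hunk above and are assumed here:

/* Sketch only: assumed expansion of the spinlock-protected fallback */
static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned long flags;
        unsigned long temp;

        /*
         * spin lock/unlock provides the needed smp_mb() before/after,
         * so no explicit barrier is added on this path.
         */
        atomic_ops_lock(flags);
        temp = v->counter;
        temp += i;
        v->counter = temp;              /* assumed: write back under the lock */
        atomic_ops_unlock(flags);

        return temp;
}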
@@ -142,9 +153,19 @@ ATOMIC_OP(and, &=, and)
 #define __atomic_add_unless(v, a, u) \
 ({ \
         int c, old; \
+        \
+        /* \
+         * Explicit full memory barrier needed before/after as \
+         * LLOCK/SCOND themselves don't provide any such semantics \
+         */ \
+        smp_mb(); \
+        \
         c = atomic_read(v); \
         while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
                 c = old; \
+        \
+        smp_mb(); \
+        \
         c; \
 })
 
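Finally, a hypothetical caller illustrating why __atomic_add_unless() needs
the barrier pair; get_ref() is a made-up refcount-style helper, not part of
the patch:

/* Hypothetical example: relies on __atomic_add_unless() being fully ordered */
static inline bool get_ref(atomic_t *refcount)
{
        /*
         * Take a reference only if the count is still non-zero.  With the
         * smp_mb() pair added above, the operation behaves as a full
         * barrier, as Documentation/atomic_ops.txt expects of
         * value-returning atomics.
         */
        return __atomic_add_unless(refcount, 1, 0) != 0;
}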