author		Linus Torvalds <torvalds@linux-foundation.org>	2014-10-13 09:48:00 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-13 09:48:00 -0400
commit		dbb885fecc1b1b35e93416bedd24d21bd20f60ed (patch)
tree		9aa92bcc4e3d3594eba0ba85d72b878d85f35a59
parent		d6dd50e07c5bec00db2005969b1a01f8ca3d25ef (diff)
parent		2291059c852706c6f5ffb400366042b7625066cd (diff)
Merge branch 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull arch atomic cleanups from Ingo Molnar:
 "This is a series kept separate from the main locking tree, which
  cleans up and improves various details in the atomics type handling:

   - Remove the unused atomic_or_long() method

   - Consolidate and compress atomic ops implementations between
     architectures, to reduce linecount and to make it easier to add
     new ops.

   - Rewrite generic atomic support to only require cmpxchg() from an
     architecture - generate all other methods from that"

* 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
  locking,arch: Use ACCESS_ONCE() instead of cast to volatile in atomic_read()
  locking, mips: Fix atomics
  locking, sparc64: Fix atomics
  locking,arch: Rewrite generic atomic support
  locking,arch,xtensa: Fold atomic_ops
  locking,arch,sparc: Fold atomic_ops
  locking,arch,sh: Fold atomic_ops
  locking,arch,powerpc: Fold atomic_ops
  locking,arch,parisc: Fold atomic_ops
  locking,arch,mn10300: Fold atomic_ops
  locking,arch,mips: Fold atomic_ops
  locking,arch,metag: Fold atomic_ops
  locking,arch,m68k: Fold atomic_ops
  locking,arch,m32r: Fold atomic_ops
  locking,arch,ia64: Fold atomic_ops
  locking,arch,hexagon: Fold atomic_ops
  locking,arch,cris: Fold atomic_ops
  locking,arch,avr32: Fold atomic_ops
  locking,arch,arm64: Fold atomic_ops
  locking,arch,arm: Fold atomic_ops
  ...
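To make the "generate all other methods from cmpxchg()" idea concrete, here is a minimal userspace sketch of the ATOMIC_OP()/ATOMIC_OP_RETURN() template pattern this series introduces. It is not the kernel's asm-generic/atomic.h: the architecture cmpxchg() is stood in for by GCC's __sync_val_compare_and_swap() builtin, atomic_t is redeclared locally, and memory-ordering details (smp_mb() and friends) are left out.

/*
 * Sketch only: shows how the op and op_return variants are stamped out
 * from one template, given nothing but a cmpxchg()-style primitive.
 */
#include <stdio.h>

typedef struct { int counter; } atomic_t;

#define atomic_read(v)		(*(volatile int *)&(v)->counter)
/* Stand-in for the arch cmpxchg(); GCC/Clang builtin, full barrier. */
#define cmpxchg(p, o, n)	__sync_val_compare_and_swap((p), (o), (n))

#define ATOMIC_OP_RETURN(op, c_op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int old, new;							\
									\
	do {								\
		old = atomic_read(v);					\
		new = old c_op i;					\
	} while (cmpxchg(&v->counter, old, new) != old);		\
									\
	return new;							\
}

#define ATOMIC_OP(op, c_op)						\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	(void)atomic_##op##_return(i, v);				\
}

#define ATOMIC_OPS(op, c_op)	ATOMIC_OP_RETURN(op, c_op) ATOMIC_OP(op, c_op)

ATOMIC_OPS(add, +)	/* generates atomic_add(), atomic_add_return() */
ATOMIC_OPS(sub, -)	/* generates atomic_sub(), atomic_sub_return() */

#undef ATOMIC_OPS
#undef ATOMIC_OP
#undef ATOMIC_OP_RETURN

int main(void)
{
	atomic_t v = { 40 };

	atomic_add(3, &v);
	printf("%d\n", atomic_sub_return(1, &v));	/* prints 42 */
	return 0;
}

The point of the pattern, as the per-architecture hunks below show, is that each architecture supplies only the loop body (ll/sc, cas, or a spinlock/irq section), and the add/sub families are then stamped out by ATOMIC_OPS() instead of being open-coded once per operation.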
-rw-r--r--  arch/alpha/include/asm/atomic.h         | 217
-rw-r--r--  arch/arc/include/asm/atomic.h           | 184
-rw-r--r--  arch/arm/include/asm/atomic.h           | 307
-rw-r--r--  arch/arm64/include/asm/atomic.h         | 201
-rw-r--r--  arch/avr32/include/asm/atomic.h         | 125
-rw-r--r--  arch/cris/include/asm/atomic.h          |  59
-rw-r--r--  arch/frv/include/asm/atomic.h           |   2
-rw-r--r--  arch/hexagon/include/asm/atomic.h       |  68
-rw-r--r--  arch/ia64/include/asm/atomic.h          | 192
-rw-r--r--  arch/m32r/include/asm/atomic.h          | 145
-rw-r--r--  arch/m68k/include/asm/atomic.h          | 111
-rw-r--r--  arch/metag/include/asm/atomic_lnkget.h  | 121
-rw-r--r--  arch/metag/include/asm/atomic_lock1.h   |  76
-rw-r--r--  arch/mips/include/asm/atomic.h          | 561
-rw-r--r--  arch/mn10300/include/asm/atomic.h       | 125
-rw-r--r--  arch/parisc/include/asm/atomic.h        | 117
-rw-r--r--  arch/powerpc/include/asm/atomic.h       | 198
-rw-r--r--  arch/sh/include/asm/atomic-grb.h        | 119
-rw-r--r--  arch/sh/include/asm/atomic-irq.h        |  62
-rw-r--r--  arch/sh/include/asm/atomic-llsc.h       | 101
-rw-r--r--  arch/sh/include/asm/atomic.h            |   2
-rw-r--r--  arch/sparc/include/asm/atomic_32.h      |  19
-rw-r--r--  arch/sparc/include/asm/atomic_64.h      |  49
-rw-r--r--  arch/sparc/kernel/smp_64.c              |   2
-rw-r--r--  arch/sparc/lib/atomic32.c               |  29
-rw-r--r--  arch/sparc/lib/atomic_64.S              | 163
-rw-r--r--  arch/sparc/lib/ksyms.c                  |  25
-rw-r--r--  arch/x86/include/asm/atomic.h           |  17
-rw-r--r--  arch/x86/include/asm/atomic64_64.h      |   2
-rw-r--r--  arch/xtensa/include/asm/atomic.h        | 235
-rw-r--r--  include/asm-generic/atomic.h            | 194
-rw-r--r--  include/asm-generic/atomic64.h          |  20
-rw-r--r--  lib/atomic64.c                          |  83
33 files changed, 1611 insertions, 2320 deletions
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index ed60a1ee1ed3..8f8eafbedd7c 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -17,8 +17,8 @@
 #define ATOMIC_INIT(i)		{ (i) }
 #define ATOMIC64_INIT(i)	{ (i) }
 
-#define atomic_read(v)		(*(volatile int *)&(v)->counter)
-#define atomic64_read(v)	(*(volatile long *)&(v)->counter)
+#define atomic_read(v)		ACCESS_ONCE((v)->counter)
+#define atomic64_read(v)	ACCESS_ONCE((v)->counter)
 
 #define atomic_set(v,i)		((v)->counter = (i))
 #define atomic64_set(v,i)	((v)->counter = (i))
@@ -29,145 +29,92 @@
29 * branch back to restart the operation. 29 * branch back to restart the operation.
30 */ 30 */
31 31
32static __inline__ void atomic_add(int i, atomic_t * v) 32#define ATOMIC_OP(op) \
33{ 33static __inline__ void atomic_##op(int i, atomic_t * v) \
34 unsigned long temp; 34{ \
35 __asm__ __volatile__( 35 unsigned long temp; \
36 "1: ldl_l %0,%1\n" 36 __asm__ __volatile__( \
37 " addl %0,%2,%0\n" 37 "1: ldl_l %0,%1\n" \
38 " stl_c %0,%1\n" 38 " " #op "l %0,%2,%0\n" \
39 " beq %0,2f\n" 39 " stl_c %0,%1\n" \
40 ".subsection 2\n" 40 " beq %0,2f\n" \
41 "2: br 1b\n" 41 ".subsection 2\n" \
42 ".previous" 42 "2: br 1b\n" \
43 :"=&r" (temp), "=m" (v->counter) 43 ".previous" \
44 :"Ir" (i), "m" (v->counter)); 44 :"=&r" (temp), "=m" (v->counter) \
45} 45 :"Ir" (i), "m" (v->counter)); \
46 46} \
47static __inline__ void atomic64_add(long i, atomic64_t * v) 47
48{ 48#define ATOMIC_OP_RETURN(op) \
49 unsigned long temp; 49static inline int atomic_##op##_return(int i, atomic_t *v) \
50 __asm__ __volatile__( 50{ \
51 "1: ldq_l %0,%1\n" 51 long temp, result; \
52 " addq %0,%2,%0\n" 52 smp_mb(); \
53 " stq_c %0,%1\n" 53 __asm__ __volatile__( \
54 " beq %0,2f\n" 54 "1: ldl_l %0,%1\n" \
55 ".subsection 2\n" 55 " " #op "l %0,%3,%2\n" \
56 "2: br 1b\n" 56 " " #op "l %0,%3,%0\n" \
57 ".previous" 57 " stl_c %0,%1\n" \
58 :"=&r" (temp), "=m" (v->counter) 58 " beq %0,2f\n" \
59 :"Ir" (i), "m" (v->counter)); 59 ".subsection 2\n" \
60} 60 "2: br 1b\n" \
61 61 ".previous" \
62static __inline__ void atomic_sub(int i, atomic_t * v) 62 :"=&r" (temp), "=m" (v->counter), "=&r" (result) \
63{ 63 :"Ir" (i), "m" (v->counter) : "memory"); \
64 unsigned long temp; 64 smp_mb(); \
65 __asm__ __volatile__( 65 return result; \
66 "1: ldl_l %0,%1\n"
67 " subl %0,%2,%0\n"
68 " stl_c %0,%1\n"
69 " beq %0,2f\n"
70 ".subsection 2\n"
71 "2: br 1b\n"
72 ".previous"
73 :"=&r" (temp), "=m" (v->counter)
74 :"Ir" (i), "m" (v->counter));
75} 66}
76 67
77static __inline__ void atomic64_sub(long i, atomic64_t * v) 68#define ATOMIC64_OP(op) \
78{ 69static __inline__ void atomic64_##op(long i, atomic64_t * v) \
79 unsigned long temp; 70{ \
80 __asm__ __volatile__( 71 unsigned long temp; \
81 "1: ldq_l %0,%1\n" 72 __asm__ __volatile__( \
82 " subq %0,%2,%0\n" 73 "1: ldq_l %0,%1\n" \
83 " stq_c %0,%1\n" 74 " " #op "q %0,%2,%0\n" \
84 " beq %0,2f\n" 75 " stq_c %0,%1\n" \
85 ".subsection 2\n" 76 " beq %0,2f\n" \
86 "2: br 1b\n" 77 ".subsection 2\n" \
87 ".previous" 78 "2: br 1b\n" \
88 :"=&r" (temp), "=m" (v->counter) 79 ".previous" \
89 :"Ir" (i), "m" (v->counter)); 80 :"=&r" (temp), "=m" (v->counter) \
90} 81 :"Ir" (i), "m" (v->counter)); \
91 82} \
92 83
93/* 84#define ATOMIC64_OP_RETURN(op) \
94 * Same as above, but return the result value 85static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
95 */ 86{ \
96static inline int atomic_add_return(int i, atomic_t *v) 87 long temp, result; \
97{ 88 smp_mb(); \
98 long temp, result; 89 __asm__ __volatile__( \
99 smp_mb(); 90 "1: ldq_l %0,%1\n" \
100 __asm__ __volatile__( 91 " " #op "q %0,%3,%2\n" \
101 "1: ldl_l %0,%1\n" 92 " " #op "q %0,%3,%0\n" \
102 " addl %0,%3,%2\n" 93 " stq_c %0,%1\n" \
103 " addl %0,%3,%0\n" 94 " beq %0,2f\n" \
104 " stl_c %0,%1\n" 95 ".subsection 2\n" \
105 " beq %0,2f\n" 96 "2: br 1b\n" \
106 ".subsection 2\n" 97 ".previous" \
107 "2: br 1b\n" 98 :"=&r" (temp), "=m" (v->counter), "=&r" (result) \
108 ".previous" 99 :"Ir" (i), "m" (v->counter) : "memory"); \
109 :"=&r" (temp), "=m" (v->counter), "=&r" (result) 100 smp_mb(); \
110 :"Ir" (i), "m" (v->counter) : "memory"); 101 return result; \
111 smp_mb();
112 return result;
113} 102}
114 103
115static __inline__ long atomic64_add_return(long i, atomic64_t * v) 104#define ATOMIC_OPS(opg) \
116{ 105 ATOMIC_OP(opg) \
117 long temp, result; 106 ATOMIC_OP_RETURN(opg) \
118 smp_mb(); 107 ATOMIC64_OP(opg) \
119 __asm__ __volatile__( 108 ATOMIC64_OP_RETURN(opg)
120 "1: ldq_l %0,%1\n"
121 " addq %0,%3,%2\n"
122 " addq %0,%3,%0\n"
123 " stq_c %0,%1\n"
124 " beq %0,2f\n"
125 ".subsection 2\n"
126 "2: br 1b\n"
127 ".previous"
128 :"=&r" (temp), "=m" (v->counter), "=&r" (result)
129 :"Ir" (i), "m" (v->counter) : "memory");
130 smp_mb();
131 return result;
132}
133 109
134static __inline__ long atomic_sub_return(int i, atomic_t * v) 110ATOMIC_OPS(add)
135{ 111ATOMIC_OPS(sub)
136 long temp, result;
137 smp_mb();
138 __asm__ __volatile__(
139 "1: ldl_l %0,%1\n"
140 " subl %0,%3,%2\n"
141 " subl %0,%3,%0\n"
142 " stl_c %0,%1\n"
143 " beq %0,2f\n"
144 ".subsection 2\n"
145 "2: br 1b\n"
146 ".previous"
147 :"=&r" (temp), "=m" (v->counter), "=&r" (result)
148 :"Ir" (i), "m" (v->counter) : "memory");
149 smp_mb();
150 return result;
151}
152 112
153static __inline__ long atomic64_sub_return(long i, atomic64_t * v) 113#undef ATOMIC_OPS
154{ 114#undef ATOMIC64_OP_RETURN
155 long temp, result; 115#undef ATOMIC64_OP
156 smp_mb(); 116#undef ATOMIC_OP_RETURN
157 __asm__ __volatile__( 117#undef ATOMIC_OP
158 "1: ldq_l %0,%1\n"
159 " subq %0,%3,%2\n"
160 " subq %0,%3,%0\n"
161 " stq_c %0,%1\n"
162 " beq %0,2f\n"
163 ".subsection 2\n"
164 "2: br 1b\n"
165 ".previous"
166 :"=&r" (temp), "=m" (v->counter), "=&r" (result)
167 :"Ir" (i), "m" (v->counter) : "memory");
168 smp_mb();
169 return result;
170}
171 118
172#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) 119#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
173#define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) 120#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 83f03ca6caf6..173f303a868f 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -25,79 +25,36 @@
25 25
26#define atomic_set(v, i) (((v)->counter) = (i)) 26#define atomic_set(v, i) (((v)->counter) = (i))
27 27
28static inline void atomic_add(int i, atomic_t *v) 28#define ATOMIC_OP(op, c_op, asm_op) \
29{ 29static inline void atomic_##op(int i, atomic_t *v) \
30 unsigned int temp; 30{ \
31 31 unsigned int temp; \
32 __asm__ __volatile__( 32 \
33 "1: llock %0, [%1] \n" 33 __asm__ __volatile__( \
34 " add %0, %0, %2 \n" 34 "1: llock %0, [%1] \n" \
35 " scond %0, [%1] \n" 35 " " #asm_op " %0, %0, %2 \n" \
36 " bnz 1b \n" 36 " scond %0, [%1] \n" \
37 : "=&r"(temp) /* Early clobber, to prevent reg reuse */ 37 " bnz 1b \n" \
38 : "r"(&v->counter), "ir"(i) 38 : "=&r"(temp) /* Early clobber, to prevent reg reuse */ \
39 : "cc"); 39 : "r"(&v->counter), "ir"(i) \
40} 40 : "cc"); \
41 41} \
42static inline void atomic_sub(int i, atomic_t *v) 42
43{ 43#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
44 unsigned int temp; 44static inline int atomic_##op##_return(int i, atomic_t *v) \
45 45{ \
46 __asm__ __volatile__( 46 unsigned int temp; \
47 "1: llock %0, [%1] \n" 47 \
48 " sub %0, %0, %2 \n" 48 __asm__ __volatile__( \
49 " scond %0, [%1] \n" 49 "1: llock %0, [%1] \n" \
50 " bnz 1b \n" 50 " " #asm_op " %0, %0, %2 \n" \
51 : "=&r"(temp) 51 " scond %0, [%1] \n" \
52 : "r"(&v->counter), "ir"(i) 52 " bnz 1b \n" \
53 : "cc"); 53 : "=&r"(temp) \
54} 54 : "r"(&v->counter), "ir"(i) \
55 55 : "cc"); \
56/* add and also return the new value */ 56 \
57static inline int atomic_add_return(int i, atomic_t *v) 57 return temp; \
58{
59 unsigned int temp;
60
61 __asm__ __volatile__(
62 "1: llock %0, [%1] \n"
63 " add %0, %0, %2 \n"
64 " scond %0, [%1] \n"
65 " bnz 1b \n"
66 : "=&r"(temp)
67 : "r"(&v->counter), "ir"(i)
68 : "cc");
69
70 return temp;
71}
72
73static inline int atomic_sub_return(int i, atomic_t *v)
74{
75 unsigned int temp;
76
77 __asm__ __volatile__(
78 "1: llock %0, [%1] \n"
79 " sub %0, %0, %2 \n"
80 " scond %0, [%1] \n"
81 " bnz 1b \n"
82 : "=&r"(temp)
83 : "r"(&v->counter), "ir"(i)
84 : "cc");
85
86 return temp;
87}
88
89static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
90{
91 unsigned int temp;
92
93 __asm__ __volatile__(
94 "1: llock %0, [%1] \n"
95 " bic %0, %0, %2 \n"
96 " scond %0, [%1] \n"
97 " bnz 1b \n"
98 : "=&r"(temp)
99 : "r"(addr), "ir"(mask)
100 : "cc");
101} 58}
102 59
103#else /* !CONFIG_ARC_HAS_LLSC */ 60#else /* !CONFIG_ARC_HAS_LLSC */
@@ -126,6 +83,7 @@ static inline void atomic_set(atomic_t *v, int i)
 	v->counter = i;
 	atomic_ops_unlock(flags);
 }
+
 #endif
 
 /*
@@ -133,62 +91,46 @@ static inline void atomic_set(atomic_t *v, int i)
133 * Locking would change to irq-disabling only (UP) and spinlocks (SMP) 91 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
134 */ 92 */
135 93
136static inline void atomic_add(int i, atomic_t *v) 94#define ATOMIC_OP(op, c_op, asm_op) \
137{ 95static inline void atomic_##op(int i, atomic_t *v) \
138 unsigned long flags; 96{ \
139 97 unsigned long flags; \
140 atomic_ops_lock(flags); 98 \
141 v->counter += i; 99 atomic_ops_lock(flags); \
142 atomic_ops_unlock(flags); 100 v->counter c_op i; \
101 atomic_ops_unlock(flags); \
143} 102}
144 103
145static inline void atomic_sub(int i, atomic_t *v) 104#define ATOMIC_OP_RETURN(op, c_op) \
146{ 105static inline int atomic_##op##_return(int i, atomic_t *v) \
147 unsigned long flags; 106{ \
148 107 unsigned long flags; \
149 atomic_ops_lock(flags); 108 unsigned long temp; \
150 v->counter -= i; 109 \
151 atomic_ops_unlock(flags); 110 atomic_ops_lock(flags); \
111 temp = v->counter; \
112 temp c_op i; \
113 v->counter = temp; \
114 atomic_ops_unlock(flags); \
115 \
116 return temp; \
152} 117}
153 118
154static inline int atomic_add_return(int i, atomic_t *v) 119#endif /* !CONFIG_ARC_HAS_LLSC */
155{
156 unsigned long flags;
157 unsigned long temp;
158
159 atomic_ops_lock(flags);
160 temp = v->counter;
161 temp += i;
162 v->counter = temp;
163 atomic_ops_unlock(flags);
164
165 return temp;
166}
167
168static inline int atomic_sub_return(int i, atomic_t *v)
169{
170 unsigned long flags;
171 unsigned long temp;
172
173 atomic_ops_lock(flags);
174 temp = v->counter;
175 temp -= i;
176 v->counter = temp;
177 atomic_ops_unlock(flags);
178 120
179 return temp; 121#define ATOMIC_OPS(op, c_op, asm_op) \
180} 122 ATOMIC_OP(op, c_op, asm_op) \
123 ATOMIC_OP_RETURN(op, c_op, asm_op)
181 124
182static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) 125ATOMIC_OPS(add, +=, add)
183{ 126ATOMIC_OPS(sub, -=, sub)
184 unsigned long flags; 127ATOMIC_OP(and, &=, and)
185 128
186 atomic_ops_lock(flags); 129#define atomic_clear_mask(mask, v) atomic_and(~(mask), (v))
187 *addr &= ~mask;
188 atomic_ops_unlock(flags);
189}
190 130
191#endif /* !CONFIG_ARC_HAS_LLSC */ 131#undef ATOMIC_OPS
132#undef ATOMIC_OP_RETURN
133#undef ATOMIC_OP
192 134
193/** 135/**
194 * __atomic_add_unless - add unless the number is a given value 136 * __atomic_add_unless - add unless the number is a given value
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 3040359094d9..e22c11970b7b 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -27,7 +27,7 @@
  * strex/ldrex monitor on some implementations. The reason we can use it for
  * atomic_set() is the clrex or dummy strex done on every exception return.
  */
-#define atomic_read(v)	(*(volatile int *)&(v)->counter)
+#define atomic_read(v)	ACCESS_ONCE((v)->counter)
 #define atomic_set(v,i)	(((v)->counter) = (i))
 
 #if __LINUX_ARM_ARCH__ >= 6
@@ -37,84 +37,47 @@
37 * store exclusive to ensure that these are atomic. We may loop 37 * store exclusive to ensure that these are atomic. We may loop
38 * to ensure that the update happens. 38 * to ensure that the update happens.
39 */ 39 */
40static inline void atomic_add(int i, atomic_t *v)
41{
42 unsigned long tmp;
43 int result;
44
45 prefetchw(&v->counter);
46 __asm__ __volatile__("@ atomic_add\n"
47"1: ldrex %0, [%3]\n"
48" add %0, %0, %4\n"
49" strex %1, %0, [%3]\n"
50" teq %1, #0\n"
51" bne 1b"
52 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
53 : "r" (&v->counter), "Ir" (i)
54 : "cc");
55}
56 40
57static inline int atomic_add_return(int i, atomic_t *v) 41#define ATOMIC_OP(op, c_op, asm_op) \
58{ 42static inline void atomic_##op(int i, atomic_t *v) \
59 unsigned long tmp; 43{ \
60 int result; 44 unsigned long tmp; \
61 45 int result; \
62 smp_mb(); 46 \
63 prefetchw(&v->counter); 47 prefetchw(&v->counter); \
64 48 __asm__ __volatile__("@ atomic_" #op "\n" \
65 __asm__ __volatile__("@ atomic_add_return\n" 49"1: ldrex %0, [%3]\n" \
66"1: ldrex %0, [%3]\n" 50" " #asm_op " %0, %0, %4\n" \
67" add %0, %0, %4\n" 51" strex %1, %0, [%3]\n" \
68" strex %1, %0, [%3]\n" 52" teq %1, #0\n" \
69" teq %1, #0\n" 53" bne 1b" \
70" bne 1b" 54 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
71 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) 55 : "r" (&v->counter), "Ir" (i) \
72 : "r" (&v->counter), "Ir" (i) 56 : "cc"); \
73 : "cc"); 57} \
74 58
75 smp_mb(); 59#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
76 60static inline int atomic_##op##_return(int i, atomic_t *v) \
77 return result; 61{ \
78} 62 unsigned long tmp; \
79 63 int result; \
80static inline void atomic_sub(int i, atomic_t *v) 64 \
81{ 65 smp_mb(); \
82 unsigned long tmp; 66 prefetchw(&v->counter); \
83 int result; 67 \
84 68 __asm__ __volatile__("@ atomic_" #op "_return\n" \
85 prefetchw(&v->counter); 69"1: ldrex %0, [%3]\n" \
86 __asm__ __volatile__("@ atomic_sub\n" 70" " #asm_op " %0, %0, %4\n" \
87"1: ldrex %0, [%3]\n" 71" strex %1, %0, [%3]\n" \
88" sub %0, %0, %4\n" 72" teq %1, #0\n" \
89" strex %1, %0, [%3]\n" 73" bne 1b" \
90" teq %1, #0\n" 74 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
91" bne 1b" 75 : "r" (&v->counter), "Ir" (i) \
92 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) 76 : "cc"); \
93 : "r" (&v->counter), "Ir" (i) 77 \
94 : "cc"); 78 smp_mb(); \
95} 79 \
96 80 return result; \
97static inline int atomic_sub_return(int i, atomic_t *v)
98{
99 unsigned long tmp;
100 int result;
101
102 smp_mb();
103 prefetchw(&v->counter);
104
105 __asm__ __volatile__("@ atomic_sub_return\n"
106"1: ldrex %0, [%3]\n"
107" sub %0, %0, %4\n"
108" strex %1, %0, [%3]\n"
109" teq %1, #0\n"
110" bne 1b"
111 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
112 : "r" (&v->counter), "Ir" (i)
113 : "cc");
114
115 smp_mb();
116
117 return result;
118} 81}
119 82
120static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) 83static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
@@ -174,33 +137,29 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
174#error SMP not supported on pre-ARMv6 CPUs 137#error SMP not supported on pre-ARMv6 CPUs
175#endif 138#endif
176 139
177static inline int atomic_add_return(int i, atomic_t *v) 140#define ATOMIC_OP(op, c_op, asm_op) \
178{ 141static inline void atomic_##op(int i, atomic_t *v) \
179 unsigned long flags; 142{ \
180 int val; 143 unsigned long flags; \
181 144 \
182 raw_local_irq_save(flags); 145 raw_local_irq_save(flags); \
183 val = v->counter; 146 v->counter c_op i; \
184 v->counter = val += i; 147 raw_local_irq_restore(flags); \
185 raw_local_irq_restore(flags); 148} \
186 149
187 return val; 150#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
188} 151static inline int atomic_##op##_return(int i, atomic_t *v) \
189#define atomic_add(i, v) (void) atomic_add_return(i, v) 152{ \
190 153 unsigned long flags; \
191static inline int atomic_sub_return(int i, atomic_t *v) 154 int val; \
192{ 155 \
193 unsigned long flags; 156 raw_local_irq_save(flags); \
194 int val; 157 v->counter c_op i; \
195 158 val = v->counter; \
196 raw_local_irq_save(flags); 159 raw_local_irq_restore(flags); \
197 val = v->counter; 160 \
198 v->counter = val -= i; 161 return val; \
199 raw_local_irq_restore(flags);
200
201 return val;
202} 162}
203#define atomic_sub(i, v) (void) atomic_sub_return(i, v)
204 163
205static inline int atomic_cmpxchg(atomic_t *v, int old, int new) 164static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
206{ 165{
@@ -228,6 +187,17 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 
 #endif /* __LINUX_ARM_ARCH__ */
 
+#define ATOMIC_OPS(op, c_op, asm_op)					\
+	ATOMIC_OP(op, c_op, asm_op)					\
+	ATOMIC_OP_RETURN(op, c_op, asm_op)
+
+ATOMIC_OPS(add, +=, add)
+ATOMIC_OPS(sub, -=, sub)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
 #define atomic_xchg(v, new)	(xchg(&((v)->counter), new))
 
 #define atomic_inc(v)		atomic_add(1, v)
@@ -300,89 +270,60 @@ static inline void atomic64_set(atomic64_t *v, long long i)
300} 270}
301#endif 271#endif
302 272
303static inline void atomic64_add(long long i, atomic64_t *v) 273#define ATOMIC64_OP(op, op1, op2) \
304{ 274static inline void atomic64_##op(long long i, atomic64_t *v) \
305 long long result; 275{ \
306 unsigned long tmp; 276 long long result; \
307 277 unsigned long tmp; \
308 prefetchw(&v->counter); 278 \
309 __asm__ __volatile__("@ atomic64_add\n" 279 prefetchw(&v->counter); \
310"1: ldrexd %0, %H0, [%3]\n" 280 __asm__ __volatile__("@ atomic64_" #op "\n" \
311" adds %Q0, %Q0, %Q4\n" 281"1: ldrexd %0, %H0, [%3]\n" \
312" adc %R0, %R0, %R4\n" 282" " #op1 " %Q0, %Q0, %Q4\n" \
313" strexd %1, %0, %H0, [%3]\n" 283" " #op2 " %R0, %R0, %R4\n" \
314" teq %1, #0\n" 284" strexd %1, %0, %H0, [%3]\n" \
315" bne 1b" 285" teq %1, #0\n" \
316 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) 286" bne 1b" \
317 : "r" (&v->counter), "r" (i) 287 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
318 : "cc"); 288 : "r" (&v->counter), "r" (i) \
319} 289 : "cc"); \
320 290} \
321static inline long long atomic64_add_return(long long i, atomic64_t *v) 291
322{ 292#define ATOMIC64_OP_RETURN(op, op1, op2) \
323 long long result; 293static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
324 unsigned long tmp; 294{ \
325 295 long long result; \
326 smp_mb(); 296 unsigned long tmp; \
327 prefetchw(&v->counter); 297 \
328 298 smp_mb(); \
329 __asm__ __volatile__("@ atomic64_add_return\n" 299 prefetchw(&v->counter); \
330"1: ldrexd %0, %H0, [%3]\n" 300 \
331" adds %Q0, %Q0, %Q4\n" 301 __asm__ __volatile__("@ atomic64_" #op "_return\n" \
332" adc %R0, %R0, %R4\n" 302"1: ldrexd %0, %H0, [%3]\n" \
333" strexd %1, %0, %H0, [%3]\n" 303" " #op1 " %Q0, %Q0, %Q4\n" \
334" teq %1, #0\n" 304" " #op2 " %R0, %R0, %R4\n" \
335" bne 1b" 305" strexd %1, %0, %H0, [%3]\n" \
336 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) 306" teq %1, #0\n" \
337 : "r" (&v->counter), "r" (i) 307" bne 1b" \
338 : "cc"); 308 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
339 309 : "r" (&v->counter), "r" (i) \
340 smp_mb(); 310 : "cc"); \
341 311 \
342 return result; 312 smp_mb(); \
343} 313 \
344 314 return result; \
345static inline void atomic64_sub(long long i, atomic64_t *v)
346{
347 long long result;
348 unsigned long tmp;
349
350 prefetchw(&v->counter);
351 __asm__ __volatile__("@ atomic64_sub\n"
352"1: ldrexd %0, %H0, [%3]\n"
353" subs %Q0, %Q0, %Q4\n"
354" sbc %R0, %R0, %R4\n"
355" strexd %1, %0, %H0, [%3]\n"
356" teq %1, #0\n"
357" bne 1b"
358 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
359 : "r" (&v->counter), "r" (i)
360 : "cc");
361} 315}
362 316
363static inline long long atomic64_sub_return(long long i, atomic64_t *v) 317#define ATOMIC64_OPS(op, op1, op2) \
364{ 318 ATOMIC64_OP(op, op1, op2) \
365 long long result; 319 ATOMIC64_OP_RETURN(op, op1, op2)
366 unsigned long tmp;
367
368 smp_mb();
369 prefetchw(&v->counter);
370
371 __asm__ __volatile__("@ atomic64_sub_return\n"
372"1: ldrexd %0, %H0, [%3]\n"
373" subs %Q0, %Q0, %Q4\n"
374" sbc %R0, %R0, %R4\n"
375" strexd %1, %0, %H0, [%3]\n"
376" teq %1, #0\n"
377" bne 1b"
378 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
379 : "r" (&v->counter), "r" (i)
380 : "cc");
381 320
382 smp_mb(); 321ATOMIC64_OPS(add, adds, adc)
322ATOMIC64_OPS(sub, subs, sbc)
383 323
384 return result; 324#undef ATOMIC64_OPS
385} 325#undef ATOMIC64_OP_RETURN
326#undef ATOMIC64_OP
386 327
387static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old, 328static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
388 long long new) 329 long long new)
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 65f1569ac96e..7047051ded40 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -35,7 +35,7 @@
  * strex/ldrex monitor on some implementations. The reason we can use it for
  * atomic_set() is the clrex or dummy strex done on every exception return.
  */
-#define atomic_read(v)	(*(volatile int *)&(v)->counter)
+#define atomic_read(v)	ACCESS_ONCE((v)->counter)
 #define atomic_set(v,i)	(((v)->counter) = (i))
 
 /*
@@ -43,69 +43,51 @@
43 * store exclusive to ensure that these are atomic. We may loop 43 * store exclusive to ensure that these are atomic. We may loop
44 * to ensure that the update happens. 44 * to ensure that the update happens.
45 */ 45 */
46static inline void atomic_add(int i, atomic_t *v)
47{
48 unsigned long tmp;
49 int result;
50
51 asm volatile("// atomic_add\n"
52"1: ldxr %w0, %2\n"
53" add %w0, %w0, %w3\n"
54" stxr %w1, %w0, %2\n"
55" cbnz %w1, 1b"
56 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
57 : "Ir" (i));
58}
59
60static inline int atomic_add_return(int i, atomic_t *v)
61{
62 unsigned long tmp;
63 int result;
64
65 asm volatile("// atomic_add_return\n"
66"1: ldxr %w0, %2\n"
67" add %w0, %w0, %w3\n"
68" stlxr %w1, %w0, %2\n"
69" cbnz %w1, 1b"
70 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
71 : "Ir" (i)
72 : "memory");
73
74 smp_mb();
75 return result;
76}
77
78static inline void atomic_sub(int i, atomic_t *v)
79{
80 unsigned long tmp;
81 int result;
82 46
83 asm volatile("// atomic_sub\n" 47#define ATOMIC_OP(op, asm_op) \
84"1: ldxr %w0, %2\n" 48static inline void atomic_##op(int i, atomic_t *v) \
85" sub %w0, %w0, %w3\n" 49{ \
86" stxr %w1, %w0, %2\n" 50 unsigned long tmp; \
87" cbnz %w1, 1b" 51 int result; \
88 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) 52 \
89 : "Ir" (i)); 53 asm volatile("// atomic_" #op "\n" \
54"1: ldxr %w0, %2\n" \
55" " #asm_op " %w0, %w0, %w3\n" \
56" stxr %w1, %w0, %2\n" \
57" cbnz %w1, 1b" \
58 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
59 : "Ir" (i)); \
60} \
61
62#define ATOMIC_OP_RETURN(op, asm_op) \
63static inline int atomic_##op##_return(int i, atomic_t *v) \
64{ \
65 unsigned long tmp; \
66 int result; \
67 \
68 asm volatile("// atomic_" #op "_return\n" \
69"1: ldxr %w0, %2\n" \
70" " #asm_op " %w0, %w0, %w3\n" \
71" stlxr %w1, %w0, %2\n" \
72" cbnz %w1, 1b" \
73 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
74 : "Ir" (i) \
75 : "memory"); \
76 \
77 smp_mb(); \
78 return result; \
90} 79}
91 80
92static inline int atomic_sub_return(int i, atomic_t *v) 81#define ATOMIC_OPS(op, asm_op) \
93{ 82 ATOMIC_OP(op, asm_op) \
94 unsigned long tmp; 83 ATOMIC_OP_RETURN(op, asm_op)
95 int result;
96 84
97 asm volatile("// atomic_sub_return\n" 85ATOMIC_OPS(add, add)
98"1: ldxr %w0, %2\n" 86ATOMIC_OPS(sub, sub)
99" sub %w0, %w0, %w3\n"
100" stlxr %w1, %w0, %2\n"
101" cbnz %w1, 1b"
102 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
103 : "Ir" (i)
104 : "memory");
105 87
106 smp_mb(); 88#undef ATOMIC_OPS
107 return result; 89#undef ATOMIC_OP_RETURN
108} 90#undef ATOMIC_OP
109 91
110static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) 92static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
111{ 93{
@@ -157,72 +139,53 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
157 */ 139 */
158#define ATOMIC64_INIT(i) { (i) } 140#define ATOMIC64_INIT(i) { (i) }
159 141
160#define atomic64_read(v) (*(volatile long *)&(v)->counter) 142#define atomic64_read(v) ACCESS_ONCE((v)->counter)
161#define atomic64_set(v,i) (((v)->counter) = (i)) 143#define atomic64_set(v,i) (((v)->counter) = (i))
162 144
163static inline void atomic64_add(u64 i, atomic64_t *v) 145#define ATOMIC64_OP(op, asm_op) \
164{ 146static inline void atomic64_##op(long i, atomic64_t *v) \
165 long result; 147{ \
166 unsigned long tmp; 148 long result; \
167 149 unsigned long tmp; \
168 asm volatile("// atomic64_add\n" 150 \
169"1: ldxr %0, %2\n" 151 asm volatile("// atomic64_" #op "\n" \
170" add %0, %0, %3\n" 152"1: ldxr %0, %2\n" \
171" stxr %w1, %0, %2\n" 153" " #asm_op " %0, %0, %3\n" \
172" cbnz %w1, 1b" 154" stxr %w1, %0, %2\n" \
173 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) 155" cbnz %w1, 1b" \
174 : "Ir" (i)); 156 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
157 : "Ir" (i)); \
158} \
159
160#define ATOMIC64_OP_RETURN(op, asm_op) \
161static inline long atomic64_##op##_return(long i, atomic64_t *v) \
162{ \
163 long result; \
164 unsigned long tmp; \
165 \
166 asm volatile("// atomic64_" #op "_return\n" \
167"1: ldxr %0, %2\n" \
168" " #asm_op " %0, %0, %3\n" \
169" stlxr %w1, %0, %2\n" \
170" cbnz %w1, 1b" \
171 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
172 : "Ir" (i) \
173 : "memory"); \
174 \
175 smp_mb(); \
176 return result; \
175} 177}
176 178
177static inline long atomic64_add_return(long i, atomic64_t *v) 179#define ATOMIC64_OPS(op, asm_op) \
178{ 180 ATOMIC64_OP(op, asm_op) \
179 long result; 181 ATOMIC64_OP_RETURN(op, asm_op)
180 unsigned long tmp;
181 182
182 asm volatile("// atomic64_add_return\n" 183ATOMIC64_OPS(add, add)
183"1: ldxr %0, %2\n" 184ATOMIC64_OPS(sub, sub)
184" add %0, %0, %3\n"
185" stlxr %w1, %0, %2\n"
186" cbnz %w1, 1b"
187 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
188 : "Ir" (i)
189 : "memory");
190 185
191 smp_mb(); 186#undef ATOMIC64_OPS
192 return result; 187#undef ATOMIC64_OP_RETURN
193} 188#undef ATOMIC64_OP
194
195static inline void atomic64_sub(u64 i, atomic64_t *v)
196{
197 long result;
198 unsigned long tmp;
199
200 asm volatile("// atomic64_sub\n"
201"1: ldxr %0, %2\n"
202" sub %0, %0, %3\n"
203" stxr %w1, %0, %2\n"
204" cbnz %w1, 1b"
205 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
206 : "Ir" (i));
207}
208
209static inline long atomic64_sub_return(long i, atomic64_t *v)
210{
211 long result;
212 unsigned long tmp;
213
214 asm volatile("// atomic64_sub_return\n"
215"1: ldxr %0, %2\n"
216" sub %0, %0, %3\n"
217" stlxr %w1, %0, %2\n"
218" cbnz %w1, 1b"
219 : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
220 : "Ir" (i)
221 : "memory");
222
223 smp_mb();
224 return result;
225}
226 189
227static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new) 190static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
228{ 191{
diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h
index 0780f3f2415b..2d07ce1c5327 100644
--- a/arch/avr32/include/asm/atomic.h
+++ b/arch/avr32/include/asm/atomic.h
@@ -19,33 +19,46 @@
19 19
20#define ATOMIC_INIT(i) { (i) } 20#define ATOMIC_INIT(i) { (i) }
21 21
22#define atomic_read(v) (*(volatile int *)&(v)->counter) 22#define atomic_read(v) ACCESS_ONCE((v)->counter)
23#define atomic_set(v, i) (((v)->counter) = i) 23#define atomic_set(v, i) (((v)->counter) = i)
24 24
25#define ATOMIC_OP_RETURN(op, asm_op, asm_con) \
26static inline int __atomic_##op##_return(int i, atomic_t *v) \
27{ \
28 int result; \
29 \
30 asm volatile( \
31 "/* atomic_" #op "_return */\n" \
32 "1: ssrf 5\n" \
33 " ld.w %0, %2\n" \
34 " " #asm_op " %0, %3\n" \
35 " stcond %1, %0\n" \
36 " brne 1b" \
37 : "=&r" (result), "=o" (v->counter) \
38 : "m" (v->counter), #asm_con (i) \
39 : "cc"); \
40 \
41 return result; \
42}
43
44ATOMIC_OP_RETURN(sub, sub, rKs21)
45ATOMIC_OP_RETURN(add, add, r)
46
47#undef ATOMIC_OP_RETURN
48
25/* 49/*
26 * atomic_sub_return - subtract the atomic variable 50 * Probably found the reason why we want to use sub with the signed 21-bit
27 * @i: integer value to subtract 51 * limit, it uses one less register than the add instruction that can add up to
28 * @v: pointer of type atomic_t 52 * 32-bit values.
29 * 53 *
30 * Atomically subtracts @i from @v. Returns the resulting value. 54 * Both instructions are 32-bit, to use a 16-bit instruction the immediate is
55 * very small; 4 bit.
56 *
57 * sub 32-bit, type IV, takes a register and subtracts a 21-bit immediate.
58 * add 32-bit, type II, adds two register values together.
31 */ 59 */
32static inline int atomic_sub_return(int i, atomic_t *v) 60#define IS_21BIT_CONST(i) \
33{ 61 (__builtin_constant_p(i) && ((i) >= -1048575) && ((i) <= 1048576))
34 int result;
35
36 asm volatile(
37 "/* atomic_sub_return */\n"
38 "1: ssrf 5\n"
39 " ld.w %0, %2\n"
40 " sub %0, %3\n"
41 " stcond %1, %0\n"
42 " brne 1b"
43 : "=&r"(result), "=o"(v->counter)
44 : "m"(v->counter), "rKs21"(i)
45 : "cc");
46
47 return result;
48}
49 62
50/* 63/*
51 * atomic_add_return - add integer to atomic variable 64 * atomic_add_return - add integer to atomic variable
@@ -56,51 +69,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
56 */ 69 */
57static inline int atomic_add_return(int i, atomic_t *v) 70static inline int atomic_add_return(int i, atomic_t *v)
58{ 71{
59 int result; 72 if (IS_21BIT_CONST(i))
60 73 return __atomic_sub_return(-i, v);
61 if (__builtin_constant_p(i) && (i >= -1048575) && (i <= 1048576))
62 result = atomic_sub_return(-i, v);
63 else
64 asm volatile(
65 "/* atomic_add_return */\n"
66 "1: ssrf 5\n"
67 " ld.w %0, %1\n"
68 " add %0, %3\n"
69 " stcond %2, %0\n"
70 " brne 1b"
71 : "=&r"(result), "=o"(v->counter)
72 : "m"(v->counter), "r"(i)
73 : "cc", "memory");
74 74
75 return result; 75 return __atomic_add_return(i, v);
76} 76}
77 77
78/* 78/*
79 * atomic_sub_unless - sub unless the number is a given value 79 * atomic_sub_return - subtract the atomic variable
80 * @i: integer value to subtract
80 * @v: pointer of type atomic_t 81 * @v: pointer of type atomic_t
81 * @a: the amount to subtract from v...
82 * @u: ...unless v is equal to u.
83 * 82 *
84 * Atomically subtract @a from @v, so long as it was not @u. 83 * Atomically subtracts @i from @v. Returns the resulting value.
85 * Returns the old value of @v. 84 */
86*/ 85static inline int atomic_sub_return(int i, atomic_t *v)
87static inline void atomic_sub_unless(atomic_t *v, int a, int u)
88{ 86{
89 int tmp; 87 if (IS_21BIT_CONST(i))
88 return __atomic_sub_return(i, v);
90 89
91 asm volatile( 90 return __atomic_add_return(-i, v);
92 "/* atomic_sub_unless */\n"
93 "1: ssrf 5\n"
94 " ld.w %0, %2\n"
95 " cp.w %0, %4\n"
96 " breq 1f\n"
97 " sub %0, %3\n"
98 " stcond %1, %0\n"
99 " brne 1b\n"
100 "1:"
101 : "=&r"(tmp), "=o"(v->counter)
102 : "m"(v->counter), "rKs21"(a), "rKs21"(u)
103 : "cc", "memory");
104} 91}
105 92
106/* 93/*
@@ -116,9 +103,21 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int tmp, old = atomic_read(v);
 
-	if (__builtin_constant_p(a) && (a >= -1048575) && (a <= 1048576))
-		atomic_sub_unless(v, -a, u);
-	else {
+	if (IS_21BIT_CONST(a)) {
+		asm volatile(
+			"/* __atomic_sub_unless */\n"
+			"1:	ssrf	5\n"
+			"	ld.w	%0, %2\n"
+			"	cp.w	%0, %4\n"
+			"	breq	1f\n"
+			"	sub	%0, %3\n"
+			"	stcond	%1, %0\n"
+			"	brne	1b\n"
+			"1:"
+			: "=&r"(tmp), "=o"(v->counter)
+			: "m"(v->counter), "rKs21"(-a), "rKs21"(u)
+			: "cc", "memory");
+	} else {
 		asm volatile(
 			"/* __atomic_add_unless */\n"
 			"1:	ssrf	5\n"
@@ -137,6 +136,8 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 	return old;
 }
 
+#undef IS_21BIT_CONST
+
 /*
  * atomic_sub_if_positive - conditionally subtract integer from atomic variable
  * @i: integer value to subtract
diff --git a/arch/cris/include/asm/atomic.h b/arch/cris/include/asm/atomic.h
index aa429baebaf9..279766a70664 100644
--- a/arch/cris/include/asm/atomic.h
+++ b/arch/cris/include/asm/atomic.h
@@ -17,48 +17,41 @@
17 17
18#define ATOMIC_INIT(i) { (i) } 18#define ATOMIC_INIT(i) { (i) }
19 19
20#define atomic_read(v) (*(volatile int *)&(v)->counter) 20#define atomic_read(v) ACCESS_ONCE((v)->counter)
21#define atomic_set(v,i) (((v)->counter) = (i)) 21#define atomic_set(v,i) (((v)->counter) = (i))
22 22
23/* These should be written in asm but we do it in C for now. */ 23/* These should be written in asm but we do it in C for now. */
24 24
25static inline void atomic_add(int i, volatile atomic_t *v) 25#define ATOMIC_OP(op, c_op) \
26{ 26static inline void atomic_##op(int i, volatile atomic_t *v) \
27 unsigned long flags; 27{ \
28 cris_atomic_save(v, flags); 28 unsigned long flags; \
29 v->counter += i; 29 cris_atomic_save(v, flags); \
30 cris_atomic_restore(v, flags); 30 v->counter c_op i; \
31 cris_atomic_restore(v, flags); \
32} \
33
34#define ATOMIC_OP_RETURN(op, c_op) \
35static inline int atomic_##op##_return(int i, volatile atomic_t *v) \
36{ \
37 unsigned long flags; \
38 int retval; \
39 cris_atomic_save(v, flags); \
40 retval = (v->counter c_op i); \
41 cris_atomic_restore(v, flags); \
42 return retval; \
31} 43}
32 44
33static inline void atomic_sub(int i, volatile atomic_t *v) 45#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
34{
35 unsigned long flags;
36 cris_atomic_save(v, flags);
37 v->counter -= i;
38 cris_atomic_restore(v, flags);
39}
40 46
41static inline int atomic_add_return(int i, volatile atomic_t *v) 47ATOMIC_OPS(add, +=)
42{ 48ATOMIC_OPS(sub, -=)
43 unsigned long flags;
44 int retval;
45 cris_atomic_save(v, flags);
46 retval = (v->counter += i);
47 cris_atomic_restore(v, flags);
48 return retval;
49}
50 49
51#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) 50#undef ATOMIC_OPS
51#undef ATOMIC_OP_RETURN
52#undef ATOMIC_OP
52 53
53static inline int atomic_sub_return(int i, volatile atomic_t *v) 54#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
54{
55 unsigned long flags;
56 int retval;
57 cris_atomic_save(v, flags);
58 retval = (v->counter -= i);
59 cris_atomic_restore(v, flags);
60 return retval;
61}
62 55
63static inline int atomic_sub_and_test(int i, volatile atomic_t *v) 56static inline int atomic_sub_and_test(int i, volatile atomic_t *v)
64{ 57{
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index f6c3a1690101..102190a61d65 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -31,7 +31,7 @@
  */
 
 #define ATOMIC_INIT(i)		{ (i) }
-#define atomic_read(v)		(*(volatile int *)&(v)->counter)
+#define atomic_read(v)		ACCESS_ONCE((v)->counter)
 #define atomic_set(v, i)	(((v)->counter) = (i))
 
 #ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index de916b11bff5..93d07025f183 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -94,41 +94,47 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
94 return __oldval; 94 return __oldval;
95} 95}
96 96
97static inline int atomic_add_return(int i, atomic_t *v) 97#define ATOMIC_OP(op) \
98{ 98static inline void atomic_##op(int i, atomic_t *v) \
99 int output; 99{ \
100 100 int output; \
101 __asm__ __volatile__ ( 101 \
102 "1: %0 = memw_locked(%1);\n" 102 __asm__ __volatile__ ( \
103 " %0 = add(%0,%2);\n" 103 "1: %0 = memw_locked(%1);\n" \
104 " memw_locked(%1,P3)=%0;\n" 104 " %0 = "#op "(%0,%2);\n" \
105 " if !P3 jump 1b;\n" 105 " memw_locked(%1,P3)=%0;\n" \
106 : "=&r" (output) 106 " if !P3 jump 1b;\n" \
107 : "r" (&v->counter), "r" (i) 107 : "=&r" (output) \
108 : "memory", "p3" 108 : "r" (&v->counter), "r" (i) \
109 ); 109 : "memory", "p3" \
110 return output; 110 ); \
111 111} \
112
113#define ATOMIC_OP_RETURN(op) \
114static inline int atomic_##op##_return(int i, atomic_t *v) \
115{ \
116 int output; \
117 \
118 __asm__ __volatile__ ( \
119 "1: %0 = memw_locked(%1);\n" \
120 " %0 = "#op "(%0,%2);\n" \
121 " memw_locked(%1,P3)=%0;\n" \
122 " if !P3 jump 1b;\n" \
123 : "=&r" (output) \
124 : "r" (&v->counter), "r" (i) \
125 : "memory", "p3" \
126 ); \
127 return output; \
112} 128}
113 129
114#define atomic_add(i, v) atomic_add_return(i, (v)) 130#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
115 131
116static inline int atomic_sub_return(int i, atomic_t *v) 132ATOMIC_OPS(add)
117{ 133ATOMIC_OPS(sub)
118 int output;
119 __asm__ __volatile__ (
120 "1: %0 = memw_locked(%1);\n"
121 " %0 = sub(%0,%2);\n"
122 " memw_locked(%1,P3)=%0\n"
123 " if !P3 jump 1b;\n"
124 : "=&r" (output)
125 : "r" (&v->counter), "r" (i)
126 : "memory", "p3"
127 );
128 return output;
129}
130 134
131#define atomic_sub(i, v) atomic_sub_return(i, (v)) 135#undef ATOMIC_OPS
136#undef ATOMIC_OP_RETURN
137#undef ATOMIC_OP
132 138
133/** 139/**
134 * __atomic_add_unless - add unless the number is a given value 140 * __atomic_add_unless - add unless the number is a given value
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 0f8bf48dadf3..0bf03501fe5c 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -21,68 +21,100 @@
21#define ATOMIC_INIT(i) { (i) } 21#define ATOMIC_INIT(i) { (i) }
22#define ATOMIC64_INIT(i) { (i) } 22#define ATOMIC64_INIT(i) { (i) }
23 23
24#define atomic_read(v) (*(volatile int *)&(v)->counter) 24#define atomic_read(v) ACCESS_ONCE((v)->counter)
25#define atomic64_read(v) (*(volatile long *)&(v)->counter) 25#define atomic64_read(v) ACCESS_ONCE((v)->counter)
26 26
27#define atomic_set(v,i) (((v)->counter) = (i)) 27#define atomic_set(v,i) (((v)->counter) = (i))
28#define atomic64_set(v,i) (((v)->counter) = (i)) 28#define atomic64_set(v,i) (((v)->counter) = (i))
29 29
30static __inline__ int 30#define ATOMIC_OP(op, c_op) \
31ia64_atomic_add (int i, atomic_t *v) 31static __inline__ int \
32{ 32ia64_atomic_##op (int i, atomic_t *v) \
33 __s32 old, new; 33{ \
34 CMPXCHG_BUGCHECK_DECL 34 __s32 old, new; \
35 35 CMPXCHG_BUGCHECK_DECL \
36 do { 36 \
37 CMPXCHG_BUGCHECK(v); 37 do { \
38 old = atomic_read(v); 38 CMPXCHG_BUGCHECK(v); \
39 new = old + i; 39 old = atomic_read(v); \
40 } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); 40 new = old c_op i; \
41 return new; 41 } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
42 return new; \
42} 43}
43 44
44static __inline__ long 45ATOMIC_OP(add, +)
45ia64_atomic64_add (__s64 i, atomic64_t *v) 46ATOMIC_OP(sub, -)
46{
47 __s64 old, new;
48 CMPXCHG_BUGCHECK_DECL
49
50 do {
51 CMPXCHG_BUGCHECK(v);
52 old = atomic64_read(v);
53 new = old + i;
54 } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
55 return new;
56}
57 47
58static __inline__ int 48#undef ATOMIC_OP
59ia64_atomic_sub (int i, atomic_t *v)
60{
61 __s32 old, new;
62 CMPXCHG_BUGCHECK_DECL
63
64 do {
65 CMPXCHG_BUGCHECK(v);
66 old = atomic_read(v);
67 new = old - i;
68 } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
69 return new;
70}
71 49
72static __inline__ long 50#define atomic_add_return(i,v) \
73ia64_atomic64_sub (__s64 i, atomic64_t *v) 51({ \
74{ 52 int __ia64_aar_i = (i); \
75 __s64 old, new; 53 (__builtin_constant_p(i) \
76 CMPXCHG_BUGCHECK_DECL 54 && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
77 55 || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
78 do { 56 || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
79 CMPXCHG_BUGCHECK(v); 57 || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
80 old = atomic64_read(v); 58 ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
81 new = old - i; 59 : ia64_atomic_add(__ia64_aar_i, v); \
82 } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); 60})
83 return new; 61
62#define atomic_sub_return(i,v) \
63({ \
64 int __ia64_asr_i = (i); \
65 (__builtin_constant_p(i) \
66 && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
67 || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
68 || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
69 || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
70 ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
71 : ia64_atomic_sub(__ia64_asr_i, v); \
72})
73
74#define ATOMIC64_OP(op, c_op) \
75static __inline__ long \
76ia64_atomic64_##op (__s64 i, atomic64_t *v) \
77{ \
78 __s64 old, new; \
79 CMPXCHG_BUGCHECK_DECL \
80 \
81 do { \
82 CMPXCHG_BUGCHECK(v); \
83 old = atomic64_read(v); \
84 new = old c_op i; \
85 } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
86 return new; \
84} 87}
85 88
89ATOMIC64_OP(add, +)
90ATOMIC64_OP(sub, -)
91
92#undef ATOMIC64_OP
93
94#define atomic64_add_return(i,v) \
95({ \
96 long __ia64_aar_i = (i); \
97 (__builtin_constant_p(i) \
98 && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
99 || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
100 || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
101 || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
102 ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
103 : ia64_atomic64_add(__ia64_aar_i, v); \
104})
105
106#define atomic64_sub_return(i,v) \
107({ \
108 long __ia64_asr_i = (i); \
109 (__builtin_constant_p(i) \
110 && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
111 || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
112 || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
113 || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
114 ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
115 : ia64_atomic64_sub(__ia64_asr_i, v); \
116})
117
86#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) 118#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
87#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 119#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
88 120
@@ -123,30 +155,6 @@ static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
 
 #define atomic64_inc_not_zero(v)	 atomic64_add_unless((v), 1, 0)
 
-#define atomic_add_return(i,v)						\
-({									\
-	int __ia64_aar_i = (i);						\
-	(__builtin_constant_p(i)					\
-	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
-	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
-	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
-	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))	\
-		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
-		: ia64_atomic_add(__ia64_aar_i, v);			\
-})
-
-#define atomic64_add_return(i,v)					\
-({									\
-	long __ia64_aar_i = (i);					\
-	(__builtin_constant_p(i)					\
-	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
-	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
-	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
-	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))	\
-		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
-		: ia64_atomic64_add(__ia64_aar_i, v);			\
-})
-
 /*
  * Atomically add I to V and return TRUE if the resulting value is
  * negative.
@@ -163,30 +171,6 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
 	return atomic64_add_return(i, v) < 0;
 }
 
-#define atomic_sub_return(i,v)						\
-({									\
-	int __ia64_asr_i = (i);						\
-	(__builtin_constant_p(i)					\
-	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
-	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
-	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
-	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
-		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
-		: ia64_atomic_sub(__ia64_asr_i, v);			\
-})
-
-#define atomic64_sub_return(i,v)					\
-({									\
-	long __ia64_asr_i = (i);					\
-	(__builtin_constant_p(i)					\
-	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
-	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
-	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
-	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
-		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
-		: ia64_atomic64_sub(__ia64_asr_i, v);			\
-})
-
 #define atomic_dec_return(v)		atomic_sub_return(1, (v))
 #define atomic_inc_return(v)		atomic_add_return(1, (v))
 #define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
@@ -199,13 +183,13 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
 #define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
 #define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)
 
-#define atomic_add(i,v)			atomic_add_return((i), (v))
-#define atomic_sub(i,v)			atomic_sub_return((i), (v))
+#define atomic_add(i,v)			(void)atomic_add_return((i), (v))
+#define atomic_sub(i,v)			(void)atomic_sub_return((i), (v))
 #define atomic_inc(v)			atomic_add(1, (v))
 #define atomic_dec(v)			atomic_sub(1, (v))
 
-#define atomic64_add(i,v)		atomic64_add_return((i), (v))
-#define atomic64_sub(i,v)		atomic64_sub_return((i), (v))
+#define atomic64_add(i,v)		(void)atomic64_add_return((i), (v))
+#define atomic64_sub(i,v)		(void)atomic64_sub_return((i), (v))
 #define atomic64_inc(v)			atomic64_add(1, (v))
 #define atomic64_dec(v)			atomic64_sub(1, (v))
 
diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h
index 8ad0ed4182a5..31bb74adba08 100644
--- a/arch/m32r/include/asm/atomic.h
+++ b/arch/m32r/include/asm/atomic.h
@@ -28,7 +28,7 @@
  *
  * Atomically reads the value of @v.
  */
-#define atomic_read(v)	(*(volatile int *)&(v)->counter)
+#define atomic_read(v)	ACCESS_ONCE((v)->counter)
 
 /**
  * atomic_set - set atomic variable
@@ -39,85 +39,64 @@
39 */ 39 */
40#define atomic_set(v,i) (((v)->counter) = (i)) 40#define atomic_set(v,i) (((v)->counter) = (i))
41 41
42/**
43 * atomic_add_return - add integer to atomic variable and return it
44 * @i: integer value to add
45 * @v: pointer of type atomic_t
46 *
47 * Atomically adds @i to @v and return (@i + @v).
48 */
49static __inline__ int atomic_add_return(int i, atomic_t *v)
50{
51 unsigned long flags;
52 int result;
53
54 local_irq_save(flags);
55 __asm__ __volatile__ (
56 "# atomic_add_return \n\t"
57 DCACHE_CLEAR("%0", "r4", "%1")
58 M32R_LOCK" %0, @%1; \n\t"
59 "add %0, %2; \n\t"
60 M32R_UNLOCK" %0, @%1; \n\t"
61 : "=&r" (result)
62 : "r" (&v->counter), "r" (i)
63 : "memory"
64#ifdef CONFIG_CHIP_M32700_TS1 42#ifdef CONFIG_CHIP_M32700_TS1
65 , "r4" 43#define __ATOMIC_CLOBBER , "r4"
66#endif /* CONFIG_CHIP_M32700_TS1 */ 44#else
67 ); 45#define __ATOMIC_CLOBBER
68 local_irq_restore(flags); 46#endif
69 47
70 return result; 48#define ATOMIC_OP(op) \
49static __inline__ void atomic_##op(int i, atomic_t *v) \
50{ \
51 unsigned long flags; \
52 int result; \
53 \
54 local_irq_save(flags); \
55 __asm__ __volatile__ ( \
56 "# atomic_" #op " \n\t" \
57 DCACHE_CLEAR("%0", "r4", "%1") \
58 M32R_LOCK" %0, @%1; \n\t" \
59 #op " %0, %2; \n\t" \
60 M32R_UNLOCK" %0, @%1; \n\t" \
61 : "=&r" (result) \
62 : "r" (&v->counter), "r" (i) \
63 : "memory" \
64 __ATOMIC_CLOBBER \
65 ); \
66 local_irq_restore(flags); \
67} \
68
69#define ATOMIC_OP_RETURN(op) \
70static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
71{ \
72 unsigned long flags; \
73 int result; \
74 \
75 local_irq_save(flags); \
76 __asm__ __volatile__ ( \
77 "# atomic_" #op "_return \n\t" \
78 DCACHE_CLEAR("%0", "r4", "%1") \
79 M32R_LOCK" %0, @%1; \n\t" \
80 #op " %0, %2; \n\t" \
81 M32R_UNLOCK" %0, @%1; \n\t" \
82 : "=&r" (result) \
83 : "r" (&v->counter), "r" (i) \
84 : "memory" \
85 __ATOMIC_CLOBBER \
86 ); \
87 local_irq_restore(flags); \
88 \
89 return result; \
71} 90}
72 91
73/** 92#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
74 * atomic_sub_return - subtract integer from atomic variable and return it
75 * @i: integer value to subtract
76 * @v: pointer of type atomic_t
77 *
78 * Atomically subtracts @i from @v and return (@v - @i).
79 */
80static __inline__ int atomic_sub_return(int i, atomic_t *v)
81{
82 unsigned long flags;
83 int result;
84
85 local_irq_save(flags);
86 __asm__ __volatile__ (
87 "# atomic_sub_return \n\t"
88 DCACHE_CLEAR("%0", "r4", "%1")
89 M32R_LOCK" %0, @%1; \n\t"
90 "sub %0, %2; \n\t"
91 M32R_UNLOCK" %0, @%1; \n\t"
92 : "=&r" (result)
93 : "r" (&v->counter), "r" (i)
94 : "memory"
95#ifdef CONFIG_CHIP_M32700_TS1
96 , "r4"
97#endif /* CONFIG_CHIP_M32700_TS1 */
98 );
99 local_irq_restore(flags);
100
101 return result;
102}
103 93
104/** 94ATOMIC_OPS(add)
105 * atomic_add - add integer to atomic variable 95ATOMIC_OPS(sub)
106 * @i: integer value to add
107 * @v: pointer of type atomic_t
108 *
109 * Atomically adds @i to @v.
110 */
111#define atomic_add(i,v) ((void) atomic_add_return((i), (v)))
112 96
113/** 97#undef ATOMIC_OPS
114 * atomic_sub - subtract the atomic variable 98#undef ATOMIC_OP_RETURN
115 * @i: integer value to subtract 99#undef ATOMIC_OP
116 * @v: pointer of type atomic_t
117 *
118 * Atomically subtracts @i from @v.
119 */
120#define atomic_sub(i,v) ((void) atomic_sub_return((i), (v)))
121 100
122/** 101/**
123 * atomic_sub_and_test - subtract value from variable and test result 102 * atomic_sub_and_test - subtract value from variable and test result
@@ -151,9 +130,7 @@ static __inline__ int atomic_inc_return(atomic_t *v)
 		: "=&r" (result)
 		: "r" (&v->counter)
 		: "memory"
-#ifdef CONFIG_CHIP_M32700_TS1
-		, "r4"
-#endif	/* CONFIG_CHIP_M32700_TS1 */
+		__ATOMIC_CLOBBER
 	);
 	local_irq_restore(flags);
 
@@ -181,9 +158,7 @@ static __inline__ int atomic_dec_return(atomic_t *v)
 		: "=&r" (result)
 		: "r" (&v->counter)
 		: "memory"
-#ifdef CONFIG_CHIP_M32700_TS1
-		, "r4"
-#endif	/* CONFIG_CHIP_M32700_TS1 */
+		__ATOMIC_CLOBBER
 	);
 	local_irq_restore(flags);
 
@@ -280,9 +255,7 @@ static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *addr)
 		: "=&r" (tmp)
 		: "r" (addr), "r" (~mask)
 		: "memory"
-#ifdef CONFIG_CHIP_M32700_TS1
-		, "r5"
-#endif	/* CONFIG_CHIP_M32700_TS1 */
+		__ATOMIC_CLOBBER
 	);
 	local_irq_restore(flags);
 }
@@ -302,9 +275,7 @@ static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *addr)
 		: "=&r" (tmp)
 		: "r" (addr), "r" (mask)
 		: "memory"
-#ifdef CONFIG_CHIP_M32700_TS1
-		, "r5"
-#endif	/* CONFIG_CHIP_M32700_TS1 */
+		__ATOMIC_CLOBBER
 	);
 	local_irq_restore(flags);
 }
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index 55695212a2ae..e85f047fb072 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -17,7 +17,7 @@
 
 #define ATOMIC_INIT(i)	{ (i) }
 
-#define atomic_read(v)		(*(volatile int *)&(v)->counter)
+#define atomic_read(v)		ACCESS_ONCE((v)->counter)
 #define atomic_set(v, i)	(((v)->counter) = i)
 
 /*
@@ -30,16 +30,57 @@
30#define ASM_DI "di" 30#define ASM_DI "di"
31#endif 31#endif
32 32
33static inline void atomic_add(int i, atomic_t *v) 33#define ATOMIC_OP(op, c_op, asm_op) \
34{ 34static inline void atomic_##op(int i, atomic_t *v) \
35 __asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i)); 35{ \
36 __asm__ __volatile__(#asm_op "l %1,%0" : "+m" (*v) : ASM_DI (i));\
37} \
38
39#ifdef CONFIG_RMW_INSNS
40
41#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
42static inline int atomic_##op##_return(int i, atomic_t *v) \
43{ \
44 int t, tmp; \
45 \
46 __asm__ __volatile__( \
47 "1: movel %2,%1\n" \
48 " " #asm_op "l %3,%1\n" \
49 " casl %2,%1,%0\n" \
50 " jne 1b" \
51 : "+m" (*v), "=&d" (t), "=&d" (tmp) \
52 : "g" (i), "2" (atomic_read(v))); \
53 return t; \
36} 54}
37 55
38static inline void atomic_sub(int i, atomic_t *v) 56#else
39{ 57
40 __asm__ __volatile__("subl %1,%0" : "+m" (*v) : ASM_DI (i)); 58#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
59static inline int atomic_##op##_return(int i, atomic_t * v) \
60{ \
61 unsigned long flags; \
62 int t; \
63 \
64 local_irq_save(flags); \
65 t = (v->counter c_op i); \
66 local_irq_restore(flags); \
67 \
68 return t; \
41} 69}
42 70
71#endif /* CONFIG_RMW_INSNS */
72
73#define ATOMIC_OPS(op, c_op, asm_op) \
74 ATOMIC_OP(op, c_op, asm_op) \
75 ATOMIC_OP_RETURN(op, c_op, asm_op)
76
77ATOMIC_OPS(add, +=, add)
78ATOMIC_OPS(sub, -=, sub)
79
80#undef ATOMIC_OPS
81#undef ATOMIC_OP_RETURN
82#undef ATOMIC_OP
83
43static inline void atomic_inc(atomic_t *v) 84static inline void atomic_inc(atomic_t *v)
44{ 85{
45 __asm__ __volatile__("addql #1,%0" : "+m" (*v)); 86 __asm__ __volatile__("addql #1,%0" : "+m" (*v));
@@ -76,67 +117,11 @@ static inline int atomic_inc_and_test(atomic_t *v)
76 117
77#ifdef CONFIG_RMW_INSNS 118#ifdef CONFIG_RMW_INSNS
78 119
79static inline int atomic_add_return(int i, atomic_t *v)
80{
81 int t, tmp;
82
83 __asm__ __volatile__(
84 "1: movel %2,%1\n"
85 " addl %3,%1\n"
86 " casl %2,%1,%0\n"
87 " jne 1b"
88 : "+m" (*v), "=&d" (t), "=&d" (tmp)
89 : "g" (i), "2" (atomic_read(v)));
90 return t;
91}
92
93static inline int atomic_sub_return(int i, atomic_t *v)
94{
95 int t, tmp;
96
97 __asm__ __volatile__(
98 "1: movel %2,%1\n"
99 " subl %3,%1\n"
100 " casl %2,%1,%0\n"
101 " jne 1b"
102 : "+m" (*v), "=&d" (t), "=&d" (tmp)
103 : "g" (i), "2" (atomic_read(v)));
104 return t;
105}
106
107#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) 120#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
108#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 121#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
109 122
110#else /* !CONFIG_RMW_INSNS */ 123#else /* !CONFIG_RMW_INSNS */
111 124
112static inline int atomic_add_return(int i, atomic_t * v)
113{
114 unsigned long flags;
115 int t;
116
117 local_irq_save(flags);
118 t = atomic_read(v);
119 t += i;
120 atomic_set(v, t);
121 local_irq_restore(flags);
122
123 return t;
124}
125
126static inline int atomic_sub_return(int i, atomic_t * v)
127{
128 unsigned long flags;
129 int t;
130
131 local_irq_save(flags);
132 t = atomic_read(v);
133 t -= i;
134 atomic_set(v, t);
135 local_irq_restore(flags);
136
137 return t;
138}
139
140static inline int atomic_cmpxchg(atomic_t *v, int old, int new) 125static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
141{ 126{
142 unsigned long flags; 127 unsigned long flags;
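
Note: under CONFIG_RMW_INSNS the generated m68k atomic_##op##_return loops on casl until the compare-and-swap succeeds. Below is a portable analogue of that loop, not the m68k code: __atomic_compare_exchange_n replaces movel/addl/casl/jne, and the demo_* names are invented.

#include <stdbool.h>
#include <stdio.h>

/* Portable sketch of the CAS retry loop: read the counter, compute the
 * new value, retry the compare-and-swap until no other CPU changed the
 * counter in between. On failure, 'old' is reloaded with the current
 * value, so the next iteration recomputes from fresh data. */
typedef struct { int counter; } demo_atomic_t;

static inline int demo_add_return(int i, demo_atomic_t *v)
{
	int old = __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
	int new;

	do {
		new = old + i;
	} while (!__atomic_compare_exchange_n(&v->counter, &old, new,
					      false, __ATOMIC_SEQ_CST,
					      __ATOMIC_RELAXED));
	return new;
}

int main(void)
{
	demo_atomic_t v = { 40 };

	printf("%d\n", demo_add_return(2, &v));	/* prints 42 */
	return 0;
}
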
diff --git a/arch/metag/include/asm/atomic_lnkget.h b/arch/metag/include/asm/atomic_lnkget.h
index d2e60a18986c..948d8688643c 100644
--- a/arch/metag/include/asm/atomic_lnkget.h
+++ b/arch/metag/include/asm/atomic_lnkget.h
@@ -27,85 +27,56 @@ static inline int atomic_read(const atomic_t *v)
27 return temp; 27 return temp;
28} 28}
29 29
30static inline void atomic_add(int i, atomic_t *v) 30#define ATOMIC_OP(op) \
31{ 31static inline void atomic_##op(int i, atomic_t *v) \
32 int temp; 32{ \
33 33 int temp; \
34 asm volatile ( 34 \
35 "1: LNKGETD %0, [%1]\n" 35 asm volatile ( \
36 " ADD %0, %0, %2\n" 36 "1: LNKGETD %0, [%1]\n" \
37 " LNKSETD [%1], %0\n" 37 " " #op " %0, %0, %2\n" \
38 " DEFR %0, TXSTAT\n" 38 " LNKSETD [%1], %0\n" \
39 " ANDT %0, %0, #HI(0x3f000000)\n" 39 " DEFR %0, TXSTAT\n" \
40 " CMPT %0, #HI(0x02000000)\n" 40 " ANDT %0, %0, #HI(0x3f000000)\n" \
41 " BNZ 1b\n" 41 " CMPT %0, #HI(0x02000000)\n" \
42 : "=&d" (temp) 42 " BNZ 1b\n" \
43 : "da" (&v->counter), "bd" (i) 43 : "=&d" (temp) \
44 : "cc"); 44 : "da" (&v->counter), "bd" (i) \
45 : "cc"); \
46} \
47
48#define ATOMIC_OP_RETURN(op) \
49static inline int atomic_##op##_return(int i, atomic_t *v) \
50{ \
51 int result, temp; \
52 \
53 smp_mb(); \
54 \
55 asm volatile ( \
56 "1: LNKGETD %1, [%2]\n" \
57 " " #op " %1, %1, %3\n" \
58 " LNKSETD [%2], %1\n" \
59 " DEFR %0, TXSTAT\n" \
60 " ANDT %0, %0, #HI(0x3f000000)\n" \
61 " CMPT %0, #HI(0x02000000)\n" \
62 " BNZ 1b\n" \
63 : "=&d" (temp), "=&da" (result) \
64 : "da" (&v->counter), "bd" (i) \
65 : "cc"); \
66 \
67 smp_mb(); \
68 \
69 return result; \
45} 70}
46 71
47static inline void atomic_sub(int i, atomic_t *v) 72#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
48{
49 int temp;
50 73
51 asm volatile ( 74ATOMIC_OPS(add)
52 "1: LNKGETD %0, [%1]\n" 75ATOMIC_OPS(sub)
53 " SUB %0, %0, %2\n"
54 " LNKSETD [%1], %0\n"
55 " DEFR %0, TXSTAT\n"
56 " ANDT %0, %0, #HI(0x3f000000)\n"
57 " CMPT %0, #HI(0x02000000)\n"
58 " BNZ 1b\n"
59 : "=&d" (temp)
60 : "da" (&v->counter), "bd" (i)
61 : "cc");
62}
63 76
64static inline int atomic_add_return(int i, atomic_t *v) 77#undef ATOMIC_OPS
65{ 78#undef ATOMIC_OP_RETURN
66 int result, temp; 79#undef ATOMIC_OP
67
68 smp_mb();
69
70 asm volatile (
71 "1: LNKGETD %1, [%2]\n"
72 " ADD %1, %1, %3\n"
73 " LNKSETD [%2], %1\n"
74 " DEFR %0, TXSTAT\n"
75 " ANDT %0, %0, #HI(0x3f000000)\n"
76 " CMPT %0, #HI(0x02000000)\n"
77 " BNZ 1b\n"
78 : "=&d" (temp), "=&da" (result)
79 : "da" (&v->counter), "bd" (i)
80 : "cc");
81
82 smp_mb();
83
84 return result;
85}
86
87static inline int atomic_sub_return(int i, atomic_t *v)
88{
89 int result, temp;
90
91 smp_mb();
92
93 asm volatile (
94 "1: LNKGETD %1, [%2]\n"
95 " SUB %1, %1, %3\n"
96 " LNKSETD [%2], %1\n"
97 " DEFR %0, TXSTAT\n"
98 " ANDT %0, %0, #HI(0x3f000000)\n"
99 " CMPT %0, #HI(0x02000000)\n"
100 " BNZ 1b\n"
101 : "=&d" (temp), "=&da" (result)
102 : "da" (&v->counter), "bd" (i)
103 : "cc");
104
105 smp_mb();
106
107 return result;
108}
109 80
110static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) 81static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
111{ 82{
diff --git a/arch/metag/include/asm/atomic_lock1.h b/arch/metag/include/asm/atomic_lock1.h
index e578955e674b..f5d5898c1020 100644
--- a/arch/metag/include/asm/atomic_lock1.h
+++ b/arch/metag/include/asm/atomic_lock1.h
@@ -37,55 +37,41 @@ static inline int atomic_set(atomic_t *v, int i)
37 return i; 37 return i;
38} 38}
39 39
40static inline void atomic_add(int i, atomic_t *v) 40#define ATOMIC_OP(op, c_op) \
41{ 41static inline void atomic_##op(int i, atomic_t *v) \
42 unsigned long flags; 42{ \
43 43 unsigned long flags; \
44 __global_lock1(flags); 44 \
45 fence(); 45 __global_lock1(flags); \
46 v->counter += i; 46 fence(); \
47 __global_unlock1(flags); 47 v->counter c_op i; \
48 __global_unlock1(flags); \
49} \
50
51#define ATOMIC_OP_RETURN(op, c_op) \
52static inline int atomic_##op##_return(int i, atomic_t *v) \
53{ \
54 unsigned long result; \
55 unsigned long flags; \
56 \
57 __global_lock1(flags); \
58 result = v->counter; \
59 result c_op i; \
60 fence(); \
61 v->counter = result; \
62 __global_unlock1(flags); \
63 \
64 return result; \
48} 65}
49 66
50static inline void atomic_sub(int i, atomic_t *v) 67#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
51{
52 unsigned long flags;
53 68
54 __global_lock1(flags); 69ATOMIC_OPS(add, +=)
55 fence(); 70ATOMIC_OPS(sub, -=)
56 v->counter -= i;
57 __global_unlock1(flags);
58}
59
60static inline int atomic_add_return(int i, atomic_t *v)
61{
62 unsigned long result;
63 unsigned long flags;
64 71
65 __global_lock1(flags); 72#undef ATOMIC_OPS
66 result = v->counter; 73#undef ATOMIC_OP_RETURN
67 result += i; 74#undef ATOMIC_OP
68 fence();
69 v->counter = result;
70 __global_unlock1(flags);
71
72 return result;
73}
74
75static inline int atomic_sub_return(int i, atomic_t *v)
76{
77 unsigned long result;
78 unsigned long flags;
79
80 __global_lock1(flags);
81 result = v->counter;
82 result -= i;
83 fence();
84 v->counter = result;
85 __global_unlock1(flags);
86
87 return result;
88}
89 75
90static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) 76static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
91{ 77{
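
Note: the atomic_lock1 variant above serialises every atomic through a single global lock (__global_lock1()/fence()), with the operator supplied as the c_op macro parameter. The sketch below is a user-space stand-in only: a pthread mutex replaces the global lock, and the demo_* names are invented.

#include <pthread.h>
#include <stdio.h>

/* One lock serialises all atomics; c_op supplies the operator, exactly
 * as in ATOMIC_OP_RETURN(op, c_op) above. Illustrative, not metag code. */
typedef struct { int counter; } demo_atomic_t;

static pthread_mutex_t demo_global_lock = PTHREAD_MUTEX_INITIALIZER;

#define DEMO_OP_RETURN(op, c_op)					\
static int demo_##op##_return(int i, demo_atomic_t *v)			\
{									\
	int result;							\
									\
	pthread_mutex_lock(&demo_global_lock);				\
	result = v->counter;						\
	result c_op i;							\
	v->counter = result;						\
	pthread_mutex_unlock(&demo_global_lock);			\
									\
	return result;							\
}

DEMO_OP_RETURN(add, +=)
DEMO_OP_RETURN(sub, -=)

#undef DEMO_OP_RETURN

int main(void)	/* build with -lpthread */
{
	demo_atomic_t v = { 10 };

	demo_add_return(5, &v);
	printf("%d\n", demo_sub_return(3, &v));	/* prints 12 */
	return 0;
}
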
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 37b2befe651a..6dd6bfc607e9 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -29,7 +29,7 @@
29 * 29 *
30 * Atomically reads the value of @v. 30 * Atomically reads the value of @v.
31 */ 31 */
32#define atomic_read(v) (*(volatile int *)&(v)->counter) 32#define atomic_read(v) ACCESS_ONCE((v)->counter)
33 33
34/* 34/*
35 * atomic_set - set atomic variable 35 * atomic_set - set atomic variable
@@ -40,195 +40,103 @@
40 */ 40 */
41#define atomic_set(v, i) ((v)->counter = (i)) 41#define atomic_set(v, i) ((v)->counter = (i))
42 42
43/* 43#define ATOMIC_OP(op, c_op, asm_op) \
44 * atomic_add - add integer to atomic variable 44static __inline__ void atomic_##op(int i, atomic_t * v) \
45 * @i: integer value to add 45{ \
46 * @v: pointer of type atomic_t 46 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
47 * 47 int temp; \
48 * Atomically adds @i to @v. 48 \
49 */ 49 __asm__ __volatile__( \
50static __inline__ void atomic_add(int i, atomic_t * v) 50 " .set arch=r4000 \n" \
51{ 51 "1: ll %0, %1 # atomic_" #op " \n" \
52 if (kernel_uses_llsc && R10000_LLSC_WAR) { 52 " " #asm_op " %0, %2 \n" \
53 int temp; 53 " sc %0, %1 \n" \
54 54 " beqzl %0, 1b \n" \
55 __asm__ __volatile__( 55 " .set mips0 \n" \
56 " .set arch=r4000 \n" 56 : "=&r" (temp), "+m" (v->counter) \
57 "1: ll %0, %1 # atomic_add \n" 57 : "Ir" (i)); \
58 " addu %0, %2 \n" 58 } else if (kernel_uses_llsc) { \
59 " sc %0, %1 \n" 59 int temp; \
60 " beqzl %0, 1b \n" 60 \
61 " .set mips0 \n" 61 do { \
62 : "=&r" (temp), "+m" (v->counter) 62 __asm__ __volatile__( \
63 : "Ir" (i)); 63 " .set arch=r4000 \n" \
64 } else if (kernel_uses_llsc) { 64 " ll %0, %1 # atomic_" #op "\n" \
65 int temp; 65 " " #asm_op " %0, %2 \n" \
66 66 " sc %0, %1 \n" \
67 do { 67 " .set mips0 \n" \
68 __asm__ __volatile__( 68 : "=&r" (temp), "+m" (v->counter) \
69 " .set arch=r4000 \n" 69 : "Ir" (i)); \
70 " ll %0, %1 # atomic_add \n" 70 } while (unlikely(!temp)); \
71 " addu %0, %2 \n" 71 } else { \
72 " sc %0, %1 \n" 72 unsigned long flags; \
73 " .set mips0 \n" 73 \
74 : "=&r" (temp), "+m" (v->counter) 74 raw_local_irq_save(flags); \
75 : "Ir" (i)); 75 v->counter c_op i; \
76 } while (unlikely(!temp)); 76 raw_local_irq_restore(flags); \
77 } else { 77 } \
78 unsigned long flags; 78} \
79 79
80 raw_local_irq_save(flags); 80#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
81 v->counter += i; 81static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
82 raw_local_irq_restore(flags); 82{ \
83 } 83 int result; \
84} 84 \
85 85 smp_mb__before_llsc(); \
86/* 86 \
87 * atomic_sub - subtract the atomic variable 87 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
88 * @i: integer value to subtract 88 int temp; \
89 * @v: pointer of type atomic_t 89 \
90 * 90 __asm__ __volatile__( \
91 * Atomically subtracts @i from @v. 91 " .set arch=r4000 \n" \
92 */ 92 "1: ll %1, %2 # atomic_" #op "_return \n" \
93static __inline__ void atomic_sub(int i, atomic_t * v) 93 " " #asm_op " %0, %1, %3 \n" \
94{ 94 " sc %0, %2 \n" \
95 if (kernel_uses_llsc && R10000_LLSC_WAR) { 95 " beqzl %0, 1b \n" \
96 int temp; 96 " " #asm_op " %0, %1, %3 \n" \
97 97 " .set mips0 \n" \
98 __asm__ __volatile__( 98 : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
99 " .set arch=r4000 \n" 99 : "Ir" (i)); \
100 "1: ll %0, %1 # atomic_sub \n" 100 } else if (kernel_uses_llsc) { \
101 " subu %0, %2 \n" 101 int temp; \
102 " sc %0, %1 \n" 102 \
103 " beqzl %0, 1b \n" 103 do { \
104 " .set mips0 \n" 104 __asm__ __volatile__( \
105 : "=&r" (temp), "+m" (v->counter) 105 " .set arch=r4000 \n" \
106 : "Ir" (i)); 106 " ll %1, %2 # atomic_" #op "_return \n" \
107 } else if (kernel_uses_llsc) { 107 " " #asm_op " %0, %1, %3 \n" \
108 int temp; 108 " sc %0, %2 \n" \
109 109 " .set mips0 \n" \
110 do { 110 : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
111 __asm__ __volatile__( 111 : "Ir" (i)); \
112 " .set arch=r4000 \n" 112 } while (unlikely(!result)); \
113 " ll %0, %1 # atomic_sub \n" 113 \
114 " subu %0, %2 \n" 114 result = temp; result c_op i; \
115 " sc %0, %1 \n" 115 } else { \
116 " .set mips0 \n" 116 unsigned long flags; \
117 : "=&r" (temp), "+m" (v->counter) 117 \
118 : "Ir" (i)); 118 raw_local_irq_save(flags); \
119 } while (unlikely(!temp)); 119 result = v->counter; \
120 } else { 120 result c_op i; \
121 unsigned long flags; 121 v->counter = result; \
122 122 raw_local_irq_restore(flags); \
123 raw_local_irq_save(flags); 123 } \
124 v->counter -= i; 124 \
125 raw_local_irq_restore(flags); 125 smp_llsc_mb(); \
126 } 126 \
127} 127 return result; \
128
129/*
130 * Same as above, but return the result value
131 */
132static __inline__ int atomic_add_return(int i, atomic_t * v)
133{
134 int result;
135
136 smp_mb__before_llsc();
137
138 if (kernel_uses_llsc && R10000_LLSC_WAR) {
139 int temp;
140
141 __asm__ __volatile__(
142 " .set arch=r4000 \n"
143 "1: ll %1, %2 # atomic_add_return \n"
144 " addu %0, %1, %3 \n"
145 " sc %0, %2 \n"
146 " beqzl %0, 1b \n"
147 " addu %0, %1, %3 \n"
148 " .set mips0 \n"
149 : "=&r" (result), "=&r" (temp), "+m" (v->counter)
150 : "Ir" (i));
151 } else if (kernel_uses_llsc) {
152 int temp;
153
154 do {
155 __asm__ __volatile__(
156 " .set arch=r4000 \n"
157 " ll %1, %2 # atomic_add_return \n"
158 " addu %0, %1, %3 \n"
159 " sc %0, %2 \n"
160 " .set mips0 \n"
161 : "=&r" (result), "=&r" (temp), "+m" (v->counter)
162 : "Ir" (i));
163 } while (unlikely(!result));
164
165 result = temp + i;
166 } else {
167 unsigned long flags;
168
169 raw_local_irq_save(flags);
170 result = v->counter;
171 result += i;
172 v->counter = result;
173 raw_local_irq_restore(flags);
174 }
175
176 smp_llsc_mb();
177
178 return result;
179} 128}
180 129
181static __inline__ int atomic_sub_return(int i, atomic_t * v) 130#define ATOMIC_OPS(op, c_op, asm_op) \
182{ 131 ATOMIC_OP(op, c_op, asm_op) \
183 int result; 132 ATOMIC_OP_RETURN(op, c_op, asm_op)
184 133
185 smp_mb__before_llsc(); 134ATOMIC_OPS(add, +=, addu)
135ATOMIC_OPS(sub, -=, subu)
186 136
187 if (kernel_uses_llsc && R10000_LLSC_WAR) { 137#undef ATOMIC_OPS
188 int temp; 138#undef ATOMIC_OP_RETURN
189 139#undef ATOMIC_OP
190 __asm__ __volatile__(
191 " .set arch=r4000 \n"
192 "1: ll %1, %2 # atomic_sub_return \n"
193 " subu %0, %1, %3 \n"
194 " sc %0, %2 \n"
195 " beqzl %0, 1b \n"
196 " subu %0, %1, %3 \n"
197 " .set mips0 \n"
198 : "=&r" (result), "=&r" (temp), "=m" (v->counter)
199 : "Ir" (i), "m" (v->counter)
200 : "memory");
201
202 result = temp - i;
203 } else if (kernel_uses_llsc) {
204 int temp;
205
206 do {
207 __asm__ __volatile__(
208 " .set arch=r4000 \n"
209 " ll %1, %2 # atomic_sub_return \n"
210 " subu %0, %1, %3 \n"
211 " sc %0, %2 \n"
212 " .set mips0 \n"
213 : "=&r" (result), "=&r" (temp), "+m" (v->counter)
214 : "Ir" (i));
215 } while (unlikely(!result));
216
217 result = temp - i;
218 } else {
219 unsigned long flags;
220
221 raw_local_irq_save(flags);
222 result = v->counter;
223 result -= i;
224 v->counter = result;
225 raw_local_irq_restore(flags);
226 }
227
228 smp_llsc_mb();
229
230 return result;
231}
232 140
233/* 141/*
234 * atomic_sub_if_positive - conditionally subtract integer from atomic variable 142 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
@@ -398,7 +306,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
398 * @v: pointer of type atomic64_t 306 * @v: pointer of type atomic64_t
399 * 307 *
400 */ 308 */
401#define atomic64_read(v) (*(volatile long *)&(v)->counter) 309#define atomic64_read(v) ACCESS_ONCE((v)->counter)
402 310
403/* 311/*
404 * atomic64_set - set atomic variable 312 * atomic64_set - set atomic variable
@@ -407,195 +315,104 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
407 */ 315 */
408#define atomic64_set(v, i) ((v)->counter = (i)) 316#define atomic64_set(v, i) ((v)->counter = (i))
409 317
410/* 318#define ATOMIC64_OP(op, c_op, asm_op) \
411 * atomic64_add - add integer to atomic variable 319static __inline__ void atomic64_##op(long i, atomic64_t * v) \
412 * @i: integer value to add 320{ \
413 * @v: pointer of type atomic64_t 321 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
414 * 322 long temp; \
415 * Atomically adds @i to @v. 323 \
416 */ 324 __asm__ __volatile__( \
417static __inline__ void atomic64_add(long i, atomic64_t * v) 325 " .set arch=r4000 \n" \
418{ 326 "1: lld %0, %1 # atomic64_" #op " \n" \
419 if (kernel_uses_llsc && R10000_LLSC_WAR) { 327 " " #asm_op " %0, %2 \n" \
420 long temp; 328 " scd %0, %1 \n" \
421 329 " beqzl %0, 1b \n" \
422 __asm__ __volatile__( 330 " .set mips0 \n" \
423 " .set arch=r4000 \n" 331 : "=&r" (temp), "+m" (v->counter) \
424 "1: lld %0, %1 # atomic64_add \n" 332 : "Ir" (i)); \
425 " daddu %0, %2 \n" 333 } else if (kernel_uses_llsc) { \
426 " scd %0, %1 \n" 334 long temp; \
427 " beqzl %0, 1b \n" 335 \
428 " .set mips0 \n" 336 do { \
429 : "=&r" (temp), "+m" (v->counter) 337 __asm__ __volatile__( \
430 : "Ir" (i)); 338 " .set arch=r4000 \n" \
431 } else if (kernel_uses_llsc) { 339 " lld %0, %1 # atomic64_" #op "\n" \
432 long temp; 340 " " #asm_op " %0, %2 \n" \
433 341 " scd %0, %1 \n" \
434 do { 342 " .set mips0 \n" \
435 __asm__ __volatile__( 343 : "=&r" (temp), "+m" (v->counter) \
436 " .set arch=r4000 \n" 344 : "Ir" (i)); \
437 " lld %0, %1 # atomic64_add \n" 345 } while (unlikely(!temp)); \
438 " daddu %0, %2 \n" 346 } else { \
439 " scd %0, %1 \n" 347 unsigned long flags; \
440 " .set mips0 \n" 348 \
441 : "=&r" (temp), "+m" (v->counter) 349 raw_local_irq_save(flags); \
442 : "Ir" (i)); 350 v->counter c_op i; \
443 } while (unlikely(!temp)); 351 raw_local_irq_restore(flags); \
444 } else { 352 } \
445 unsigned long flags; 353} \
446 354
447 raw_local_irq_save(flags); 355#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
448 v->counter += i; 356static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
449 raw_local_irq_restore(flags); 357{ \
450 } 358 long result; \
359 \
360 smp_mb__before_llsc(); \
361 \
362 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
363 long temp; \
364 \
365 __asm__ __volatile__( \
366 " .set arch=r4000 \n" \
367 "1: lld %1, %2 # atomic64_" #op "_return\n" \
368 " " #asm_op " %0, %1, %3 \n" \
369 " scd %0, %2 \n" \
370 " beqzl %0, 1b \n" \
371 " " #asm_op " %0, %1, %3 \n" \
372 " .set mips0 \n" \
373 : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
374 : "Ir" (i)); \
375 } else if (kernel_uses_llsc) { \
376 long temp; \
377 \
378 do { \
379 __asm__ __volatile__( \
380 " .set arch=r4000 \n" \
381 " lld %1, %2 # atomic64_" #op "_return\n" \
382 " " #asm_op " %0, %1, %3 \n" \
383 " scd %0, %2 \n" \
384 " .set mips0 \n" \
385 : "=&r" (result), "=&r" (temp), "=m" (v->counter) \
386 : "Ir" (i), "m" (v->counter) \
387 : "memory"); \
388 } while (unlikely(!result)); \
389 \
390 result = temp; result c_op i; \
391 } else { \
392 unsigned long flags; \
393 \
394 raw_local_irq_save(flags); \
395 result = v->counter; \
396 result c_op i; \
397 v->counter = result; \
398 raw_local_irq_restore(flags); \
399 } \
400 \
401 smp_llsc_mb(); \
402 \
403 return result; \
451} 404}
452 405
453/* 406#define ATOMIC64_OPS(op, c_op, asm_op) \
454 * atomic64_sub - subtract the atomic variable 407 ATOMIC64_OP(op, c_op, asm_op) \
455 * @i: integer value to subtract 408 ATOMIC64_OP_RETURN(op, c_op, asm_op)
456 * @v: pointer of type atomic64_t
457 *
458 * Atomically subtracts @i from @v.
459 */
460static __inline__ void atomic64_sub(long i, atomic64_t * v)
461{
462 if (kernel_uses_llsc && R10000_LLSC_WAR) {
463 long temp;
464
465 __asm__ __volatile__(
466 " .set arch=r4000 \n"
467 "1: lld %0, %1 # atomic64_sub \n"
468 " dsubu %0, %2 \n"
469 " scd %0, %1 \n"
470 " beqzl %0, 1b \n"
471 " .set mips0 \n"
472 : "=&r" (temp), "+m" (v->counter)
473 : "Ir" (i));
474 } else if (kernel_uses_llsc) {
475 long temp;
476
477 do {
478 __asm__ __volatile__(
479 " .set arch=r4000 \n"
480 " lld %0, %1 # atomic64_sub \n"
481 " dsubu %0, %2 \n"
482 " scd %0, %1 \n"
483 " .set mips0 \n"
484 : "=&r" (temp), "+m" (v->counter)
485 : "Ir" (i));
486 } while (unlikely(!temp));
487 } else {
488 unsigned long flags;
489
490 raw_local_irq_save(flags);
491 v->counter -= i;
492 raw_local_irq_restore(flags);
493 }
494}
495
496/*
497 * Same as above, but return the result value
498 */
499static __inline__ long atomic64_add_return(long i, atomic64_t * v)
500{
501 long result;
502 409
503 smp_mb__before_llsc(); 410ATOMIC64_OPS(add, +=, daddu)
411ATOMIC64_OPS(sub, -=, dsubu)
504 412
505 if (kernel_uses_llsc && R10000_LLSC_WAR) { 413#undef ATOMIC64_OPS
506 long temp; 414#undef ATOMIC64_OP_RETURN
507 415#undef ATOMIC64_OP
508 __asm__ __volatile__(
509 " .set arch=r4000 \n"
510 "1: lld %1, %2 # atomic64_add_return \n"
511 " daddu %0, %1, %3 \n"
512 " scd %0, %2 \n"
513 " beqzl %0, 1b \n"
514 " daddu %0, %1, %3 \n"
515 " .set mips0 \n"
516 : "=&r" (result), "=&r" (temp), "+m" (v->counter)
517 : "Ir" (i));
518 } else if (kernel_uses_llsc) {
519 long temp;
520
521 do {
522 __asm__ __volatile__(
523 " .set arch=r4000 \n"
524 " lld %1, %2 # atomic64_add_return \n"
525 " daddu %0, %1, %3 \n"
526 " scd %0, %2 \n"
527 " .set mips0 \n"
528 : "=&r" (result), "=&r" (temp), "=m" (v->counter)
529 : "Ir" (i), "m" (v->counter)
530 : "memory");
531 } while (unlikely(!result));
532
533 result = temp + i;
534 } else {
535 unsigned long flags;
536
537 raw_local_irq_save(flags);
538 result = v->counter;
539 result += i;
540 v->counter = result;
541 raw_local_irq_restore(flags);
542 }
543
544 smp_llsc_mb();
545
546 return result;
547}
548
549static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
550{
551 long result;
552
553 smp_mb__before_llsc();
554
555 if (kernel_uses_llsc && R10000_LLSC_WAR) {
556 long temp;
557
558 __asm__ __volatile__(
559 " .set arch=r4000 \n"
560 "1: lld %1, %2 # atomic64_sub_return \n"
561 " dsubu %0, %1, %3 \n"
562 " scd %0, %2 \n"
563 " beqzl %0, 1b \n"
564 " dsubu %0, %1, %3 \n"
565 " .set mips0 \n"
566 : "=&r" (result), "=&r" (temp), "=m" (v->counter)
567 : "Ir" (i), "m" (v->counter)
568 : "memory");
569 } else if (kernel_uses_llsc) {
570 long temp;
571
572 do {
573 __asm__ __volatile__(
574 " .set arch=r4000 \n"
575 " lld %1, %2 # atomic64_sub_return \n"
576 " dsubu %0, %1, %3 \n"
577 " scd %0, %2 \n"
578 " .set mips0 \n"
579 : "=&r" (result), "=&r" (temp), "=m" (v->counter)
580 : "Ir" (i), "m" (v->counter)
581 : "memory");
582 } while (unlikely(!result));
583
584 result = temp - i;
585 } else {
586 unsigned long flags;
587
588 raw_local_irq_save(flags);
589 result = v->counter;
590 result -= i;
591 v->counter = result;
592 raw_local_irq_restore(flags);
593 }
594
595 smp_llsc_mb();
596
597 return result;
598}
599 416
600/* 417/*
601 * atomic64_sub_if_positive - conditionally subtract integer from atomic variable 418 * atomic64_sub_if_positive - conditionally subtract integer from atomic variable
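
Note: in the folded MIPS templates only the _return variants are bracketed by smp_mb__before_llsc()/smp_llsc_mb(); the void ops stay unordered. The sketch below mirrors that barrier placement with explicit fences around a relaxed CAS loop. It is illustrative only: the CAS loop approximates the ll/sc retry and is not the MIPS implementation, and the demo_* names are invented.

#include <stdbool.h>
#include <stdio.h>

/* Value-returning variant: full barrier before and after the update.
 * Void variant: relaxed, no ordering, matching the generated code above. */
typedef struct { int counter; } demo_atomic_t;

static inline void demo_add(int i, demo_atomic_t *v)
{
	(void)__atomic_add_fetch(&v->counter, i, __ATOMIC_RELAXED);
}

static inline int demo_add_return(int i, demo_atomic_t *v)
{
	int old, new;

	__atomic_thread_fence(__ATOMIC_SEQ_CST);	/* ~ smp_mb__before_llsc() */
	old = __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
	do {
		new = old + i;
	} while (!__atomic_compare_exchange_n(&v->counter, &old, new, false,
					      __ATOMIC_RELAXED, __ATOMIC_RELAXED));
	__atomic_thread_fence(__ATOMIC_SEQ_CST);	/* ~ smp_llsc_mb() */

	return new;
}

int main(void)
{
	demo_atomic_t v = { 0 };

	demo_add(40, &v);
	printf("%d\n", demo_add_return(2, &v));	/* prints 42 */
	return 0;
}
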
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index cadeb1e2cdfc..5be655e83e70 100644
--- a/arch/mn10300/include/asm/atomic.h
+++ b/arch/mn10300/include/asm/atomic.h
@@ -33,7 +33,6 @@
33 * @v: pointer of type atomic_t 33 * @v: pointer of type atomic_t
34 * 34 *
35 * Atomically reads the value of @v. Note that the guaranteed 35 * Atomically reads the value of @v. Note that the guaranteed
36 * useful range of an atomic_t is only 24 bits.
37 */ 36 */
38#define atomic_read(v) (ACCESS_ONCE((v)->counter)) 37#define atomic_read(v) (ACCESS_ONCE((v)->counter))
39 38
@@ -43,102 +42,62 @@
43 * @i: required value 42 * @i: required value
44 * 43 *
45 * Atomically sets the value of @v to @i. Note that the guaranteed 44 * Atomically sets the value of @v to @i. Note that the guaranteed
46 * useful range of an atomic_t is only 24 bits.
47 */ 45 */
48#define atomic_set(v, i) (((v)->counter) = (i)) 46#define atomic_set(v, i) (((v)->counter) = (i))
49 47
50/** 48#define ATOMIC_OP(op) \
51 * atomic_add_return - add integer to atomic variable 49static inline void atomic_##op(int i, atomic_t *v) \
52 * @i: integer value to add 50{ \
53 * @v: pointer of type atomic_t 51 int retval, status; \
54 * 52 \
55 * Atomically adds @i to @v and returns the result 53 asm volatile( \
56 * Note that the guaranteed useful range of an atomic_t is only 24 bits. 54 "1: mov %4,(_AAR,%3) \n" \
57 */ 55 " mov (_ADR,%3),%1 \n" \
58static inline int atomic_add_return(int i, atomic_t *v) 56 " " #op " %5,%1 \n" \
59{ 57 " mov %1,(_ADR,%3) \n" \
60 int retval; 58 " mov (_ADR,%3),%0 \n" /* flush */ \
61#ifdef CONFIG_SMP 59 " mov (_ASR,%3),%0 \n" \
62 int status; 60 " or %0,%0 \n" \
63 61 " bne 1b \n" \
64 asm volatile( 62 : "=&r"(status), "=&r"(retval), "=m"(v->counter) \
65 "1: mov %4,(_AAR,%3) \n" 63 : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i) \
66 " mov (_ADR,%3),%1 \n" 64 : "memory", "cc"); \
67 " add %5,%1 \n" 65}
68 " mov %1,(_ADR,%3) \n"
69 " mov (_ADR,%3),%0 \n" /* flush */
70 " mov (_ASR,%3),%0 \n"
71 " or %0,%0 \n"
72 " bne 1b \n"
73 : "=&r"(status), "=&r"(retval), "=m"(v->counter)
74 : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
75 : "memory", "cc");
76
77#else
78 unsigned long flags;
79 66
80 flags = arch_local_cli_save(); 67#define ATOMIC_OP_RETURN(op) \
81 retval = v->counter; 68static inline int atomic_##op##_return(int i, atomic_t *v) \
82 retval += i; 69{ \
83 v->counter = retval; 70 int retval, status; \
84 arch_local_irq_restore(flags); 71 \
85#endif 72 asm volatile( \
86 return retval; 73 "1: mov %4,(_AAR,%3) \n" \
74 " mov (_ADR,%3),%1 \n" \
75 " " #op " %5,%1 \n" \
76 " mov %1,(_ADR,%3) \n" \
77 " mov (_ADR,%3),%0 \n" /* flush */ \
78 " mov (_ASR,%3),%0 \n" \
79 " or %0,%0 \n" \
80 " bne 1b \n" \
81 : "=&r"(status), "=&r"(retval), "=m"(v->counter) \
82 : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i) \
83 : "memory", "cc"); \
84 return retval; \
87} 85}
88 86
89/** 87#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
90 * atomic_sub_return - subtract integer from atomic variable
91 * @i: integer value to subtract
92 * @v: pointer of type atomic_t
93 *
94 * Atomically subtracts @i from @v and returns the result
95 * Note that the guaranteed useful range of an atomic_t is only 24 bits.
96 */
97static inline int atomic_sub_return(int i, atomic_t *v)
98{
99 int retval;
100#ifdef CONFIG_SMP
101 int status;
102 88
103 asm volatile( 89ATOMIC_OPS(add)
104 "1: mov %4,(_AAR,%3) \n" 90ATOMIC_OPS(sub)
105 " mov (_ADR,%3),%1 \n"
106 " sub %5,%1 \n"
107 " mov %1,(_ADR,%3) \n"
108 " mov (_ADR,%3),%0 \n" /* flush */
109 " mov (_ASR,%3),%0 \n"
110 " or %0,%0 \n"
111 " bne 1b \n"
112 : "=&r"(status), "=&r"(retval), "=m"(v->counter)
113 : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
114 : "memory", "cc");
115 91
116#else 92#undef ATOMIC_OPS
117 unsigned long flags; 93#undef ATOMIC_OP_RETURN
118 flags = arch_local_cli_save(); 94#undef ATOMIC_OP
119 retval = v->counter;
120 retval -= i;
121 v->counter = retval;
122 arch_local_irq_restore(flags);
123#endif
124 return retval;
125}
126 95
127static inline int atomic_add_negative(int i, atomic_t *v) 96static inline int atomic_add_negative(int i, atomic_t *v)
128{ 97{
129 return atomic_add_return(i, v) < 0; 98 return atomic_add_return(i, v) < 0;
130} 99}
131 100
132static inline void atomic_add(int i, atomic_t *v)
133{
134 atomic_add_return(i, v);
135}
136
137static inline void atomic_sub(int i, atomic_t *v)
138{
139 atomic_sub_return(i, v);
140}
141
142static inline void atomic_inc(atomic_t *v) 101static inline void atomic_inc(atomic_t *v)
143{ 102{
144 atomic_add_return(1, v); 103 atomic_add_return(1, v);
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 0be2db2c7d44..226f8ca993f6 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -55,24 +55,7 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
55 * are atomic, so a reader never sees inconsistent values. 55 * are atomic, so a reader never sees inconsistent values.
56 */ 56 */
57 57
58/* It's possible to reduce all atomic operations to either 58static __inline__ void atomic_set(atomic_t *v, int i)
59 * __atomic_add_return, atomic_set and atomic_read (the latter
60 * is there only for consistency).
61 */
62
63static __inline__ int __atomic_add_return(int i, atomic_t *v)
64{
65 int ret;
66 unsigned long flags;
67 _atomic_spin_lock_irqsave(v, flags);
68
69 ret = (v->counter += i);
70
71 _atomic_spin_unlock_irqrestore(v, flags);
72 return ret;
73}
74
75static __inline__ void atomic_set(atomic_t *v, int i)
76{ 59{
77 unsigned long flags; 60 unsigned long flags;
78 _atomic_spin_lock_irqsave(v, flags); 61 _atomic_spin_lock_irqsave(v, flags);
@@ -84,7 +67,7 @@ static __inline__ void atomic_set(atomic_t *v, int i)
84 67
85static __inline__ int atomic_read(const atomic_t *v) 68static __inline__ int atomic_read(const atomic_t *v)
86{ 69{
87 return (*(volatile int *)&(v)->counter); 70 return ACCESS_ONCE((v)->counter);
88} 71}
89 72
90/* exported interface */ 73/* exported interface */
@@ -115,16 +98,43 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
115 return c; 98 return c;
116} 99}
117 100
101#define ATOMIC_OP(op, c_op) \
102static __inline__ void atomic_##op(int i, atomic_t *v) \
103{ \
104 unsigned long flags; \
105 \
106 _atomic_spin_lock_irqsave(v, flags); \
107 v->counter c_op i; \
108 _atomic_spin_unlock_irqrestore(v, flags); \
109} \
110
111#define ATOMIC_OP_RETURN(op, c_op) \
112static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
113{ \
114 unsigned long flags; \
115 int ret; \
116 \
117 _atomic_spin_lock_irqsave(v, flags); \
118 ret = (v->counter c_op i); \
119 _atomic_spin_unlock_irqrestore(v, flags); \
120 \
121 return ret; \
122}
123
124#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
125
126ATOMIC_OPS(add, +=)
127ATOMIC_OPS(sub, -=)
128
129#undef ATOMIC_OPS
130#undef ATOMIC_OP_RETURN
131#undef ATOMIC_OP
118 132
119#define atomic_add(i,v) ((void)(__atomic_add_return( (i),(v)))) 133#define atomic_inc(v) (atomic_add( 1,(v)))
120#define atomic_sub(i,v) ((void)(__atomic_add_return(-((int) (i)),(v)))) 134#define atomic_dec(v) (atomic_add( -1,(v)))
121#define atomic_inc(v) ((void)(__atomic_add_return( 1,(v))))
122#define atomic_dec(v) ((void)(__atomic_add_return( -1,(v))))
123 135
124#define atomic_add_return(i,v) (__atomic_add_return( (i),(v))) 136#define atomic_inc_return(v) (atomic_add_return( 1,(v)))
125#define atomic_sub_return(i,v) (__atomic_add_return(-(i),(v))) 137#define atomic_dec_return(v) (atomic_add_return( -1,(v)))
126#define atomic_inc_return(v) (__atomic_add_return( 1,(v)))
127#define atomic_dec_return(v) (__atomic_add_return( -1,(v)))
128 138
129#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) 139#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
130 140
@@ -148,18 +158,37 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
148 158
149#define ATOMIC64_INIT(i) { (i) } 159#define ATOMIC64_INIT(i) { (i) }
150 160
151static __inline__ s64 161#define ATOMIC64_OP(op, c_op) \
152__atomic64_add_return(s64 i, atomic64_t *v) 162static __inline__ void atomic64_##op(s64 i, atomic64_t *v) \
153{ 163{ \
154 s64 ret; 164 unsigned long flags; \
155 unsigned long flags; 165 \
156 _atomic_spin_lock_irqsave(v, flags); 166 _atomic_spin_lock_irqsave(v, flags); \
167 v->counter c_op i; \
168 _atomic_spin_unlock_irqrestore(v, flags); \
169} \
170
171#define ATOMIC64_OP_RETURN(op, c_op) \
172static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \
173{ \
174 unsigned long flags; \
175 s64 ret; \
176 \
177 _atomic_spin_lock_irqsave(v, flags); \
178 ret = (v->counter c_op i); \
179 _atomic_spin_unlock_irqrestore(v, flags); \
180 \
181 return ret; \
182}
157 183
158 ret = (v->counter += i); 184#define ATOMIC64_OPS(op, c_op) ATOMIC64_OP(op, c_op) ATOMIC64_OP_RETURN(op, c_op)
159 185
160 _atomic_spin_unlock_irqrestore(v, flags); 186ATOMIC64_OPS(add, +=)
161 return ret; 187ATOMIC64_OPS(sub, -=)
162} 188
189#undef ATOMIC64_OPS
190#undef ATOMIC64_OP_RETURN
191#undef ATOMIC64_OP
163 192
164static __inline__ void 193static __inline__ void
165atomic64_set(atomic64_t *v, s64 i) 194atomic64_set(atomic64_t *v, s64 i)
@@ -175,18 +204,14 @@ atomic64_set(atomic64_t *v, s64 i)
175static __inline__ s64 204static __inline__ s64
176atomic64_read(const atomic64_t *v) 205atomic64_read(const atomic64_t *v)
177{ 206{
178 return (*(volatile long *)&(v)->counter); 207 return ACCESS_ONCE((v)->counter);
179} 208}
180 209
181#define atomic64_add(i,v) ((void)(__atomic64_add_return( ((s64)(i)),(v)))) 210#define atomic64_inc(v) (atomic64_add( 1,(v)))
182#define atomic64_sub(i,v) ((void)(__atomic64_add_return(-((s64)(i)),(v)))) 211#define atomic64_dec(v) (atomic64_add( -1,(v)))
183#define atomic64_inc(v) ((void)(__atomic64_add_return( 1,(v))))
184#define atomic64_dec(v) ((void)(__atomic64_add_return( -1,(v))))
185 212
186#define atomic64_add_return(i,v) (__atomic64_add_return( ((s64)(i)),(v))) 213#define atomic64_inc_return(v) (atomic64_add_return( 1,(v)))
187#define atomic64_sub_return(i,v) (__atomic64_add_return(-((s64)(i)),(v))) 214#define atomic64_dec_return(v) (atomic64_add_return( -1,(v)))
188#define atomic64_inc_return(v) (__atomic64_add_return( 1,(v)))
189#define atomic64_dec_return(v) (__atomic64_add_return( -1,(v)))
190 215
191#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) 216#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
192 217
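
Note: on SMP parisc, _atomic_spin_lock_irqsave(v, flags) hashes the counter's address into __atomic_hash[ATOMIC_HASH_SIZE] so unrelated atomics rarely share a lock. The sketch below shows only that hashed-lock idea; the hash, table size, pthread locking and demo_* names are all invented for the example.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* The counter's address selects one lock out of a small array, so two
 * unrelated counters usually take different locks. */
#define DEMO_HASH_SIZE 4

typedef struct { int counter; } demo_atomic_t;

static pthread_mutex_t demo_hash[DEMO_HASH_SIZE] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

static pthread_mutex_t *demo_lock_for(const void *addr)
{
	return &demo_hash[((uintptr_t)addr >> 4) % DEMO_HASH_SIZE];
}

static int demo_add_return(int i, demo_atomic_t *v)
{
	pthread_mutex_t *lock = demo_lock_for(v);
	int ret;

	pthread_mutex_lock(lock);
	ret = (v->counter += i);
	pthread_mutex_unlock(lock);

	return ret;
}

int main(void)	/* build with -lpthread */
{
	demo_atomic_t v = { 0 };

	printf("%d\n", demo_add_return(7, &v));	/* prints 7 */
	return 0;
}
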
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 28992d012926..512d2782b043 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -26,76 +26,53 @@ static __inline__ void atomic_set(atomic_t *v, int i)
26 __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i)); 26 __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
27} 27}
28 28
29static __inline__ void atomic_add(int a, atomic_t *v) 29#define ATOMIC_OP(op, asm_op) \
30{ 30static __inline__ void atomic_##op(int a, atomic_t *v) \
31 int t; 31{ \
32 32 int t; \
33 __asm__ __volatile__( 33 \
34"1: lwarx %0,0,%3 # atomic_add\n\ 34 __asm__ __volatile__( \
35 add %0,%2,%0\n" 35"1: lwarx %0,0,%3 # atomic_" #op "\n" \
36 PPC405_ERR77(0,%3) 36 #asm_op " %0,%2,%0\n" \
37" stwcx. %0,0,%3 \n\ 37 PPC405_ERR77(0,%3) \
38 bne- 1b" 38" stwcx. %0,0,%3 \n" \
39 : "=&r" (t), "+m" (v->counter) 39" bne- 1b\n" \
40 : "r" (a), "r" (&v->counter) 40 : "=&r" (t), "+m" (v->counter) \
41 : "cc"); 41 : "r" (a), "r" (&v->counter) \
42 : "cc"); \
43} \
44
45#define ATOMIC_OP_RETURN(op, asm_op) \
46static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
47{ \
48 int t; \
49 \
50 __asm__ __volatile__( \
51 PPC_ATOMIC_ENTRY_BARRIER \
52"1: lwarx %0,0,%2 # atomic_" #op "_return\n" \
53 #asm_op " %0,%1,%0\n" \
54 PPC405_ERR77(0,%2) \
55" stwcx. %0,0,%2 \n" \
56" bne- 1b\n" \
57 PPC_ATOMIC_EXIT_BARRIER \
58 : "=&r" (t) \
59 : "r" (a), "r" (&v->counter) \
60 : "cc", "memory"); \
61 \
62 return t; \
42} 63}
43 64
44static __inline__ int atomic_add_return(int a, atomic_t *v) 65#define ATOMIC_OPS(op, asm_op) ATOMIC_OP(op, asm_op) ATOMIC_OP_RETURN(op, asm_op)
45{
46 int t;
47 66
48 __asm__ __volatile__( 67ATOMIC_OPS(add, add)
49 PPC_ATOMIC_ENTRY_BARRIER 68ATOMIC_OPS(sub, subf)
50"1: lwarx %0,0,%2 # atomic_add_return\n\
51 add %0,%1,%0\n"
52 PPC405_ERR77(0,%2)
53" stwcx. %0,0,%2 \n\
54 bne- 1b"
55 PPC_ATOMIC_EXIT_BARRIER
56 : "=&r" (t)
57 : "r" (a), "r" (&v->counter)
58 : "cc", "memory");
59 69
60 return t; 70#undef ATOMIC_OPS
61} 71#undef ATOMIC_OP_RETURN
72#undef ATOMIC_OP
62 73
63#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) 74#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
64 75
65static __inline__ void atomic_sub(int a, atomic_t *v)
66{
67 int t;
68
69 __asm__ __volatile__(
70"1: lwarx %0,0,%3 # atomic_sub\n\
71 subf %0,%2,%0\n"
72 PPC405_ERR77(0,%3)
73" stwcx. %0,0,%3 \n\
74 bne- 1b"
75 : "=&r" (t), "+m" (v->counter)
76 : "r" (a), "r" (&v->counter)
77 : "cc");
78}
79
80static __inline__ int atomic_sub_return(int a, atomic_t *v)
81{
82 int t;
83
84 __asm__ __volatile__(
85 PPC_ATOMIC_ENTRY_BARRIER
86"1: lwarx %0,0,%2 # atomic_sub_return\n\
87 subf %0,%1,%0\n"
88 PPC405_ERR77(0,%2)
89" stwcx. %0,0,%2 \n\
90 bne- 1b"
91 PPC_ATOMIC_EXIT_BARRIER
92 : "=&r" (t)
93 : "r" (a), "r" (&v->counter)
94 : "cc", "memory");
95
96 return t;
97}
98
99static __inline__ void atomic_inc(atomic_t *v) 76static __inline__ void atomic_inc(atomic_t *v)
100{ 77{
101 int t; 78 int t;
@@ -289,71 +266,50 @@ static __inline__ void atomic64_set(atomic64_t *v, long i)
289 __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i)); 266 __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
290} 267}
291 268
292static __inline__ void atomic64_add(long a, atomic64_t *v) 269#define ATOMIC64_OP(op, asm_op) \
293{ 270static __inline__ void atomic64_##op(long a, atomic64_t *v) \
294 long t; 271{ \
295 272 long t; \
296 __asm__ __volatile__( 273 \
297"1: ldarx %0,0,%3 # atomic64_add\n\ 274 __asm__ __volatile__( \
298 add %0,%2,%0\n\ 275"1: ldarx %0,0,%3 # atomic64_" #op "\n" \
299 stdcx. %0,0,%3 \n\ 276 #asm_op " %0,%2,%0\n" \
300 bne- 1b" 277" stdcx. %0,0,%3 \n" \
301 : "=&r" (t), "+m" (v->counter) 278" bne- 1b\n" \
302 : "r" (a), "r" (&v->counter) 279 : "=&r" (t), "+m" (v->counter) \
303 : "cc"); 280 : "r" (a), "r" (&v->counter) \
281 : "cc"); \
304} 282}
305 283
306static __inline__ long atomic64_add_return(long a, atomic64_t *v) 284#define ATOMIC64_OP_RETURN(op, asm_op) \
307{ 285static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
308 long t; 286{ \
309 287 long t; \
310 __asm__ __volatile__( 288 \
311 PPC_ATOMIC_ENTRY_BARRIER 289 __asm__ __volatile__( \
312"1: ldarx %0,0,%2 # atomic64_add_return\n\ 290 PPC_ATOMIC_ENTRY_BARRIER \
313 add %0,%1,%0\n\ 291"1: ldarx %0,0,%2 # atomic64_" #op "_return\n" \
314 stdcx. %0,0,%2 \n\ 292 #asm_op " %0,%1,%0\n" \
315 bne- 1b" 293" stdcx. %0,0,%2 \n" \
316 PPC_ATOMIC_EXIT_BARRIER 294" bne- 1b\n" \
317 : "=&r" (t) 295 PPC_ATOMIC_EXIT_BARRIER \
318 : "r" (a), "r" (&v->counter) 296 : "=&r" (t) \
319 : "cc", "memory"); 297 : "r" (a), "r" (&v->counter) \
320 298 : "cc", "memory"); \
321 return t; 299 \
300 return t; \
322} 301}
323 302
324#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) 303#define ATOMIC64_OPS(op, asm_op) ATOMIC64_OP(op, asm_op) ATOMIC64_OP_RETURN(op, asm_op)
325
326static __inline__ void atomic64_sub(long a, atomic64_t *v)
327{
328 long t;
329
330 __asm__ __volatile__(
331"1: ldarx %0,0,%3 # atomic64_sub\n\
332 subf %0,%2,%0\n\
333 stdcx. %0,0,%3 \n\
334 bne- 1b"
335 : "=&r" (t), "+m" (v->counter)
336 : "r" (a), "r" (&v->counter)
337 : "cc");
338}
339 304
340static __inline__ long atomic64_sub_return(long a, atomic64_t *v) 305ATOMIC64_OPS(add, add)
341{ 306ATOMIC64_OPS(sub, subf)
342 long t;
343 307
344 __asm__ __volatile__( 308#undef ATOMIC64_OPS
345 PPC_ATOMIC_ENTRY_BARRIER 309#undef ATOMIC64_OP_RETURN
346"1: ldarx %0,0,%2 # atomic64_sub_return\n\ 310#undef ATOMIC64_OP
347 subf %0,%1,%0\n\
348 stdcx. %0,0,%2 \n\
349 bne- 1b"
350 PPC_ATOMIC_EXIT_BARRIER
351 : "=&r" (t)
352 : "r" (a), "r" (&v->counter)
353 : "cc", "memory");
354 311
355 return t; 312#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
356}
357 313
358static __inline__ void atomic64_inc(atomic64_t *v) 314static __inline__ void atomic64_inc(atomic64_t *v)
359{ 315{
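
Note: the powerpc templates above lean on two preprocessor tricks at once: ## pastes tokens into the generated function names, and #asm_op stringises the mnemonic so it can be spliced into the lwarx/stwcx. asm template. The tiny demo below shows both in isolation; everything in it is invented for the example.

#include <stdio.h>

/* ## builds the identifier (demo_add, demo_sub); # turns the op into a
 * string literal that concatenates with its neighbours, the same way
 * #asm_op lands inside the asm template strings above. */
#define DEMO_OP(op, c_op)						\
static int demo_##op(int a, int b)					\
{									\
	printf("expanding %s\n", "demo_" #op);	/* "add" / "sub" */	\
	return a c_op b;						\
}

DEMO_OP(add, +)
DEMO_OP(sub, -)

#undef DEMO_OP

int main(void)
{
	printf("%d %d\n", demo_add(5, 3), demo_sub(5, 3));	/* 8 2 */
	return 0;
}
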
diff --git a/arch/sh/include/asm/atomic-grb.h b/arch/sh/include/asm/atomic-grb.h
index a273c88578fc..97a5fda83450 100644
--- a/arch/sh/include/asm/atomic-grb.h
+++ b/arch/sh/include/asm/atomic-grb.h
@@ -1,85 +1,56 @@
1#ifndef __ASM_SH_ATOMIC_GRB_H 1#ifndef __ASM_SH_ATOMIC_GRB_H
2#define __ASM_SH_ATOMIC_GRB_H 2#define __ASM_SH_ATOMIC_GRB_H
3 3
4static inline void atomic_add(int i, atomic_t *v) 4#define ATOMIC_OP(op) \
5{ 5static inline void atomic_##op(int i, atomic_t *v) \
6 int tmp; 6{ \
7 7 int tmp; \
8 __asm__ __volatile__ ( 8 \
9 " .align 2 \n\t" 9 __asm__ __volatile__ ( \
10 " mova 1f, r0 \n\t" /* r0 = end point */ 10 " .align 2 \n\t" \
11 " mov r15, r1 \n\t" /* r1 = saved sp */ 11 " mova 1f, r0 \n\t" /* r0 = end point */ \
12 " mov #-6, r15 \n\t" /* LOGIN: r15 = size */ 12 " mov r15, r1 \n\t" /* r1 = saved sp */ \
13 " mov.l @%1, %0 \n\t" /* load old value */ 13 " mov #-6, r15 \n\t" /* LOGIN: r15 = size */ \
14 " add %2, %0 \n\t" /* add */ 14 " mov.l @%1, %0 \n\t" /* load old value */ \
15 " mov.l %0, @%1 \n\t" /* store new value */ 15 " " #op " %2, %0 \n\t" /* $op */ \
16 "1: mov r1, r15 \n\t" /* LOGOUT */ 16 " mov.l %0, @%1 \n\t" /* store new value */ \
17 : "=&r" (tmp), 17 "1: mov r1, r15 \n\t" /* LOGOUT */ \
18 "+r" (v) 18 : "=&r" (tmp), \
19 : "r" (i) 19 "+r" (v) \
20 : "memory" , "r0", "r1"); 20 : "r" (i) \
21} 21 : "memory" , "r0", "r1"); \
22 22} \
23static inline void atomic_sub(int i, atomic_t *v)
24{
25 int tmp;
26
27 __asm__ __volatile__ (
28 " .align 2 \n\t"
29 " mova 1f, r0 \n\t" /* r0 = end point */
30 " mov r15, r1 \n\t" /* r1 = saved sp */
31 " mov #-6, r15 \n\t" /* LOGIN: r15 = size */
32 " mov.l @%1, %0 \n\t" /* load old value */
33 " sub %2, %0 \n\t" /* sub */
34 " mov.l %0, @%1 \n\t" /* store new value */
35 "1: mov r1, r15 \n\t" /* LOGOUT */
36 : "=&r" (tmp),
37 "+r" (v)
38 : "r" (i)
39 : "memory" , "r0", "r1");
40}
41
42static inline int atomic_add_return(int i, atomic_t *v)
43{
44 int tmp;
45 23
46 __asm__ __volatile__ ( 24#define ATOMIC_OP_RETURN(op) \
47 " .align 2 \n\t" 25static inline int atomic_##op##_return(int i, atomic_t *v) \
48 " mova 1f, r0 \n\t" /* r0 = end point */ 26{ \
49 " mov r15, r1 \n\t" /* r1 = saved sp */ 27 int tmp; \
50 " mov #-6, r15 \n\t" /* LOGIN: r15 = size */ 28 \
51 " mov.l @%1, %0 \n\t" /* load old value */ 29 __asm__ __volatile__ ( \
52 " add %2, %0 \n\t" /* add */ 30 " .align 2 \n\t" \
53 " mov.l %0, @%1 \n\t" /* store new value */ 31 " mova 1f, r0 \n\t" /* r0 = end point */ \
54 "1: mov r1, r15 \n\t" /* LOGOUT */ 32 " mov r15, r1 \n\t" /* r1 = saved sp */ \
55 : "=&r" (tmp), 33 " mov #-6, r15 \n\t" /* LOGIN: r15 = size */ \
56 "+r" (v) 34 " mov.l @%1, %0 \n\t" /* load old value */ \
57 : "r" (i) 35 " " #op " %2, %0 \n\t" /* $op */ \
58 : "memory" , "r0", "r1"); 36 " mov.l %0, @%1 \n\t" /* store new value */ \
59 37 "1: mov r1, r15 \n\t" /* LOGOUT */ \
60 return tmp; 38 : "=&r" (tmp), \
39 "+r" (v) \
40 : "r" (i) \
41 : "memory" , "r0", "r1"); \
42 \
43 return tmp; \
61} 44}
62 45
63static inline int atomic_sub_return(int i, atomic_t *v) 46#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
64{
65 int tmp;
66 47
67 __asm__ __volatile__ ( 48ATOMIC_OPS(add)
68 " .align 2 \n\t" 49ATOMIC_OPS(sub)
69 " mova 1f, r0 \n\t" /* r0 = end point */
70 " mov r15, r1 \n\t" /* r1 = saved sp */
71 " mov #-6, r15 \n\t" /* LOGIN: r15 = size */
72 " mov.l @%1, %0 \n\t" /* load old value */
73 " sub %2, %0 \n\t" /* sub */
74 " mov.l %0, @%1 \n\t" /* store new value */
75 "1: mov r1, r15 \n\t" /* LOGOUT */
76 : "=&r" (tmp),
77 "+r" (v)
78 : "r" (i)
79 : "memory", "r0", "r1");
80 50
81 return tmp; 51#undef ATOMIC_OPS
82} 52#undef ATOMIC_OP_RETURN
53#undef ATOMIC_OP
83 54
84static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) 55static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
85{ 56{
diff --git a/arch/sh/include/asm/atomic-irq.h b/arch/sh/include/asm/atomic-irq.h
index 9f7c56609e53..61d107523f06 100644
--- a/arch/sh/include/asm/atomic-irq.h
+++ b/arch/sh/include/asm/atomic-irq.h
@@ -8,49 +8,39 @@
8 * forward to code at the end of this object's .text section, then 8 * forward to code at the end of this object's .text section, then
9 * branch back to restart the operation. 9 * branch back to restart the operation.
10 */ 10 */
11static inline void atomic_add(int i, atomic_t *v)
12{
13 unsigned long flags;
14
15 raw_local_irq_save(flags);
16 v->counter += i;
17 raw_local_irq_restore(flags);
18}
19 11
20static inline void atomic_sub(int i, atomic_t *v) 12#define ATOMIC_OP(op, c_op) \
21{ 13static inline void atomic_##op(int i, atomic_t *v) \
22 unsigned long flags; 14{ \
23 15 unsigned long flags; \
24 raw_local_irq_save(flags); 16 \
25 v->counter -= i; 17 raw_local_irq_save(flags); \
26 raw_local_irq_restore(flags); 18 v->counter c_op i; \
19 raw_local_irq_restore(flags); \
27} 20}
28 21
29static inline int atomic_add_return(int i, atomic_t *v) 22#define ATOMIC_OP_RETURN(op, c_op) \
30{ 23static inline int atomic_##op##_return(int i, atomic_t *v) \
31 unsigned long temp, flags; 24{ \
32 25 unsigned long temp, flags; \
33 raw_local_irq_save(flags); 26 \
34 temp = v->counter; 27 raw_local_irq_save(flags); \
35 temp += i; 28 temp = v->counter; \
36 v->counter = temp; 29 temp c_op i; \
37 raw_local_irq_restore(flags); 30 v->counter = temp; \
38 31 raw_local_irq_restore(flags); \
39 return temp; 32 \
33 return temp; \
40} 34}
41 35
42static inline int atomic_sub_return(int i, atomic_t *v) 36#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
43{
44 unsigned long temp, flags;
45 37
46 raw_local_irq_save(flags); 38ATOMIC_OPS(add, +=)
47 temp = v->counter; 39ATOMIC_OPS(sub, -=)
48 temp -= i;
49 v->counter = temp;
50 raw_local_irq_restore(flags);
51 40
52 return temp; 41#undef ATOMIC_OPS
53} 42#undef ATOMIC_OP_RETURN
43#undef ATOMIC_OP
54 44
55static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) 45static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
56{ 46{
diff --git a/arch/sh/include/asm/atomic-llsc.h b/arch/sh/include/asm/atomic-llsc.h
index 4b00b78e3f4f..8575dccb9ef7 100644
--- a/arch/sh/include/asm/atomic-llsc.h
+++ b/arch/sh/include/asm/atomic-llsc.h
@@ -2,39 +2,6 @@
2#define __ASM_SH_ATOMIC_LLSC_H 2#define __ASM_SH_ATOMIC_LLSC_H
3 3
4/* 4/*
5 * To get proper branch prediction for the main line, we must branch
6 * forward to code at the end of this object's .text section, then
7 * branch back to restart the operation.
8 */
9static inline void atomic_add(int i, atomic_t *v)
10{
11 unsigned long tmp;
12
13 __asm__ __volatile__ (
14"1: movli.l @%2, %0 ! atomic_add \n"
15" add %1, %0 \n"
16" movco.l %0, @%2 \n"
17" bf 1b \n"
18 : "=&z" (tmp)
19 : "r" (i), "r" (&v->counter)
20 : "t");
21}
22
23static inline void atomic_sub(int i, atomic_t *v)
24{
25 unsigned long tmp;
26
27 __asm__ __volatile__ (
28"1: movli.l @%2, %0 ! atomic_sub \n"
29" sub %1, %0 \n"
30" movco.l %0, @%2 \n"
31" bf 1b \n"
32 : "=&z" (tmp)
33 : "r" (i), "r" (&v->counter)
34 : "t");
35}
36
37/*
38 * SH-4A note: 5 * SH-4A note:
39 * 6 *
40 * We basically get atomic_xxx_return() for free compared with 7 * We basically get atomic_xxx_return() for free compared with
@@ -42,39 +9,53 @@ static inline void atomic_sub(int i, atomic_t *v)
42 * encoding, so the retval is automatically set without having to 9 * encoding, so the retval is automatically set without having to
43 * do any special work. 10 * do any special work.
44 */ 11 */
45static inline int atomic_add_return(int i, atomic_t *v) 12/*
46{ 13 * To get proper branch prediction for the main line, we must branch
47 unsigned long temp; 14 * forward to code at the end of this object's .text section, then
15 * branch back to restart the operation.
16 */
48 17
49 __asm__ __volatile__ ( 18#define ATOMIC_OP(op) \
50"1: movli.l @%2, %0 ! atomic_add_return \n" 19static inline void atomic_##op(int i, atomic_t *v) \
51" add %1, %0 \n" 20{ \
52" movco.l %0, @%2 \n" 21 unsigned long tmp; \
53" bf 1b \n" 22 \
54" synco \n" 23 __asm__ __volatile__ ( \
55 : "=&z" (temp) 24"1: movli.l @%2, %0 ! atomic_" #op "\n" \
56 : "r" (i), "r" (&v->counter) 25" " #op " %1, %0 \n" \
57 : "t"); 26" movco.l %0, @%2 \n" \
27" bf 1b \n" \
28 : "=&z" (tmp) \
29 : "r" (i), "r" (&v->counter) \
30 : "t"); \
31}
58 32
59 return temp; 33#define ATOMIC_OP_RETURN(op) \
34static inline int atomic_##op##_return(int i, atomic_t *v) \
35{ \
36 unsigned long temp; \
37 \
38 __asm__ __volatile__ ( \
39"1: movli.l @%2, %0 ! atomic_" #op "_return \n" \
40" " #op " %1, %0 \n" \
41" movco.l %0, @%2 \n" \
42" bf 1b \n" \
43" synco \n" \
44 : "=&z" (temp) \
45 : "r" (i), "r" (&v->counter) \
46 : "t"); \
47 \
48 return temp; \
60} 49}
61 50
62static inline int atomic_sub_return(int i, atomic_t *v) 51#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
63{
64 unsigned long temp;
65 52
66 __asm__ __volatile__ ( 53ATOMIC_OPS(add)
67"1: movli.l @%2, %0 ! atomic_sub_return \n" 54ATOMIC_OPS(sub)
68" sub %1, %0 \n"
69" movco.l %0, @%2 \n"
70" bf 1b \n"
71" synco \n"
72 : "=&z" (temp)
73 : "r" (i), "r" (&v->counter)
74 : "t");
75 55
76 return temp; 56#undef ATOMIC_OPS
77} 57#undef ATOMIC_OP_RETURN
58#undef ATOMIC_OP
78 59
79static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) 60static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
80{ 61{
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h
index f57b8a6743b3..05b9f74ce2d5 100644
--- a/arch/sh/include/asm/atomic.h
+++ b/arch/sh/include/asm/atomic.h
@@ -14,7 +14,7 @@
14 14
15#define ATOMIC_INIT(i) { (i) } 15#define ATOMIC_INIT(i) { (i) }
16 16
17#define atomic_read(v) (*(volatile int *)&(v)->counter) 17#define atomic_read(v) ACCESS_ONCE((v)->counter)
18#define atomic_set(v,i) ((v)->counter = (i)) 18#define atomic_set(v,i) ((v)->counter = (i))
19 19
20#if defined(CONFIG_GUSA_RB) 20#if defined(CONFIG_GUSA_RB)
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index 7aed2be45b44..765c1776ec9f 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -20,23 +20,22 @@
20 20
21#define ATOMIC_INIT(i) { (i) } 21#define ATOMIC_INIT(i) { (i) }
22 22
23int __atomic_add_return(int, atomic_t *); 23int atomic_add_return(int, atomic_t *);
24int atomic_cmpxchg(atomic_t *, int, int); 24int atomic_cmpxchg(atomic_t *, int, int);
25#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 25#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
26int __atomic_add_unless(atomic_t *, int, int); 26int __atomic_add_unless(atomic_t *, int, int);
27void atomic_set(atomic_t *, int); 27void atomic_set(atomic_t *, int);
28 28
29#define atomic_read(v) (*(volatile int *)&(v)->counter) 29#define atomic_read(v) ACCESS_ONCE((v)->counter)
30 30
31#define atomic_add(i, v) ((void)__atomic_add_return( (int)(i), (v))) 31#define atomic_add(i, v) ((void)atomic_add_return( (int)(i), (v)))
32#define atomic_sub(i, v) ((void)__atomic_add_return(-(int)(i), (v))) 32#define atomic_sub(i, v) ((void)atomic_add_return(-(int)(i), (v)))
33#define atomic_inc(v) ((void)__atomic_add_return( 1, (v))) 33#define atomic_inc(v) ((void)atomic_add_return( 1, (v)))
34#define atomic_dec(v) ((void)__atomic_add_return( -1, (v))) 34#define atomic_dec(v) ((void)atomic_add_return( -1, (v)))
35 35
36#define atomic_add_return(i, v) (__atomic_add_return( (int)(i), (v))) 36#define atomic_sub_return(i, v) (atomic_add_return(-(int)(i), (v)))
37#define atomic_sub_return(i, v) (__atomic_add_return(-(int)(i), (v))) 37#define atomic_inc_return(v) (atomic_add_return( 1, (v)))
38#define atomic_inc_return(v) (__atomic_add_return( 1, (v))) 38#define atomic_dec_return(v) (atomic_add_return( -1, (v)))
39#define atomic_dec_return(v) (__atomic_add_return( -1, (v)))
40 39
41#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) 40#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
42 41
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index bb894c8bec56..4082749913ce 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -14,33 +14,34 @@
14#define ATOMIC_INIT(i) { (i) } 14#define ATOMIC_INIT(i) { (i) }
15#define ATOMIC64_INIT(i) { (i) } 15#define ATOMIC64_INIT(i) { (i) }
16 16
17#define atomic_read(v) (*(volatile int *)&(v)->counter) 17#define atomic_read(v) ACCESS_ONCE((v)->counter)
18#define atomic64_read(v) (*(volatile long *)&(v)->counter) 18#define atomic64_read(v) ACCESS_ONCE((v)->counter)
19 19
20#define atomic_set(v, i) (((v)->counter) = i) 20#define atomic_set(v, i) (((v)->counter) = i)
21#define atomic64_set(v, i) (((v)->counter) = i) 21#define atomic64_set(v, i) (((v)->counter) = i)
22 22
23void atomic_add(int, atomic_t *); 23#define ATOMIC_OP(op) \
24void atomic64_add(long, atomic64_t *); 24void atomic_##op(int, atomic_t *); \
25void atomic_sub(int, atomic_t *); 25void atomic64_##op(long, atomic64_t *);
26void atomic64_sub(long, atomic64_t *);
27 26
28int atomic_add_ret(int, atomic_t *); 27#define ATOMIC_OP_RETURN(op) \
29long atomic64_add_ret(long, atomic64_t *); 28int atomic_##op##_return(int, atomic_t *); \
30int atomic_sub_ret(int, atomic_t *); 29long atomic64_##op##_return(long, atomic64_t *);
31long atomic64_sub_ret(long, atomic64_t *);
32 30
33#define atomic_dec_return(v) atomic_sub_ret(1, v) 31#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
34#define atomic64_dec_return(v) atomic64_sub_ret(1, v)
35 32
36#define atomic_inc_return(v) atomic_add_ret(1, v) 33ATOMIC_OPS(add)
37#define atomic64_inc_return(v) atomic64_add_ret(1, v) 34ATOMIC_OPS(sub)
38 35
39#define atomic_sub_return(i, v) atomic_sub_ret(i, v) 36#undef ATOMIC_OPS
40#define atomic64_sub_return(i, v) atomic64_sub_ret(i, v) 37#undef ATOMIC_OP_RETURN
38#undef ATOMIC_OP
41 39
42#define atomic_add_return(i, v) atomic_add_ret(i, v) 40#define atomic_dec_return(v) atomic_sub_return(1, v)
43#define atomic64_add_return(i, v) atomic64_add_ret(i, v) 41#define atomic64_dec_return(v) atomic64_sub_return(1, v)
42
43#define atomic_inc_return(v) atomic_add_return(1, v)
44#define atomic64_inc_return(v) atomic64_add_return(1, v)
44 45
45/* 46/*
46 * atomic_inc_and_test - increment and test 47 * atomic_inc_and_test - increment and test
@@ -53,11 +54,11 @@ long atomic64_sub_ret(long, atomic64_t *);
53#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) 54#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
54#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) 55#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
55 56
56#define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0) 57#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
57#define atomic64_sub_and_test(i, v) (atomic64_sub_ret(i, v) == 0) 58#define atomic64_sub_and_test(i, v) (atomic64_sub_return(i, v) == 0)
58 59
59#define atomic_dec_and_test(v) (atomic_sub_ret(1, v) == 0) 60#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
60#define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0) 61#define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
61 62
62#define atomic_inc(v) atomic_add(1, v) 63#define atomic_inc(v) atomic_add(1, v)
63#define atomic64_inc(v) atomic64_add(1, v) 64#define atomic64_inc(v) atomic64_add(1, v)
@@ -65,8 +66,8 @@ long atomic64_sub_ret(long, atomic64_t *);
65#define atomic_dec(v) atomic_sub(1, v) 66#define atomic_dec(v) atomic_sub(1, v)
66#define atomic64_dec(v) atomic64_sub(1, v) 67#define atomic64_dec(v) atomic64_sub(1, v)
67 68
68#define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0) 69#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
69#define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0) 70#define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
70 71
71#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) 72#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
72#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 73#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
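
Note: once the per-architecture op and op_return pairs exist, everything else in the sparc64 header (inc, dec, *_return, *_and_test, add_negative) is a one-line wrapper over them. The sketch below shows that derived layer only; the __atomic-backed primitives and the demo_* names are stand-ins invented for the example, not the sparc code.

#include <stdio.h>

/* Only op and op_return need an arch implementation; the rest are
 * wrappers, as in the header above. */
typedef struct { int counter; } demo_atomic_t;

static inline int demo_add_return(int i, demo_atomic_t *v)
{
	return __atomic_add_fetch(&v->counter, i, __ATOMIC_SEQ_CST);
}

static inline int demo_sub_return(int i, demo_atomic_t *v)
{
	return __atomic_sub_fetch(&v->counter, i, __ATOMIC_SEQ_CST);
}

#define demo_inc_return(v)		demo_add_return(1, (v))
#define demo_dec_return(v)		demo_sub_return(1, (v))
#define demo_sub_and_test(i, v)		(demo_sub_return((i), (v)) == 0)
#define demo_dec_and_test(v)		(demo_sub_return(1, (v)) == 0)
#define demo_add_negative(i, v)		(demo_add_return((i), (v)) < 0)

int main(void)
{
	demo_atomic_t v = { 2 };

	demo_inc_return(&v);				/* counter: 3 */
	printf("%d\n", demo_dec_and_test(&v));		/* 0: counter is 2 */
	printf("%d\n", demo_sub_and_test(2, &v));	/* 1: counter is 0 */
	return 0;
}
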
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index c9300bfaee5a..302c476413d5 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1138,7 +1138,7 @@ static unsigned long penguins_are_doing_time;
1138 1138
1139void smp_capture(void) 1139void smp_capture(void)
1140{ 1140{
1141 int result = atomic_add_ret(1, &smp_capture_depth); 1141 int result = atomic_add_return(1, &smp_capture_depth);
1142 1142
1143 if (result == 1) { 1143 if (result == 1) {
1144 int ncpus = num_online_cpus(); 1144 int ncpus = num_online_cpus();
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index 1d32b54089aa..a7c418ac26af 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -27,18 +27,23 @@ static DEFINE_SPINLOCK(dummy);
27 27
28#endif /* SMP */ 28#endif /* SMP */
29 29
30int __atomic_add_return(int i, atomic_t *v) 30#define ATOMIC_OP(op, cop) \
31{ 31int atomic_##op##_return(int i, atomic_t *v) \
32 int ret; 32{ \
33 unsigned long flags; 33 int ret; \
34 spin_lock_irqsave(ATOMIC_HASH(v), flags); 34 unsigned long flags; \
35 35 spin_lock_irqsave(ATOMIC_HASH(v), flags); \
36 ret = (v->counter += i); 36 \
37 37 ret = (v->counter cop i); \
38 spin_unlock_irqrestore(ATOMIC_HASH(v), flags); 38 \
39 return ret; 39 spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
40} 40 return ret; \
41EXPORT_SYMBOL(__atomic_add_return); 41} \
42EXPORT_SYMBOL(atomic_##op##_return);
43
44ATOMIC_OP(add, +=)
45
46#undef ATOMIC_OP
42 47
43int atomic_cmpxchg(atomic_t *v, int old, int new) 48int atomic_cmpxchg(atomic_t *v, int old, int new)
44{ 49{
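The ATOMIC_OP(op, cop) macro above generates one hashed-spinlock return variant per operator. As a sketch of what the preprocessor produces (derived mechanically from the macro body shown, not an extra change), ATOMIC_OP(add, +=) expands to roughly:

int atomic_add_return(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;
	spin_lock_irqsave(ATOMIC_HASH(v), flags);	/* per-object hashed lock on SMP */

	ret = (v->counter += i);

	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	return ret;
}
EXPORT_SYMBOL(atomic_add_return);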
diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
index 85c233d0a340..05dac43907d1 100644
--- a/arch/sparc/lib/atomic_64.S
+++ b/arch/sparc/lib/atomic_64.S
@@ -14,109 +14,80 @@
14 * memory barriers, and a second which returns 14 * memory barriers, and a second which returns
15 * a value and does the barriers. 15 * a value and does the barriers.
16 */ 16 */
17ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
18 BACKOFF_SETUP(%o2)
191: lduw [%o1], %g1
20 add %g1, %o0, %g7
21 cas [%o1], %g1, %g7
22 cmp %g1, %g7
23 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
24 nop
25 retl
26 nop
272: BACKOFF_SPIN(%o2, %o3, 1b)
28ENDPROC(atomic_add)
29 17
30ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */ 18#define ATOMIC_OP(op) \
31 BACKOFF_SETUP(%o2) 19ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
321: lduw [%o1], %g1 20 BACKOFF_SETUP(%o2); \
33 sub %g1, %o0, %g7 211: lduw [%o1], %g1; \
34 cas [%o1], %g1, %g7 22 op %g1, %o0, %g7; \
35 cmp %g1, %g7 23 cas [%o1], %g1, %g7; \
36 bne,pn %icc, BACKOFF_LABEL(2f, 1b) 24 cmp %g1, %g7; \
37 nop 25 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
38 retl 26 nop; \
39 nop 27 retl; \
402: BACKOFF_SPIN(%o2, %o3, 1b) 28 nop; \
41ENDPROC(atomic_sub) 292: BACKOFF_SPIN(%o2, %o3, 1b); \
30ENDPROC(atomic_##op); \
42 31
43ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */ 32#define ATOMIC_OP_RETURN(op) \
44 BACKOFF_SETUP(%o2) 33ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
451: lduw [%o1], %g1 34 BACKOFF_SETUP(%o2); \
46 add %g1, %o0, %g7 351: lduw [%o1], %g1; \
47 cas [%o1], %g1, %g7 36 op %g1, %o0, %g7; \
48 cmp %g1, %g7 37 cas [%o1], %g1, %g7; \
49 bne,pn %icc, BACKOFF_LABEL(2f, 1b) 38 cmp %g1, %g7; \
50 add %g1, %o0, %g1 39 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
51 retl 40 op %g1, %o0, %g1; \
52 sra %g1, 0, %o0 41 retl; \
532: BACKOFF_SPIN(%o2, %o3, 1b) 42 sra %g1, 0, %o0; \
54ENDPROC(atomic_add_ret) 432: BACKOFF_SPIN(%o2, %o3, 1b); \
44ENDPROC(atomic_##op##_return);
55 45
56ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */ 46#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
57 BACKOFF_SETUP(%o2)
581: lduw [%o1], %g1
59 sub %g1, %o0, %g7
60 cas [%o1], %g1, %g7
61 cmp %g1, %g7
62 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
63 sub %g1, %o0, %g1
64 retl
65 sra %g1, 0, %o0
662: BACKOFF_SPIN(%o2, %o3, 1b)
67ENDPROC(atomic_sub_ret)
68 47
69ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */ 48ATOMIC_OPS(add)
70 BACKOFF_SETUP(%o2) 49ATOMIC_OPS(sub)
711: ldx [%o1], %g1
72 add %g1, %o0, %g7
73 casx [%o1], %g1, %g7
74 cmp %g1, %g7
75 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
76 nop
77 retl
78 nop
792: BACKOFF_SPIN(%o2, %o3, 1b)
80ENDPROC(atomic64_add)
81 50
82ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */ 51#undef ATOMIC_OPS
83 BACKOFF_SETUP(%o2) 52#undef ATOMIC_OP_RETURN
841: ldx [%o1], %g1 53#undef ATOMIC_OP
85 sub %g1, %o0, %g7
86 casx [%o1], %g1, %g7
87 cmp %g1, %g7
88 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
89 nop
90 retl
91 nop
922: BACKOFF_SPIN(%o2, %o3, 1b)
93ENDPROC(atomic64_sub)
94 54
95ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */ 55#define ATOMIC64_OP(op) \
96 BACKOFF_SETUP(%o2) 56ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
971: ldx [%o1], %g1 57 BACKOFF_SETUP(%o2); \
98 add %g1, %o0, %g7 581: ldx [%o1], %g1; \
99 casx [%o1], %g1, %g7 59 op %g1, %o0, %g7; \
100 cmp %g1, %g7 60 casx [%o1], %g1, %g7; \
101 bne,pn %xcc, BACKOFF_LABEL(2f, 1b) 61 cmp %g1, %g7; \
102 nop 62 bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
103 retl 63 nop; \
104 add %g1, %o0, %o0 64 retl; \
1052: BACKOFF_SPIN(%o2, %o3, 1b) 65 nop; \
106ENDPROC(atomic64_add_ret) 662: BACKOFF_SPIN(%o2, %o3, 1b); \
67ENDPROC(atomic64_##op); \
107 68
108ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */ 69#define ATOMIC64_OP_RETURN(op) \
109 BACKOFF_SETUP(%o2) 70ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
1101: ldx [%o1], %g1 71 BACKOFF_SETUP(%o2); \
111 sub %g1, %o0, %g7 721: ldx [%o1], %g1; \
112 casx [%o1], %g1, %g7 73 op %g1, %o0, %g7; \
113 cmp %g1, %g7 74 casx [%o1], %g1, %g7; \
114 bne,pn %xcc, BACKOFF_LABEL(2f, 1b) 75 cmp %g1, %g7; \
115 nop 76 bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
116 retl 77 nop; \
117 sub %g1, %o0, %o0 78 retl; \
1182: BACKOFF_SPIN(%o2, %o3, 1b) 79 op %g1, %o0, %o0; \
119ENDPROC(atomic64_sub_ret) 802: BACKOFF_SPIN(%o2, %o3, 1b); \
81ENDPROC(atomic64_##op##_return);
82
83#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
84
85ATOMIC64_OPS(add)
86ATOMIC64_OPS(sub)
87
88#undef ATOMIC64_OPS
89#undef ATOMIC64_OP_RETURN
90#undef ATOMIC64_OP
120 91
121ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */ 92ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
122 BACKOFF_SETUP(%o2) 93 BACKOFF_SETUP(%o2)
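The folded sparc64 assembly uses one compare-and-swap retry loop for every operation; only the arithmetic instruction substituted for #op differs between add and sub. Purely as an illustration of the control flow (this is not the emitted code, and the helper name below is made up for the sketch), the return variant behaves like a cmpxchg() loop in C:

/* Illustrative C equivalent of ATOMIC_OP_RETURN(op) above, assuming op = add. */
static inline int atomic_add_return_sketch(int i, atomic_t *v)
{
	int old, new;

	do {
		old = atomic_read(v);	/* lduw [%o1], %g1 */
		new = old + i;		/* add  %g1, %o0, %g7 */
	} while (atomic_cmpxchg(v, old, new) != old);	/* cas; branch back with backoff on failure */

	return old + i;			/* second add, result returned via %o0 */
}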
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 323335b9cd2b..1d649a95660c 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -99,14 +99,23 @@ EXPORT_SYMBOL(___copy_in_user);
99EXPORT_SYMBOL(__clear_user); 99EXPORT_SYMBOL(__clear_user);
100 100
101/* Atomic counter implementation. */ 101/* Atomic counter implementation. */
102EXPORT_SYMBOL(atomic_add); 102#define ATOMIC_OP(op) \
103EXPORT_SYMBOL(atomic_add_ret); 103EXPORT_SYMBOL(atomic_##op); \
104EXPORT_SYMBOL(atomic_sub); 104EXPORT_SYMBOL(atomic64_##op);
105EXPORT_SYMBOL(atomic_sub_ret); 105
106EXPORT_SYMBOL(atomic64_add); 106#define ATOMIC_OP_RETURN(op) \
107EXPORT_SYMBOL(atomic64_add_ret); 107EXPORT_SYMBOL(atomic_##op##_return); \
108EXPORT_SYMBOL(atomic64_sub); 108EXPORT_SYMBOL(atomic64_##op##_return);
109EXPORT_SYMBOL(atomic64_sub_ret); 109
110#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
111
112ATOMIC_OPS(add)
113ATOMIC_OPS(sub)
114
115#undef ATOMIC_OPS
116#undef ATOMIC_OP_RETURN
117#undef ATOMIC_OP
118
110EXPORT_SYMBOL(atomic64_dec_if_positive); 119EXPORT_SYMBOL(atomic64_dec_if_positive);
111 120
112/* Atomic bit operations. */ 121/* Atomic bit operations. */
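The export list now uses the same generator pattern as the headers: ATOMIC_OPS(add) and ATOMIC_OPS(sub) expand to the eight EXPORT_SYMBOL() lines that were previously written out by hand, just with the *_ret symbols renamed to *_return. For example, ATOMIC_OPS(add) produces:

EXPORT_SYMBOL(atomic_add);
EXPORT_SYMBOL(atomic64_add);
EXPORT_SYMBOL(atomic_add_return);
EXPORT_SYMBOL(atomic64_add_return);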
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 6dd1c7dd0473..5e5cd123fdfb 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -24,7 +24,7 @@
24 */ 24 */
25static inline int atomic_read(const atomic_t *v) 25static inline int atomic_read(const atomic_t *v)
26{ 26{
27 return (*(volatile int *)&(v)->counter); 27 return ACCESS_ONCE((v)->counter);
28} 28}
29 29
30/** 30/**
@@ -219,21 +219,6 @@ static inline short int atomic_inc_short(short int *v)
219 return *v; 219 return *v;
220} 220}
221 221
222#ifdef CONFIG_X86_64
223/**
224 * atomic_or_long - OR of two long integers
225 * @v1: pointer to type unsigned long
226 * @v2: pointer to type unsigned long
227 *
228 * Atomically ORs @v1 and @v2
229 * Returns the result of the OR
230 */
231static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
232{
233 asm(LOCK_PREFIX "orq %1, %0" : "+m" (*v1) : "r" (v2));
234}
235#endif
236
237/* These are x86-specific, used by some header files */ 222/* These are x86-specific, used by some header files */
238#define atomic_clear_mask(mask, addr) \ 223#define atomic_clear_mask(mask, addr) \
239 asm volatile(LOCK_PREFIX "andl %0,%1" \ 224 asm volatile(LOCK_PREFIX "andl %0,%1" \
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 46e9052bbd28..f8d273e18516 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -18,7 +18,7 @@
18 */ 18 */
19static inline long atomic64_read(const atomic64_t *v) 19static inline long atomic64_read(const atomic64_t *v)
20{ 20{
21 return (*(volatile long *)&(v)->counter); 21 return ACCESS_ONCE((v)->counter);
22} 22}
23 23
24/** 24/**
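Both x86 read helpers (and the xtensa and asm-generic ones further down) switch from an open-coded volatile cast to ACCESS_ONCE(). The generated access is unchanged; the point is to go through the common annotation. For reference, ACCESS_ONCE() in <linux/compiler.h> of this period is defined essentially as the same volatile dereference:

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))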
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index e5103b47a8ce..00b7d46b35b8 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -47,7 +47,7 @@
47 * 47 *
48 * Atomically reads the value of @v. 48 * Atomically reads the value of @v.
49 */ 49 */
50#define atomic_read(v) (*(volatile int *)&(v)->counter) 50#define atomic_read(v) ACCESS_ONCE((v)->counter)
51 51
52/** 52/**
53 * atomic_set - set atomic variable 53 * atomic_set - set atomic variable
@@ -58,165 +58,96 @@
58 */ 58 */
59#define atomic_set(v,i) ((v)->counter = (i)) 59#define atomic_set(v,i) ((v)->counter = (i))
60 60
61/**
62 * atomic_add - add integer to atomic variable
63 * @i: integer value to add
64 * @v: pointer of type atomic_t
65 *
66 * Atomically adds @i to @v.
67 */
68static inline void atomic_add(int i, atomic_t * v)
69{
70#if XCHAL_HAVE_S32C1I 61#if XCHAL_HAVE_S32C1I
71 unsigned long tmp; 62#define ATOMIC_OP(op) \
72 int result; 63static inline void atomic_##op(int i, atomic_t * v) \
73 64{ \
74 __asm__ __volatile__( 65 unsigned long tmp; \
75 "1: l32i %1, %3, 0\n" 66 int result; \
76 " wsr %1, scompare1\n" 67 \
77 " add %0, %1, %2\n" 68 __asm__ __volatile__( \
78 " s32c1i %0, %3, 0\n" 69 "1: l32i %1, %3, 0\n" \
79 " bne %0, %1, 1b\n" 70 " wsr %1, scompare1\n" \
80 : "=&a" (result), "=&a" (tmp) 71 " " #op " %0, %1, %2\n" \
81 : "a" (i), "a" (v) 72 " s32c1i %0, %3, 0\n" \
82 : "memory" 73 " bne %0, %1, 1b\n" \
83 ); 74 : "=&a" (result), "=&a" (tmp) \
84#else 75 : "a" (i), "a" (v) \
85 unsigned int vval; 76 : "memory" \
86 77 ); \
87 __asm__ __volatile__( 78} \
88 " rsil a15, "__stringify(LOCKLEVEL)"\n" 79
89 " l32i %0, %2, 0\n" 80#define ATOMIC_OP_RETURN(op) \
90 " add %0, %0, %1\n" 81static inline int atomic_##op##_return(int i, atomic_t * v) \
91 " s32i %0, %2, 0\n" 82{ \
92 " wsr a15, ps\n" 83 unsigned long tmp; \
93 " rsync\n" 84 int result; \
94 : "=&a" (vval) 85 \
95 : "a" (i), "a" (v) 86 __asm__ __volatile__( \
96 : "a15", "memory" 87 "1: l32i %1, %3, 0\n" \
97 ); 88 " wsr %1, scompare1\n" \
98#endif 89 " " #op " %0, %1, %2\n" \
99} 90 " s32c1i %0, %3, 0\n" \
100 91 " bne %0, %1, 1b\n" \
101/** 92 " " #op " %0, %0, %2\n" \
102 * atomic_sub - subtract the atomic variable 93 : "=&a" (result), "=&a" (tmp) \
103 * @i: integer value to subtract 94 : "a" (i), "a" (v) \
104 * @v: pointer of type atomic_t 95 : "memory" \
105 * 96 ); \
106 * Atomically subtracts @i from @v. 97 \
107 */ 98 return result; \
108static inline void atomic_sub(int i, atomic_t *v)
109{
110#if XCHAL_HAVE_S32C1I
111 unsigned long tmp;
112 int result;
113
114 __asm__ __volatile__(
115 "1: l32i %1, %3, 0\n"
116 " wsr %1, scompare1\n"
117 " sub %0, %1, %2\n"
118 " s32c1i %0, %3, 0\n"
119 " bne %0, %1, 1b\n"
120 : "=&a" (result), "=&a" (tmp)
121 : "a" (i), "a" (v)
122 : "memory"
123 );
124#else
125 unsigned int vval;
126
127 __asm__ __volatile__(
128 " rsil a15, "__stringify(LOCKLEVEL)"\n"
129 " l32i %0, %2, 0\n"
130 " sub %0, %0, %1\n"
131 " s32i %0, %2, 0\n"
132 " wsr a15, ps\n"
133 " rsync\n"
134 : "=&a" (vval)
135 : "a" (i), "a" (v)
136 : "a15", "memory"
137 );
138#endif
139} 99}
140 100
141/* 101#else /* XCHAL_HAVE_S32C1I */
142 * We use atomic_{add|sub}_return to define other functions. 102
143 */ 103#define ATOMIC_OP(op) \
144 104static inline void atomic_##op(int i, atomic_t * v) \
145static inline int atomic_add_return(int i, atomic_t * v) 105{ \
146{ 106 unsigned int vval; \
147#if XCHAL_HAVE_S32C1I 107 \
148 unsigned long tmp; 108 __asm__ __volatile__( \
149 int result; 109 " rsil a15, "__stringify(LOCKLEVEL)"\n"\
150 110 " l32i %0, %2, 0\n" \
151 __asm__ __volatile__( 111 " " #op " %0, %0, %1\n" \
152 "1: l32i %1, %3, 0\n" 112 " s32i %0, %2, 0\n" \
153 " wsr %1, scompare1\n" 113 " wsr a15, ps\n" \
154 " add %0, %1, %2\n" 114 " rsync\n" \
155 " s32c1i %0, %3, 0\n" 115 : "=&a" (vval) \
156 " bne %0, %1, 1b\n" 116 : "a" (i), "a" (v) \
157 " add %0, %0, %2\n" 117 : "a15", "memory" \
158 : "=&a" (result), "=&a" (tmp) 118 ); \
159 : "a" (i), "a" (v) 119} \
160 : "memory" 120
161 ); 121#define ATOMIC_OP_RETURN(op) \
162 122static inline int atomic_##op##_return(int i, atomic_t * v) \
163 return result; 123{ \
164#else 124 unsigned int vval; \
165 unsigned int vval; 125 \
166 126 __asm__ __volatile__( \
167 __asm__ __volatile__( 127 " rsil a15,"__stringify(LOCKLEVEL)"\n" \
168 " rsil a15,"__stringify(LOCKLEVEL)"\n" 128 " l32i %0, %2, 0\n" \
169 " l32i %0, %2, 0\n" 129 " " #op " %0, %0, %1\n" \
170 " add %0, %0, %1\n" 130 " s32i %0, %2, 0\n" \
171 " s32i %0, %2, 0\n" 131 " wsr a15, ps\n" \
172 " wsr a15, ps\n" 132 " rsync\n" \
173 " rsync\n" 133 : "=&a" (vval) \
174 : "=&a" (vval) 134 : "a" (i), "a" (v) \
175 : "a" (i), "a" (v) 135 : "a15", "memory" \
176 : "a15", "memory" 136 ); \
177 ); 137 \
178 138 return vval; \
179 return vval;
180#endif
181} 139}
182 140
183static inline int atomic_sub_return(int i, atomic_t * v) 141#endif /* XCHAL_HAVE_S32C1I */
184{
185#if XCHAL_HAVE_S32C1I
186 unsigned long tmp;
187 int result;
188 142
189 __asm__ __volatile__( 143#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
190 "1: l32i %1, %3, 0\n"
191 " wsr %1, scompare1\n"
192 " sub %0, %1, %2\n"
193 " s32c1i %0, %3, 0\n"
194 " bne %0, %1, 1b\n"
195 " sub %0, %0, %2\n"
196 : "=&a" (result), "=&a" (tmp)
197 : "a" (i), "a" (v)
198 : "memory"
199 );
200 144
201 return result; 145ATOMIC_OPS(add)
202#else 146ATOMIC_OPS(sub)
203 unsigned int vval;
204
205 __asm__ __volatile__(
206 " rsil a15,"__stringify(LOCKLEVEL)"\n"
207 " l32i %0, %2, 0\n"
208 " sub %0, %0, %1\n"
209 " s32i %0, %2, 0\n"
210 " wsr a15, ps\n"
211 " rsync\n"
212 : "=&a" (vval)
213 : "a" (i), "a" (v)
214 : "a15", "memory"
215 );
216 147
217 return vval; 148#undef ATOMIC_OPS
218#endif 149#undef ATOMIC_OP_RETURN
219} 150#undef ATOMIC_OP
220 151
221/** 152/**
222 * atomic_sub_and_test - subtract value from variable and test result 153 * atomic_sub_and_test - subtract value from variable and test result
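As on the other architectures, the xtensa fold keeps two implementations per macro: an S32C1I compare-and-swap loop and an interrupt-disable (rsil/wsr) fallback, now selected once by XCHAL_HAVE_S32C1I instead of inside every function. ATOMIC_OPS(add) and ATOMIC_OPS(sub) then regenerate the same four entry points the file used to spell out by hand:

static inline void atomic_add(int i, atomic_t *v);
static inline void atomic_sub(int i, atomic_t *v);
static inline int atomic_add_return(int i, atomic_t *v);
static inline int atomic_sub_return(int i, atomic_t *v);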
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 9c79e7603459..1973ad2b13f4 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -18,14 +18,100 @@
18#include <asm/cmpxchg.h> 18#include <asm/cmpxchg.h>
19#include <asm/barrier.h> 19#include <asm/barrier.h>
20 20
21/*
22 * atomic_$op() - $op integer to atomic variable
23 * @i: integer value to $op
24 * @v: pointer to the atomic variable
25 *
26 * Atomically $ops @i to @v. Does not strictly guarantee a memory-barrier, use
27 * smp_mb__{before,after}_atomic().
28 */
29
30/*
31 * atomic_$op_return() - $op integer to atomic variable

32 * @i: integer value to $op
33 * @v: pointer to the atomic variable
34 *
35 * Atomically $ops @i to @v. Does imply a full memory barrier.
36 */
37
21#ifdef CONFIG_SMP 38#ifdef CONFIG_SMP
22/* Force people to define core atomics */ 39
23# if !defined(atomic_add_return) || !defined(atomic_sub_return) || \ 40/* we can build all atomic primitives from cmpxchg */
24 !defined(atomic_clear_mask) || !defined(atomic_set_mask) 41
25# error "SMP requires a little arch-specific magic" 42#define ATOMIC_OP(op, c_op) \
26# endif 43static inline void atomic_##op(int i, atomic_t *v) \
44{ \
45 int c, old; \
46 \
47 c = v->counter; \
48 while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
49 c = old; \
50}
51
52#define ATOMIC_OP_RETURN(op, c_op) \
53static inline int atomic_##op##_return(int i, atomic_t *v) \
54{ \
55 int c, old; \
56 \
57 c = v->counter; \
58 while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
59 c = old; \
60 \
61 return c c_op i; \
62}
63
64#else
65
66#include <linux/irqflags.h>
67
68#define ATOMIC_OP(op, c_op) \
69static inline void atomic_##op(int i, atomic_t *v) \
70{ \
71 unsigned long flags; \
72 \
73 raw_local_irq_save(flags); \
74 v->counter = v->counter c_op i; \
75 raw_local_irq_restore(flags); \
76}
77
78#define ATOMIC_OP_RETURN(op, c_op) \
79static inline int atomic_##op##_return(int i, atomic_t *v) \
80{ \
81 unsigned long flags; \
82 int ret; \
83 \
84 raw_local_irq_save(flags); \
85 ret = (v->counter = v->counter c_op i); \
86 raw_local_irq_restore(flags); \
87 \
88 return ret; \
89}
90
91#endif /* CONFIG_SMP */
92
93#ifndef atomic_add_return
94ATOMIC_OP_RETURN(add, +)
95#endif
96
97#ifndef atomic_sub_return
98ATOMIC_OP_RETURN(sub, -)
99#endif
100
101#ifndef atomic_clear_mask
102ATOMIC_OP(and, &)
103#define atomic_clear_mask(i, v) atomic_and(~(i), (v))
27#endif 104#endif
28 105
106#ifndef atomic_set_mask
107#define CONFIG_ARCH_HAS_ATOMIC_OR
108ATOMIC_OP(or, |)
109#define atomic_set_mask(i, v) atomic_or((i), (v))
110#endif
111
112#undef ATOMIC_OP_RETURN
113#undef ATOMIC_OP
114
29/* 115/*
30 * Atomic operations that C can't guarantee us. Useful for 116 * Atomic operations that C can't guarantee us. Useful for
31 * resource counting etc.. 117 * resource counting etc..
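This is the core of the generic rewrite: on SMP every operation is built from cmpxchg(), so an architecture only has to supply that one primitive. Expanding ATOMIC_OP_RETURN(add, +) from the block above (a mechanical expansion, shown only for illustration) gives the familiar retry loop:

static inline int atomic_add_return(int i, atomic_t *v)
{
	int c, old;

	c = v->counter;
	while ((old = cmpxchg(&v->counter, c, c + i)) != c)
		c = old;	/* someone else changed v; retry with the fresh value */

	return c + i;
}

On UP the same macro instead produces the raw_local_irq_save()-protected form, so the !SMP case needs no cmpxchg() at all.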
@@ -33,8 +119,6 @@
33 119
34#define ATOMIC_INIT(i) { (i) } 120#define ATOMIC_INIT(i) { (i) }
35 121
36#ifdef __KERNEL__
37
38/** 122/**
39 * atomic_read - read atomic variable 123 * atomic_read - read atomic variable
40 * @v: pointer of type atomic_t 124 * @v: pointer of type atomic_t
@@ -42,7 +126,7 @@
42 * Atomically reads the value of @v. 126 * Atomically reads the value of @v.
43 */ 127 */
44#ifndef atomic_read 128#ifndef atomic_read
45#define atomic_read(v) (*(volatile int *)&(v)->counter) 129#define atomic_read(v) ACCESS_ONCE((v)->counter)
46#endif 130#endif
47 131
48/** 132/**
@@ -56,52 +140,6 @@
56 140
57#include <linux/irqflags.h> 141#include <linux/irqflags.h>
58 142
59/**
60 * atomic_add_return - add integer to atomic variable
61 * @i: integer value to add
62 * @v: pointer of type atomic_t
63 *
64 * Atomically adds @i to @v and returns the result
65 */
66#ifndef atomic_add_return
67static inline int atomic_add_return(int i, atomic_t *v)
68{
69 unsigned long flags;
70 int temp;
71
72 raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
73 temp = v->counter;
74 temp += i;
75 v->counter = temp;
76 raw_local_irq_restore(flags);
77
78 return temp;
79}
80#endif
81
82/**
83 * atomic_sub_return - subtract integer from atomic variable
84 * @i: integer value to subtract
85 * @v: pointer of type atomic_t
86 *
87 * Atomically subtracts @i from @v and returns the result
88 */
89#ifndef atomic_sub_return
90static inline int atomic_sub_return(int i, atomic_t *v)
91{
92 unsigned long flags;
93 int temp;
94
95 raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
96 temp = v->counter;
97 temp -= i;
98 v->counter = temp;
99 raw_local_irq_restore(flags);
100
101 return temp;
102}
103#endif
104
105static inline int atomic_add_negative(int i, atomic_t *v) 143static inline int atomic_add_negative(int i, atomic_t *v)
106{ 144{
107 return atomic_add_return(i, v) < 0; 145 return atomic_add_return(i, v) < 0;
@@ -139,49 +177,11 @@ static inline void atomic_dec(atomic_t *v)
139 177
140static inline int __atomic_add_unless(atomic_t *v, int a, int u) 178static inline int __atomic_add_unless(atomic_t *v, int a, int u)
141{ 179{
142 int c, old; 180 int c, old;
143 c = atomic_read(v); 181 c = atomic_read(v);
144 while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c) 182 while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
145 c = old; 183 c = old;
146 return c; 184 return c;
147}
148
149/**
150 * atomic_clear_mask - Atomically clear bits in atomic variable
151 * @mask: Mask of the bits to be cleared
152 * @v: pointer of type atomic_t
153 *
154 * Atomically clears the bits set in @mask from @v
155 */
156#ifndef atomic_clear_mask
157static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
158{
159 unsigned long flags;
160
161 mask = ~mask;
162 raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */
163 v->counter &= mask;
164 raw_local_irq_restore(flags);
165} 185}
166#endif
167
168/**
169 * atomic_set_mask - Atomically set bits in atomic variable
170 * @mask: Mask of the bits to be set
171 * @v: pointer of type atomic_t
172 *
173 * Atomically sets the bits set in @mask in @v
174 */
175#ifndef atomic_set_mask
176static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
177{
178 unsigned long flags;
179
180 raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */
181 v->counter |= mask;
182 raw_local_irq_restore(flags);
183}
184#endif
185 186
186#endif /* __KERNEL__ */
187#endif /* __ASM_GENERIC_ATOMIC_H */ 187#endif /* __ASM_GENERIC_ATOMIC_H */
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index b18ce4f9ee3d..30ad9c86cebb 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -20,10 +20,22 @@ typedef struct {
20 20
21extern long long atomic64_read(const atomic64_t *v); 21extern long long atomic64_read(const atomic64_t *v);
22extern void atomic64_set(atomic64_t *v, long long i); 22extern void atomic64_set(atomic64_t *v, long long i);
23extern void atomic64_add(long long a, atomic64_t *v); 23
24extern long long atomic64_add_return(long long a, atomic64_t *v); 24#define ATOMIC64_OP(op) \
25extern void atomic64_sub(long long a, atomic64_t *v); 25extern void atomic64_##op(long long a, atomic64_t *v);
26extern long long atomic64_sub_return(long long a, atomic64_t *v); 26
27#define ATOMIC64_OP_RETURN(op) \
28extern long long atomic64_##op##_return(long long a, atomic64_t *v);
29
30#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
31
32ATOMIC64_OPS(add)
33ATOMIC64_OPS(sub)
34
35#undef ATOMIC64_OPS
36#undef ATOMIC64_OP_RETURN
37#undef ATOMIC64_OP
38
27extern long long atomic64_dec_if_positive(atomic64_t *v); 39extern long long atomic64_dec_if_positive(atomic64_t *v);
28extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n); 40extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n);
29extern long long atomic64_xchg(atomic64_t *v, long long new); 41extern long long atomic64_xchg(atomic64_t *v, long long new);
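In this header the macros only generate extern declarations; the out-of-line, spinlock-protected definitions follow in lib/atomic64.c below. ATOMIC64_OPS(add), for instance, expands to:

extern void atomic64_add(long long a, atomic64_t *v);
extern long long atomic64_add_return(long long a, atomic64_t *v);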
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 08a4f068e61e..1298c05ef528 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -70,53 +70,42 @@ void atomic64_set(atomic64_t *v, long long i)
70} 70}
71EXPORT_SYMBOL(atomic64_set); 71EXPORT_SYMBOL(atomic64_set);
72 72
73void atomic64_add(long long a, atomic64_t *v) 73#define ATOMIC64_OP(op, c_op) \
74{ 74void atomic64_##op(long long a, atomic64_t *v) \
75 unsigned long flags; 75{ \
76 raw_spinlock_t *lock = lock_addr(v); 76 unsigned long flags; \
77 77 raw_spinlock_t *lock = lock_addr(v); \
78 raw_spin_lock_irqsave(lock, flags); 78 \
79 v->counter += a; 79 raw_spin_lock_irqsave(lock, flags); \
80 raw_spin_unlock_irqrestore(lock, flags); 80 v->counter c_op a; \
81} 81 raw_spin_unlock_irqrestore(lock, flags); \
82EXPORT_SYMBOL(atomic64_add); 82} \
83 83EXPORT_SYMBOL(atomic64_##op);
84long long atomic64_add_return(long long a, atomic64_t *v) 84
85{ 85#define ATOMIC64_OP_RETURN(op, c_op) \
86 unsigned long flags; 86long long atomic64_##op##_return(long long a, atomic64_t *v) \
87 raw_spinlock_t *lock = lock_addr(v); 87{ \
88 long long val; 88 unsigned long flags; \
89 89 raw_spinlock_t *lock = lock_addr(v); \
90 raw_spin_lock_irqsave(lock, flags); 90 long long val; \
91 val = v->counter += a; 91 \
92 raw_spin_unlock_irqrestore(lock, flags); 92 raw_spin_lock_irqsave(lock, flags); \
93 return val; 93 val = (v->counter c_op a); \
94} 94 raw_spin_unlock_irqrestore(lock, flags); \
95EXPORT_SYMBOL(atomic64_add_return); 95 return val; \
96 96} \
97void atomic64_sub(long long a, atomic64_t *v) 97EXPORT_SYMBOL(atomic64_##op##_return);
98{ 98
99 unsigned long flags; 99#define ATOMIC64_OPS(op, c_op) \
100 raw_spinlock_t *lock = lock_addr(v); 100 ATOMIC64_OP(op, c_op) \
101 101 ATOMIC64_OP_RETURN(op, c_op)
102 raw_spin_lock_irqsave(lock, flags); 102
103 v->counter -= a; 103ATOMIC64_OPS(add, +=)
104 raw_spin_unlock_irqrestore(lock, flags); 104ATOMIC64_OPS(sub, -=)
105} 105
106EXPORT_SYMBOL(atomic64_sub); 106#undef ATOMIC64_OPS
107 107#undef ATOMIC64_OP_RETURN
108long long atomic64_sub_return(long long a, atomic64_t *v) 108#undef ATOMIC64_OP
109{
110 unsigned long flags;
111 raw_spinlock_t *lock = lock_addr(v);
112 long long val;
113
114 raw_spin_lock_irqsave(lock, flags);
115 val = v->counter -= a;
116 raw_spin_unlock_irqrestore(lock, flags);
117 return val;
118}
119EXPORT_SYMBOL(atomic64_sub_return);
120 109
121long long atomic64_dec_if_positive(atomic64_t *v) 110long long atomic64_dec_if_positive(atomic64_t *v)
122{ 111{