path: root/arch/xtensa
author    Peter Zijlstra <peterz@infradead.org>  2014-03-26 13:31:12 -0400
committer Ingo Molnar <mingo@kernel.org>         2014-08-14 06:48:14 -0400
commit    d4608dd5b4ec13855680b89f719d8d4b2da92411
tree      7660d636c571ad0993f890085a88e829d7e0b9a0 /arch/xtensa
parent    4f3316c2b5fe2062c26c9b66915b5a5c80c60a5c
locking,arch,xtensa: Fold atomic_ops
Many of the atomic op implementations are the same except for one
instruction; fold the lot into a few CPP macros and reduce LoC.

This also prepares for easy addition of new ops.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Chris Zankel <chris@zankel.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: linux-xtensa@linux-xtensa.org
Link: http://lkml.kernel.org/r/20140508135852.879575796@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/xtensa')
-rw-r--r--  arch/xtensa/include/asm/atomic.h | 233
1 file changed, 82 insertions(+), 151 deletions(-)
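The folding technique is easy to demonstrate outside the kernel. The sketch below is a hypothetical, self-contained user-space analogue: the macro names mirror the patch, but the Xtensa assembly is replaced with the generic GCC/Clang __atomic builtins, so it illustrates only the CPP token-pasting pattern, not the kernel implementation.

/* Hypothetical demo of the ATOMIC_OP/ATOMIC_OP_RETURN folding pattern.
 * Build with: gcc -O2 demo.c && ./a.out
 */
#include <stdio.h>

typedef struct { int counter; } atomic_t;

/* One template generates the void variant of each op; "op" is pasted
 * into both the function name and the __atomic builtin name, just as
 * the patch pastes it into the instruction string. */
#define ATOMIC_OP(op)                                                  \
static inline void atomic_##op(int i, atomic_t *v)                     \
{                                                                      \
        (void)__atomic_##op##_fetch(&v->counter, i, __ATOMIC_SEQ_CST); \
}

/* A second template generates the value-returning variant. */
#define ATOMIC_OP_RETURN(op)                                           \
static inline int atomic_##op##_return(int i, atomic_t *v)             \
{                                                                      \
        return __atomic_##op##_fetch(&v->counter, i, __ATOMIC_SEQ_CST);\
}

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)

ATOMIC_OPS(add)         /* atomic_add(), atomic_add_return() */
ATOMIC_OPS(sub)         /* atomic_sub(), atomic_sub_return() */

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

int main(void)
{
        atomic_t v = { 40 };

        atomic_add(10, &v);
        printf("%d\n", atomic_sub_return(8, &v));       /* prints 42 */
        return 0;
}

Adding a new operation then costs one ATOMIC_OPS() line per pair of variants (e.g. a hypothetical ATOMIC_OPS(and), provided a matching instruction or builtin exists), which is the "easy addition of new ops" the changelog refers to.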
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index e5103b47a8ce..626676660b80 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -58,165 +58,96 @@
  */
 #define atomic_set(v,i) ((v)->counter = (i))
 
-/**
- * atomic_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v.
- */
-static inline void atomic_add(int i, atomic_t * v)
-{
-#if XCHAL_HAVE_S32C1I
-        unsigned long tmp;
-        int result;
-
-        __asm__ __volatile__(
-                        "1:     l32i    %1, %3, 0\n"
-                        "       wsr     %1, scompare1\n"
-                        "       add     %0, %1, %2\n"
-                        "       s32c1i  %0, %3, 0\n"
-                        "       bne     %0, %1, 1b\n"
-                        : "=&a" (result), "=&a" (tmp)
-                        : "a" (i), "a" (v)
-                        : "memory"
-                        );
-#else
-        unsigned int vval;
-
-        __asm__ __volatile__(
-                        "       rsil    a15, "__stringify(LOCKLEVEL)"\n"
-                        "       l32i    %0, %2, 0\n"
-                        "       add     %0, %0, %1\n"
-                        "       s32i    %0, %2, 0\n"
-                        "       wsr     a15, ps\n"
-                        "       rsync\n"
-                        : "=&a" (vval)
-                        : "a" (i), "a" (v)
-                        : "a15", "memory"
-                        );
-#endif
-}
-
-/**
- * atomic_sub - subtract the atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v.
- */
-static inline void atomic_sub(int i, atomic_t *v)
-{
-#if XCHAL_HAVE_S32C1I
-        unsigned long tmp;
-        int result;
-
-        __asm__ __volatile__(
-                        "1:     l32i    %1, %3, 0\n"
-                        "       wsr     %1, scompare1\n"
-                        "       sub     %0, %1, %2\n"
-                        "       s32c1i  %0, %3, 0\n"
-                        "       bne     %0, %1, 1b\n"
-                        : "=&a" (result), "=&a" (tmp)
-                        : "a" (i), "a" (v)
-                        : "memory"
-                        );
-#else
-        unsigned int vval;
-
-        __asm__ __volatile__(
-                        "       rsil    a15, "__stringify(LOCKLEVEL)"\n"
-                        "       l32i    %0, %2, 0\n"
-                        "       sub     %0, %0, %1\n"
-                        "       s32i    %0, %2, 0\n"
-                        "       wsr     a15, ps\n"
-                        "       rsync\n"
-                        : "=&a" (vval)
-                        : "a" (i), "a" (v)
-                        : "a15", "memory"
-                        );
-#endif
-}
-
-/*
- * We use atomic_{add|sub}_return to define other functions.
- */
-
-static inline int atomic_add_return(int i, atomic_t * v)
-{
-#if XCHAL_HAVE_S32C1I
-        unsigned long tmp;
-        int result;
-
-        __asm__ __volatile__(
-                        "1:     l32i    %1, %3, 0\n"
-                        "       wsr     %1, scompare1\n"
-                        "       add     %0, %1, %2\n"
-                        "       s32c1i  %0, %3, 0\n"
-                        "       bne     %0, %1, 1b\n"
-                        "       add     %0, %0, %2\n"
-                        : "=&a" (result), "=&a" (tmp)
-                        : "a" (i), "a" (v)
-                        : "memory"
-                        );
-
-        return result;
-#else
-        unsigned int vval;
-
-        __asm__ __volatile__(
-                        "       rsil    a15,"__stringify(LOCKLEVEL)"\n"
-                        "       l32i    %0, %2, 0\n"
-                        "       add     %0, %0, %1\n"
-                        "       s32i    %0, %2, 0\n"
-                        "       wsr     a15, ps\n"
-                        "       rsync\n"
-                        : "=&a" (vval)
-                        : "a" (i), "a" (v)
-                        : "a15", "memory"
-                        );
-
-        return vval;
-#endif
-}
-
-static inline int atomic_sub_return(int i, atomic_t * v)
-{
-#if XCHAL_HAVE_S32C1I
-        unsigned long tmp;
-        int result;
-
-        __asm__ __volatile__(
-                        "1:     l32i    %1, %3, 0\n"
-                        "       wsr     %1, scompare1\n"
-                        "       sub     %0, %1, %2\n"
-                        "       s32c1i  %0, %3, 0\n"
-                        "       bne     %0, %1, 1b\n"
-                        "       sub     %0, %0, %2\n"
-                        : "=&a" (result), "=&a" (tmp)
-                        : "a" (i), "a" (v)
-                        : "memory"
-                        );
-
-        return result;
-#else
-        unsigned int vval;
-
-        __asm__ __volatile__(
-                        "       rsil    a15,"__stringify(LOCKLEVEL)"\n"
-                        "       l32i    %0, %2, 0\n"
-                        "       sub     %0, %0, %1\n"
-                        "       s32i    %0, %2, 0\n"
-                        "       wsr     a15, ps\n"
-                        "       rsync\n"
-                        : "=&a" (vval)
-                        : "a" (i), "a" (v)
-                        : "a15", "memory"
-                        );
-
-        return vval;
-#endif
-}
+#if XCHAL_HAVE_S32C1I
+#define ATOMIC_OP(op)                                                  \
+static inline void atomic_##op(int i, atomic_t * v)                    \
+{                                                                      \
+        unsigned long tmp;                                             \
+        int result;                                                    \
+                                                                       \
+        __asm__ __volatile__(                                          \
+                        "1:     l32i    %1, %3, 0\n"                   \
+                        "       wsr     %1, scompare1\n"               \
+                        "       " #op " %0, %1, %2\n"                  \
+                        "       s32c1i  %0, %3, 0\n"                   \
+                        "       bne     %0, %1, 1b\n"                  \
+                        : "=&a" (result), "=&a" (tmp)                  \
+                        : "a" (i), "a" (v)                             \
+                        : "memory"                                     \
+                        );                                             \
+}                                                                      \
+
+#define ATOMIC_OP_RETURN(op)                                           \
+static inline int atomic_##op##_return(int i, atomic_t * v)            \
+{                                                                      \
+        unsigned long tmp;                                             \
+        int result;                                                    \
+                                                                       \
+        __asm__ __volatile__(                                          \
+                        "1:     l32i    %1, %3, 0\n"                   \
+                        "       wsr     %1, scompare1\n"               \
+                        "       " #op " %0, %1, %2\n"                  \
+                        "       s32c1i  %0, %3, 0\n"                   \
+                        "       bne     %0, %1, 1b\n"                  \
+                        "       " #op " %0, %0, %2\n"                  \
+                        : "=&a" (result), "=&a" (tmp)                  \
+                        : "a" (i), "a" (v)                             \
+                        : "memory"                                     \
+                        );                                             \
+                                                                       \
+        return result;                                                 \
+}
+
+#else /* XCHAL_HAVE_S32C1I */
+
+#define ATOMIC_OP(op)                                                  \
+static inline void atomic_##op(int i, atomic_t * v)                    \
+{                                                                      \
+        unsigned int vval;                                             \
+                                                                       \
+        __asm__ __volatile__(                                          \
+                        "       rsil    a15, "__stringify(LOCKLEVEL)"\n"\
+                        "       l32i    %0, %2, 0\n"                   \
+                        "       " #op " %0, %0, %1\n"                  \
+                        "       s32i    %0, %2, 0\n"                   \
+                        "       wsr     a15, ps\n"                     \
+                        "       rsync\n"                               \
+                        : "=&a" (vval)                                 \
+                        : "a" (i), "a" (v)                             \
+                        : "a15", "memory"                              \
+                        );                                             \
+}                                                                      \
+
+#define ATOMIC_OP_RETURN(op)                                           \
+static inline int atomic_##op##_return(int i, atomic_t * v)            \
+{                                                                      \
+        unsigned int vval;                                             \
+                                                                       \
+        __asm__ __volatile__(                                          \
+                        "       rsil    a15,"__stringify(LOCKLEVEL)"\n"\
+                        "       l32i    %0, %2, 0\n"                   \
+                        "       " #op " %0, %0, %1\n"                  \
+                        "       s32i    %0, %2, 0\n"                   \
+                        "       wsr     a15, ps\n"                     \
+                        "       rsync\n"                               \
+                        : "=&a" (vval)                                 \
+                        : "a" (i), "a" (v)                             \
+                        : "a15", "memory"                              \
+                        );                                             \
+                                                                       \
+        return vval;                                                   \
+}
+
+#endif /* XCHAL_HAVE_S32C1I */
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 /**
  * atomic_sub_and_test - subtract value from variable and test result