author     Nikolay Borisov <n.borisov.lkml@gmail.com>    2016-09-26 14:11:18 -0400
committer  Ingo Molnar <mingo@kernel.org>                2016-09-30 04:56:01 -0400
commit     08645077b7f9f7824dbaf1959b0e014a894c8acc
tree       6229e034f931df37cc1ef9f560922708bd8373e2
parent     cfd8983f03c7b2f977faab8dfc4ec5f6dbf9c1f3
x86/cmpxchg, locking/atomics: Remove superfluous definitions
cmpxchg contained definitions for unused (x)add_* operations, dating back
to the original ticket spinlock implementation. Nowadays these are unused
so remove them.

Signed-off-by: Nikolay Borisov <n.borisov.lkml@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: hpa@zytor.com
Link: http://lkml.kernel.org/r/1474913478-17757-1-git-send-email-n.borisov.lkml@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
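For readers unfamiliar with the history the message refers to: a ticket spinlock
hands out tickets with a single locked xadd, which is why these helpers existed
in the first place. The sketch below is illustrative only, not this patch and
not the kernel's implementation; ticket_lock_t, ticket_lock() and
ticket_unlock() are made-up names, and C11 atomics stand in for the removed
macros.

/*
 * Minimal ticket-lock sketch (userspace, illustrative): taking a ticket
 * is one atomic fetch-and-add, the role the locked xadd played.
 */
#include <stdatomic.h>

typedef struct {
	atomic_uint next;	/* next ticket to hand out */
	atomic_uint owner;	/* ticket currently being served */
} ticket_lock_t;		/* hypothetical name */

static void ticket_lock(ticket_lock_t *lock)
{
	unsigned int me = atomic_fetch_add(&lock->next, 1);	/* "xadd" step */

	while (atomic_load(&lock->owner) != me)
		;		/* spin until our ticket comes up */
}

static void ticket_unlock(ticket_lock_t *lock)
{
	atomic_fetch_add(&lock->owner, 1);	/* serve the next ticket */
}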
-rw-r--r--  arch/x86/include/asm/cmpxchg.h  44
1 file changed, 0 insertions(+), 44 deletions(-)
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index 9733361fed6f..97848cdfcb1a 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -158,53 +158,9 @@ extern void __add_wrong_size(void)
  * value of "*ptr".
  *
  * xadd() is locked when multiple CPUs are online
- * xadd_sync() is always locked
- * xadd_local() is never locked
  */
 #define __xadd(ptr, inc, lock)	__xchg_op((ptr), (inc), xadd, lock)
 #define xadd(ptr, inc)		__xadd((ptr), (inc), LOCK_PREFIX)
-#define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
-#define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")
-
-#define __add(ptr, inc, lock)						\
-	({								\
-	        __typeof__ (*(ptr)) __ret = (inc);			\
-		switch (sizeof(*(ptr))) {				\
-		case __X86_CASE_B:					\
-			asm volatile (lock "addb %b1, %0\n"		\
-				      : "+m" (*(ptr)) : "qi" (inc)	\
-				      : "memory", "cc");		\
-			break;						\
-		case __X86_CASE_W:					\
-			asm volatile (lock "addw %w1, %0\n"		\
-				      : "+m" (*(ptr)) : "ri" (inc)	\
-				      : "memory", "cc");		\
-			break;						\
-		case __X86_CASE_L:					\
-			asm volatile (lock "addl %1, %0\n"		\
-				      : "+m" (*(ptr)) : "ri" (inc)	\
-				      : "memory", "cc");		\
-			break;						\
-		case __X86_CASE_Q:					\
-			asm volatile (lock "addq %1, %0\n"		\
-				      : "+m" (*(ptr)) : "ri" (inc)	\
-				      : "memory", "cc");		\
-			break;						\
-		default:						\
-			__add_wrong_size();				\
-		}							\
-		__ret;							\
-	})
-
-/*
- * add_*() adds "inc" to "*ptr"
- *
- * __add() takes a lock prefix
- * add_smp() is locked when multiple CPUs are online
- * add_sync() is always locked
- */
-#define add_smp(ptr, inc)	__add((ptr), (inc), LOCK_PREFIX)
-#define add_sync(ptr, inc)	__add((ptr), (inc), "lock; ")
 
 #define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2)			\
 ({									\
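For context on what survives the cleanup: the retained xadd() documented in the
hunk above returns the previous value of *ptr, which is the building block for
add-and-return style atomics. Below is a hedged userspace approximation of that
pattern; xadd_int() and add_return() are illustrative names, not kernel APIs
(compile with GCC on x86-64).

/* Illustrative userspace approximation of the xadd() pattern. */
#include <stdio.h>

static inline int xadd_int(volatile int *ptr, int inc)
{
	/* LOCK XADD adds inc to *ptr and leaves the old *ptr in inc. */
	asm volatile("lock; xaddl %0, %1"
		     : "+r" (inc), "+m" (*ptr)
		     : : "memory", "cc");
	return inc;
}

static int add_return(volatile int *ptr, int inc)
{
	/* old value + increment == new value, no second load needed */
	return inc + xadd_int(ptr, inc);
}

int main(void)
{
	volatile int counter = 40;
	int old = xadd_int(&counter, 1);	/* counter 40 -> 41, old == 40 */
	int sum = add_return(&counter, 1);	/* counter 41 -> 42, sum == 42 */

	printf("old=%d sum=%d counter=%d\n", old, sum, counter);
	return 0;
}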