about summary refs log tree commit diff stats
path: root/arch/tile/include
diff options
context:
space:
mode:
authorPeter Zijlstra <peterz@infradead.org>2016-06-22 05:16:49 -0400
committerIngo Molnar <mingo@kernel.org>2016-06-24 02:17:04 -0400
commitb7271b9f3e18181559b96a610f4e42bdb04b07f5 (patch)
tree2ec7b479bbfbd8398fe789620ee72075196d2bf5 /arch/tile/include
parent86a664d58f3ba2398a378dc9da6d4cfa737d2281 (diff)
locking/atomic, arch/tile: Fix tilepro build
The tilepro change wasn't ever compiled it seems (the 0day built bot
also doesn't have a toolchain for it).

Make it work.

The thing that makes the patch bigger than desired is namespace
collision with the C11 __atomic builtin functions. So rename the
tilepro functions to __atomic32.

Reported-by: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Chris Metcalf <cmetcalf@mellanox.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: 1af5de9af138 ("locking/atomic, arch/tile: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()")
Link: http://lkml.kernel.org/r/20160622091649.GB30154@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/tile/include')
-rw-r--r--arch/tile/include/asm/atomic_32.h24
-rw-r--r--arch/tile/include/asm/futex.h14
2 files changed, 19 insertions, 19 deletions
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index da8eb4ed3752..a93774255136 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -143,15 +143,15 @@ static inline void atomic64_##op(long long i, atomic64_t *v) \
143{ \ 143{ \
144 _atomic64_fetch_##op(&v->counter, i); \ 144 _atomic64_fetch_##op(&v->counter, i); \
145} \ 145} \
146static inline void atomic64_##op(long long i, atomic64_t *v) \ 146static inline long long atomic64_fetch_##op(long long i, atomic64_t *v) \
147{ \ 147{ \
148 smp_mb(); \ 148 smp_mb(); \
149 return _atomic64_fetch_##op(&v->counter, i); \ 149 return _atomic64_fetch_##op(&v->counter, i); \
150} 150}
151 151
152ATOMIC64_OP(and) 152ATOMIC64_OPS(and)
153ATOMIC64_OP(or) 153ATOMIC64_OPS(or)
154ATOMIC64_OP(xor) 154ATOMIC64_OPS(xor)
155 155
156#undef ATOMIC64_OPS 156#undef ATOMIC64_OPS
157 157
@@ -266,16 +266,16 @@ struct __get_user {
266 unsigned long val; 266 unsigned long val;
267 int err; 267 int err;
268}; 268};
269extern struct __get_user __atomic_cmpxchg(volatile int *p, 269extern struct __get_user __atomic32_cmpxchg(volatile int *p,
270 int *lock, int o, int n); 270 int *lock, int o, int n);
271extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n); 271extern struct __get_user __atomic32_xchg(volatile int *p, int *lock, int n);
272extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n); 272extern struct __get_user __atomic32_xchg_add(volatile int *p, int *lock, int n);
273extern struct __get_user __atomic_xchg_add_unless(volatile int *p, 273extern struct __get_user __atomic32_xchg_add_unless(volatile int *p,
274 int *lock, int o, int n); 274 int *lock, int o, int n);
275extern struct __get_user __atomic_fetch_or(volatile int *p, int *lock, int n); 275extern struct __get_user __atomic32_fetch_or(volatile int *p, int *lock, int n);
276extern struct __get_user __atomic_fetch_and(volatile int *p, int *lock, int n); 276extern struct __get_user __atomic32_fetch_and(volatile int *p, int *lock, int n);
277extern struct __get_user __atomic_fetch_andn(volatile int *p, int *lock, int n); 277extern struct __get_user __atomic32_fetch_andn(volatile int *p, int *lock, int n);
278extern struct __get_user __atomic_fetch_xor(volatile int *p, int *lock, int n); 278extern struct __get_user __atomic32_fetch_xor(volatile int *p, int *lock, int n);
279extern long long __atomic64_cmpxchg(volatile long long *p, int *lock, 279extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
280 long long o, long long n); 280 long long o, long long n);
281extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n); 281extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
diff --git a/arch/tile/include/asm/futex.h b/arch/tile/include/asm/futex.h
index 1a6ef1b69cb1..e64a1b75fc38 100644
--- a/arch/tile/include/asm/futex.h
+++ b/arch/tile/include/asm/futex.h
@@ -80,16 +80,16 @@
80 ret = gu.err; \ 80 ret = gu.err; \
81 } 81 }
82 82
83#define __futex_set() __futex_call(__atomic_xchg) 83#define __futex_set() __futex_call(__atomic32_xchg)
84#define __futex_add() __futex_call(__atomic_xchg_add) 84#define __futex_add() __futex_call(__atomic32_xchg_add)
85#define __futex_or() __futex_call(__atomic_or) 85#define __futex_or() __futex_call(__atomic32_fetch_or)
86#define __futex_andn() __futex_call(__atomic_andn) 86#define __futex_andn() __futex_call(__atomic32_fetch_andn)
87#define __futex_xor() __futex_call(__atomic_xor) 87#define __futex_xor() __futex_call(__atomic32_fetch_xor)
88 88
89#define __futex_cmpxchg() \ 89#define __futex_cmpxchg() \
90 { \ 90 { \
91 struct __get_user gu = __atomic_cmpxchg((u32 __force *)uaddr, \ 91 struct __get_user gu = __atomic32_cmpxchg((u32 __force *)uaddr, \
92 lock, oldval, oparg); \ 92 lock, oldval, oparg); \
93 val = gu.val; \ 93 val = gu.val; \
94 ret = gu.err; \ 94 ret = gu.err; \
95 } 95 }