author     Peter Zijlstra <peterz@infradead.org>     2016-06-22 05:16:49 -0400
committer  Ingo Molnar <mingo@kernel.org>            2016-06-24 02:17:04 -0400
commit     b7271b9f3e18181559b96a610f4e42bdb04b07f5 (patch)
tree       2ec7b479bbfbd8398fe789620ee72075196d2bf5
parent     86a664d58f3ba2398a378dc9da6d4cfa737d2281 (diff)
locking/atomic, arch/tile: Fix tilepro build
It seems the tilepro change was never compiled (the 0day build bot
doesn't have a toolchain for it either).
Make it work.
What makes the patch bigger than desired is a namespace collision with
the C11 __atomic builtin functions, so rename the tilepro functions to
use an __atomic32 prefix.
Reported-by: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Chris Metcalf <cmetcalf@mellanox.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: 1af5de9af138 ("locking/atomic, arch/tile: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()")
Link: http://lkml.kernel.org/r/20160622091649.GB30154@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
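
[ Illustration only, not part of the patch: a minimal sketch of the collision
  the changelog describes. GCC implements the C11 __atomic_* operations,
  including __atomic_fetch_or(), as compiler builtins, so the old tilepro
  prototypes sat in that reserved namespace once the _fetch_ helpers were
  added; renaming them to __atomic32_* sidesteps the clash. The struct and
  prototypes below mirror atomic_32.h; the standalone file is hypothetical. ]

	/* collision-sketch.c -- standalone illustration, not kernel code */
	struct __get_user {
		unsigned long val;
		int err;
	};

	#if 0
	/*
	 * Old name: GCC already treats __atomic_fetch_or() as a builtin with a
	 * different prototype, so this declaration conflicts with it.
	 */
	extern struct __get_user __atomic_fetch_or(volatile int *p, int *lock, int n);
	#endif

	/* Renamed: __atomic32_fetch_or is an ordinary identifier; no builtin collides. */
	extern struct __get_user __atomic32_fetch_or(volatile int *p, int *lock, int n);

[ The 64-bit helpers (__atomic64_*) already carry a distinguishing prefix that
  no builtin uses, which is why the diff below leaves them untouched. ]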
-rw-r--r--  arch/tile/include/asm/atomic_32.h | 24
-rw-r--r--  arch/tile/include/asm/futex.h     | 14
-rw-r--r--  arch/tile/lib/atomic_32.c         | 16
-rw-r--r--  arch/tile/lib/atomic_asm_32.S     | 21
4 files changed, 40 insertions(+), 35 deletions(-)
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index da8eb4ed3752..a93774255136 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -143,15 +143,15 @@ static inline void atomic64_##op(long long i, atomic64_t *v) \
 { \
 	_atomic64_fetch_##op(&v->counter, i); \
 } \
-static inline void atomic64_##op(long long i, atomic64_t *v) \
+static inline long long atomic64_fetch_##op(long long i, atomic64_t *v) \
 { \
 	smp_mb(); \
 	return _atomic64_fetch_##op(&v->counter, i); \
 }
 
-ATOMIC64_OP(and)
-ATOMIC64_OP(or)
-ATOMIC64_OP(xor)
+ATOMIC64_OPS(and)
+ATOMIC64_OPS(or)
+ATOMIC64_OPS(xor)
 
 #undef ATOMIC64_OPS
 
@@ -266,16 +266,16 @@ struct __get_user {
 	unsigned long val;
 	int err;
 };
-extern struct __get_user __atomic_cmpxchg(volatile int *p,
+extern struct __get_user __atomic32_cmpxchg(volatile int *p,
 					  int *lock, int o, int n);
-extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
+extern struct __get_user __atomic32_xchg(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic32_xchg_add(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic32_xchg_add_unless(volatile int *p,
 					  int *lock, int o, int n);
-extern struct __get_user __atomic_fetch_or(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic_fetch_and(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic_fetch_andn(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic_fetch_xor(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic32_fetch_or(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic32_fetch_and(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic32_fetch_andn(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic32_fetch_xor(volatile int *p, int *lock, int n);
 extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
 					long long o, long long n);
 extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
diff --git a/arch/tile/include/asm/futex.h b/arch/tile/include/asm/futex.h
index 1a6ef1b69cb1..e64a1b75fc38 100644
--- a/arch/tile/include/asm/futex.h
+++ b/arch/tile/include/asm/futex.h
@@ -80,16 +80,16 @@
 		ret = gu.err; \
 	}
 
-#define __futex_set() __futex_call(__atomic_xchg)
-#define __futex_add() __futex_call(__atomic_xchg_add)
-#define __futex_or() __futex_call(__atomic_or)
-#define __futex_andn() __futex_call(__atomic_andn)
-#define __futex_xor() __futex_call(__atomic_xor)
+#define __futex_set() __futex_call(__atomic32_xchg)
+#define __futex_add() __futex_call(__atomic32_xchg_add)
+#define __futex_or() __futex_call(__atomic32_fetch_or)
+#define __futex_andn() __futex_call(__atomic32_fetch_andn)
+#define __futex_xor() __futex_call(__atomic32_fetch_xor)
 
 #define __futex_cmpxchg() \
 	{ \
-		struct __get_user gu = __atomic_cmpxchg((u32 __force *)uaddr, \
+		struct __get_user gu = __atomic32_cmpxchg((u32 __force *)uaddr, \
 					lock, oldval, oparg); \
 		val = gu.val; \
 		ret = gu.err; \
 	}
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index 5b6bd932c9c7..f8128800dbf5 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -61,13 +61,13 @@ static inline int *__atomic_setup(volatile void *v)
 
 int _atomic_xchg(int *v, int n)
 {
-	return __atomic_xchg(v, __atomic_setup(v), n).val;
+	return __atomic32_xchg(v, __atomic_setup(v), n).val;
 }
 EXPORT_SYMBOL(_atomic_xchg);
 
 int _atomic_xchg_add(int *v, int i)
 {
-	return __atomic_xchg_add(v, __atomic_setup(v), i).val;
+	return __atomic32_xchg_add(v, __atomic_setup(v), i).val;
 }
 EXPORT_SYMBOL(_atomic_xchg_add);
 
@@ -78,37 +78,37 @@ int _atomic_xchg_add_unless(int *v, int a, int u)
 	 * to use the first argument consistently as the "old value"
 	 * in the assembly, as is done for _atomic_cmpxchg().
 	 */
-	return __atomic_xchg_add_unless(v, __atomic_setup(v), u, a).val;
+	return __atomic32_xchg_add_unless(v, __atomic_setup(v), u, a).val;
 }
 EXPORT_SYMBOL(_atomic_xchg_add_unless);
 
 int _atomic_cmpxchg(int *v, int o, int n)
 {
-	return __atomic_cmpxchg(v, __atomic_setup(v), o, n).val;
+	return __atomic32_cmpxchg(v, __atomic_setup(v), o, n).val;
 }
 EXPORT_SYMBOL(_atomic_cmpxchg);
 
 unsigned long _atomic_fetch_or(volatile unsigned long *p, unsigned long mask)
 {
-	return __atomic_fetch_or((int *)p, __atomic_setup(p), mask).val;
+	return __atomic32_fetch_or((int *)p, __atomic_setup(p), mask).val;
 }
 EXPORT_SYMBOL(_atomic_fetch_or);
 
 unsigned long _atomic_fetch_and(volatile unsigned long *p, unsigned long mask)
 {
-	return __atomic_fetch_and((int *)p, __atomic_setup(p), mask).val;
+	return __atomic32_fetch_and((int *)p, __atomic_setup(p), mask).val;
 }
 EXPORT_SYMBOL(_atomic_fetch_and);
 
 unsigned long _atomic_fetch_andn(volatile unsigned long *p, unsigned long mask)
 {
-	return __atomic_fetch_andn((int *)p, __atomic_setup(p), mask).val;
+	return __atomic32_fetch_andn((int *)p, __atomic_setup(p), mask).val;
 }
 EXPORT_SYMBOL(_atomic_fetch_andn);
 
 unsigned long _atomic_fetch_xor(volatile unsigned long *p, unsigned long mask)
 {
-	return __atomic_fetch_xor((int *)p, __atomic_setup(p), mask).val;
+	return __atomic32_fetch_xor((int *)p, __atomic_setup(p), mask).val;
 }
 EXPORT_SYMBOL(_atomic_fetch_xor);
 
diff --git a/arch/tile/lib/atomic_asm_32.S b/arch/tile/lib/atomic_asm_32.S
index 507abdd2bf9a..1a70e6c0f259 100644
--- a/arch/tile/lib/atomic_asm_32.S
+++ b/arch/tile/lib/atomic_asm_32.S
@@ -172,15 +172,20 @@ STD_ENTRY_SECTION(__atomic\name, .text.atomic)
 	.endif
 	.endm
 
-atomic_op _cmpxchg, 32, "seq r26, r22, r2; { bbns r26, 3f; move r24, r3 }"
-atomic_op _xchg, 32, "move r24, r2"
-atomic_op _xchg_add, 32, "add r24, r22, r2"
-atomic_op _xchg_add_unless, 32, \
+
+/*
+ * Use __atomic32 prefix to avoid collisions with GCC builtin __atomic functions.
+ */
+
+atomic_op 32_cmpxchg, 32, "seq r26, r22, r2; { bbns r26, 3f; move r24, r3 }"
+atomic_op 32_xchg, 32, "move r24, r2"
+atomic_op 32_xchg_add, 32, "add r24, r22, r2"
+atomic_op 32_xchg_add_unless, 32, \
 	"sne r26, r22, r2; { bbns r26, 3f; add r24, r22, r3 }"
-atomic_op _fetch_or, 32, "or r24, r22, r2"
-atomic_op _fetch_and, 32, "and r24, r22, r2"
-atomic_op _fetch_andn, 32, "nor r2, r2, zero; and r24, r22, r2"
-atomic_op _fetch_xor, 32, "xor r24, r22, r2"
+atomic_op 32_fetch_or, 32, "or r24, r22, r2"
+atomic_op 32_fetch_and, 32, "and r24, r22, r2"
+atomic_op 32_fetch_andn, 32, "nor r2, r2, zero; and r24, r22, r2"
+atomic_op 32_fetch_xor, 32, "xor r24, r22, r2"
 
 atomic_op 64_cmpxchg, 64, "{ seq r26, r22, r2; seq r27, r23, r3 }; \
 	{ bbns r26, 3f; move r24, r4 }; { bbns r27, 3f; move r25, r5 }"