author		Chris Metcalf <cmetcalf@ezchip.com>	2015-07-09 16:38:17 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2015-07-27 08:06:24 -0400
commit		2957c035395e492463d7f589af9dd32388967bbb (patch)
tree		012c7882af09f225a45bc0a2d71bb61620cc5303 /arch/tile/lib
parent		73ada3700bbb0a4c7cc06ea8d74e93c689f90cdb (diff)
tile: Provide atomic_{or,xor,and}
Implement atomic logic ops -- atomic_{or,xor,and}.

For tilegx, these are relatively straightforward; the architecture
provides atomic "or" and "and", both 32-bit and 64-bit.  To support
xor we provide a loop using "cmpexch".

For the older 32-bit tilepro architecture, we have to extend the set
of low-level assembly routines to include 32-bit "and", as well as
all three 64-bit routines.  Somewhat confusingly, some 32-bit versions
are already used by the bitops inlines, with parameter types
appropriate for bitops, so we have to do a bit of casting to match
"int" to "unsigned long".

Signed-off-by: Chris Metcalf <cmetcalf@ezchip.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1436474297-32187-1-git-send-email-cmetcalf@ezchip.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
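[Editor's note: the commit message names the technique for tilegx xor but the
tilegx code itself is not in this diff.  As a minimal sketch of what such a
compare-and-exchange loop looks like -- written here against the generic
kernel cmpxchg() helper, not the raw tilegx cmpexch instruction this patch
alludes to -- one plausible shape is:]

	#include <linux/atomic.h>

	/*
	 * Hedged sketch, not this patch's code: atomic xor built from a
	 * compare-and-exchange retry loop.  We re-read the counter until
	 * no other CPU has modified it between our read and our cmpxchg.
	 */
	static inline void sketch_atomic_xor(int i, atomic_t *v)
	{
		int guess, oldval = atomic_read(v);

		do {
			guess = oldval;
			/* succeeds (and returns guess) only if *v still == guess */
			oldval = atomic_cmpxchg(v, guess, guess ^ i);
		} while (guess != oldval);
	}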
Diffstat (limited to 'arch/tile/lib')
-rw-r--r--	arch/tile/lib/atomic_32.c	23
-rw-r--r--	arch/tile/lib/atomic_asm_32.S	4
2 files changed, 27 insertions(+), 0 deletions(-)
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index c89b211fd9e7..298df1e9912a 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -94,6 +94,12 @@ unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask)
 }
 EXPORT_SYMBOL(_atomic_or);
 
+unsigned long _atomic_and(volatile unsigned long *p, unsigned long mask)
+{
+	return __atomic_and((int *)p, __atomic_setup(p), mask).val;
+}
+EXPORT_SYMBOL(_atomic_and);
+
 unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask)
 {
 	return __atomic_andn((int *)p, __atomic_setup(p), mask).val;
@@ -136,6 +142,23 @@ long long _atomic64_cmpxchg(long long *v, long long o, long long n)
 }
 EXPORT_SYMBOL(_atomic64_cmpxchg);
 
+long long _atomic64_and(long long *v, long long n)
+{
+	return __atomic64_and(v, __atomic_setup(v), n);
+}
+EXPORT_SYMBOL(_atomic64_and);
+
+long long _atomic64_or(long long *v, long long n)
+{
+	return __atomic64_or(v, __atomic_setup(v), n);
+}
+EXPORT_SYMBOL(_atomic64_or);
+
+long long _atomic64_xor(long long *v, long long n)
+{
+	return __atomic64_xor(v, __atomic_setup(v), n);
+}
+EXPORT_SYMBOL(_atomic64_xor);
 
 /*
  * If any of the atomic or futex routines hit a bad address (not in
diff --git a/arch/tile/lib/atomic_asm_32.S b/arch/tile/lib/atomic_asm_32.S
index 6bda3132cd61..f611265633d6 100644
--- a/arch/tile/lib/atomic_asm_32.S
+++ b/arch/tile/lib/atomic_asm_32.S
@@ -178,6 +178,7 @@ atomic_op _xchg_add, 32, "add r24, r22, r2"
 atomic_op _xchg_add_unless, 32, \
 	"sne r26, r22, r2; { bbns r26, 3f; add r24, r22, r3 }"
 atomic_op _or, 32, "or r24, r22, r2"
+atomic_op _and, 32, "and r24, r22, r2"
 atomic_op _andn, 32, "nor r2, r2, zero; and r24, r22, r2"
 atomic_op _xor, 32, "xor r24, r22, r2"
 
@@ -191,6 +192,9 @@ atomic_op 64_xchg_add_unless, 64, \
 	{ bbns r26, 3f; add r24, r22, r4 }; \
 	{ bbns r27, 3f; add r25, r23, r5 }; \
 	slt_u r26, r24, r22; add r25, r25, r26"
+atomic_op 64_or, 64, "{ or r24, r22, r2; or r25, r23, r3 }"
+atomic_op 64_and, 64, "{ and r24, r22, r2; and r25, r23, r3 }"
+atomic_op 64_xor, 64, "{ xor r24, r22, r2; xor r25, r23, r3 }"
 
 	jrp lr		/* happy backtracer */
 
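[Editor's note: for context, the entry points this patch wires up follow the
standard kernel atomic_*() calling convention.  A hypothetical caller, not
part of this patch, might look like:]

	/* Hypothetical usage sketch of the new atomic logic ops. */
	atomic_t flags = ATOMIC_INIT(0);

	atomic_or(0x4, &flags);		/* atomically set bit 2 */
	atomic_and(~0x4, &flags);	/* atomically clear bit 2 */
	atomic_xor(0x1, &flags);	/* atomically toggle bit 0 */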