author		Chris Metcalf <cmetcalf@tilera.com>	2013-09-06 08:56:45 -0400
committer	Chris Metcalf <cmetcalf@tilera.com>	2013-09-06 13:06:25 -0400
commit		6dc9658fa1af9f58d358692b68135f464c167e10 (patch)
tree		a112036cab626e664489d87e40012d59331e1404 /arch/tile
parent		b40f451d56de69477a2244a0b4082f644699673f (diff)
tile: rework <asm/cmpxchg.h>
The macrology in cmpxchg.h was designed to allow arbitrary pointer and integer values to be passed through the routines.  To support cmpxchg() on 64-bit values on the 32-bit tilepro architecture, we used the idiom "(typeof(val))(typeof(val-val))".  This way, in the "size 8" branch of the switch, when the underlying cmpxchg routine returns a 64-bit quantity, we cast it first to a typeof(val-val) quantity (i.e. size_t if "val" is a pointer) with no warnings about casting between pointers and integers of different sizes, then cast onwards to typeof(val), again with no warnings.  If val is not a pointer type, the additional cast is a no-op.  We can't replace the typeof(val-val) cast with (for example) unsigned long, since then if "val" is really a 64-bit type, we cast away the high bits.

HOWEVER, this fails with current gcc (through 4.7 at least) if "val" is a pointer to an incomplete type.  Unfortunately gcc isn't smart enough to realize that "val - val" will always be a size_t type even if it's an incomplete type pointer.

Accordingly, I've reworked the way we handle the casting.  We have given up the ability to use cmpxchg() on 64-bit values on tilepro, which is OK in the kernel since we should use cmpxchg64() explicitly on such values anyway.  As a result, I can just use simple "unsigned long" casts internally.

As I reworked it, I realized it would be cleaner to move the architecture-specific conditionals for cmpxchg and xchg out of the atomic.h headers and into cmpxchg.h, and then use the cmpxchg() and xchg() primitives directly in atomic.h and elsewhere.  This allowed the cmpxchg.h header to stand on its own without relying on the implicit include of it that is performed by <asm/atomic.h>.  It also allowed collapsing the atomic_xchg/atomic_cmpxchg routines from atomic_{32,64}.h into atomic.h.

I improved the tests that guard the allowed size of the arguments to the routines to use a __compiletime_error() test.  (By avoiding the use of BUILD_BUG, I could include cmpxchg.h into bitops.h as well and use the macros there, which is otherwise impossible due to include order dependency issues.)

The tilepro _atomic_xxx internal methods were previously set up to take atomic_t and atomic64_t arguments, which isn't as convenient with the new model, so I modified them to take int or u64 arguments, which is consistent with how they used the arguments internally anyway, and provided some nice simplification there too.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
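A minimal illustrative sketch of the failure described above (not part of this commit; the names OLD_CAST, struct opaque, and demo() are hypothetical).  Pointer subtraction requires a complete pointee type, so gcc cannot evaluate typeof(val - val) when val points to an incomplete type:

/*
 * Illustrative only: mirrors the removed "(typeof(val))(typeof(val-val))"
 * idiom from <asm/cmpxchg.h>.
 */
struct opaque;				/* incomplete type: declared, never defined */

#define OLD_CAST(val, raw) \
	((typeof(val))(typeof((val) - (val)))(raw))

void demo(int *ip, struct opaque *op, unsigned long raw)
{
	(void)OLD_CAST(ip, raw);	/* ok: ip - ip is an ordinary integer type */
#ifdef SHOW_FAILURE
	(void)OLD_CAST(op, raw);	/* compile error: arithmetic on pointer to incomplete type */
#endif
}

With the rework, the tilepro path instead casts through plain int/u64 and relies on a __compiletime_error() guard to reject unsupported argument sizes, so no typeof() gymnastics are needed.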
Diffstat (limited to 'arch/tile')
-rw-r--r--	arch/tile/include/asm/atomic.h		52
-rw-r--r--	arch/tile/include/asm/atomic_32.h	85
-rw-r--r--	arch/tile/include/asm/atomic_64.h	42
-rw-r--r--	arch/tile/include/asm/bitops_32.h	2
-rw-r--r--	arch/tile/include/asm/bitops_64.h	8
-rw-r--r--	arch/tile/include/asm/cmpxchg.h		97
-rw-r--r--	arch/tile/lib/atomic_32.c		34
7 files changed, 156 insertions, 164 deletions
diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h
index e71387ab20ca..d385eaadece7 100644
--- a/arch/tile/include/asm/atomic.h
+++ b/arch/tile/include/asm/atomic.h
@@ -114,6 +114,32 @@ static inline int atomic_read(const atomic_t *v)
 #define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)
 
 /**
+ * atomic_xchg - atomically exchange contents of memory with a new value
+ * @v: pointer of type atomic_t
+ * @i: integer value to store in memory
+ *
+ * Atomically sets @v to @i and returns old @v
+ */
+static inline int atomic_xchg(atomic_t *v, int n)
+{
+	return xchg(&v->counter, n);
+}
+
+/**
+ * atomic_cmpxchg - atomically exchange contents of memory if it matches
+ * @v: pointer of type atomic_t
+ * @o: old value that memory should have
+ * @n: new value to write to memory if it matches
+ *
+ * Atomically checks if @v holds @o and replaces it with @n if so.
+ * Returns the old value at @v.
+ */
+static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
+{
+	return cmpxchg(&v->counter, o, n);
+}
+
+/**
  * atomic_add_negative - add and test if negative
  * @v: pointer of type atomic_t
  * @i: integer value to add
@@ -133,6 +159,32 @@ static inline int atomic_read(const atomic_t *v)
 
 #ifndef __ASSEMBLY__
 
+/**
+ * atomic64_xchg - atomically exchange contents of memory with a new value
+ * @v: pointer of type atomic64_t
+ * @i: integer value to store in memory
+ *
+ * Atomically sets @v to @i and returns old @v
+ */
+static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
+{
+	return xchg64(&v->counter, n);
+}
+
+/**
+ * atomic64_cmpxchg - atomically exchange contents of memory if it matches
+ * @v: pointer of type atomic64_t
+ * @o: old value that memory should have
+ * @n: new value to write to memory if it matches
+ *
+ * Atomically checks if @v holds @o and replaces it with @n if so.
+ * Returns the old value at @v.
+ */
+static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
+{
+	return cmpxchg64(&v->counter, o, n);
+}
+
 static inline long long atomic64_dec_if_positive(atomic64_t *v)
 {
 	long long c, old, dec;
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 96156f5ba640..0d0395b1b152 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -22,40 +22,6 @@
 
 #ifndef __ASSEMBLY__
 
-/* Tile-specific routines to support <linux/atomic.h>. */
-int _atomic_xchg(atomic_t *v, int n);
-int _atomic_xchg_add(atomic_t *v, int i);
-int _atomic_xchg_add_unless(atomic_t *v, int a, int u);
-int _atomic_cmpxchg(atomic_t *v, int o, int n);
-
-/**
- * atomic_xchg - atomically exchange contents of memory with a new value
- * @v: pointer of type atomic_t
- * @i: integer value to store in memory
- *
- * Atomically sets @v to @i and returns old @v
- */
-static inline int atomic_xchg(atomic_t *v, int n)
-{
-	smp_mb();  /* barrier for proper semantics */
-	return _atomic_xchg(v, n);
-}
-
-/**
- * atomic_cmpxchg - atomically exchange contents of memory if it matches
- * @v: pointer of type atomic_t
- * @o: old value that memory should have
- * @n: new value to write to memory if it matches
- *
- * Atomically checks if @v holds @o and replaces it with @n if so.
- * Returns the old value at @v.
- */
-static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
-{
-	smp_mb();  /* barrier for proper semantics */
-	return _atomic_cmpxchg(v, o, n);
-}
-
 /**
  * atomic_add - add integer to atomic variable
  * @i: integer value to add
@@ -65,7 +31,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
  */
 static inline void atomic_add(int i, atomic_t *v)
 {
-	_atomic_xchg_add(v, i);
+	_atomic_xchg_add(&v->counter, i);
 }
 
 /**
@@ -78,7 +44,7 @@ static inline void atomic_add(int i, atomic_t *v)
 static inline int atomic_add_return(int i, atomic_t *v)
 {
 	smp_mb();  /* barrier for proper semantics */
-	return _atomic_xchg_add(v, i) + i;
+	return _atomic_xchg_add(&v->counter, i) + i;
 }
 
 /**
@@ -93,7 +59,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	smp_mb();  /* barrier for proper semantics */
-	return _atomic_xchg_add_unless(v, a, u);
+	return _atomic_xchg_add_unless(&v->counter, a, u);
 }
 
 /**
@@ -108,7 +74,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
  */
 static inline void atomic_set(atomic_t *v, int n)
 {
-	_atomic_xchg(v, n);
+	_atomic_xchg(&v->counter, n);
 }
 
 /* A 64bit atomic type */
@@ -119,11 +85,6 @@ typedef struct {
 
 #define ATOMIC64_INIT(val) { (val) }
 
-u64 _atomic64_xchg(atomic64_t *v, u64 n);
-u64 _atomic64_xchg_add(atomic64_t *v, u64 i);
-u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u);
-u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n);
-
 /**
  * atomic64_read - read atomic variable
  * @v: pointer of type atomic64_t
@@ -137,35 +98,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
  * Casting away const is safe since the atomic support routines
  * do not write to memory if the value has not been modified.
  */
-	return _atomic64_xchg_add((atomic64_t *)v, 0);
-}
-
-/**
- * atomic64_xchg - atomically exchange contents of memory with a new value
- * @v: pointer of type atomic64_t
- * @i: integer value to store in memory
- *
- * Atomically sets @v to @i and returns old @v
- */
-static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
-{
-	smp_mb();  /* barrier for proper semantics */
-	return _atomic64_xchg(v, n);
-}
-
-/**
- * atomic64_cmpxchg - atomically exchange contents of memory if it matches
- * @v: pointer of type atomic64_t
- * @o: old value that memory should have
- * @n: new value to write to memory if it matches
- *
- * Atomically checks if @v holds @o and replaces it with @n if so.
- * Returns the old value at @v.
- */
-static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
-{
-	smp_mb();  /* barrier for proper semantics */
-	return _atomic64_cmpxchg(v, o, n);
+	return _atomic64_xchg_add((u64 *)&v->counter, 0);
 }
 
 /**
@@ -177,7 +110,7 @@ static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
  */
 static inline void atomic64_add(u64 i, atomic64_t *v)
 {
-	_atomic64_xchg_add(v, i);
+	_atomic64_xchg_add(&v->counter, i);
 }
 
 /**
@@ -190,7 +123,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
 {
 	smp_mb();  /* barrier for proper semantics */
-	return _atomic64_xchg_add(v, i) + i;
+	return _atomic64_xchg_add(&v->counter, i) + i;
 }
 
 /**
@@ -205,7 +138,7 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
 static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
 {
 	smp_mb();  /* barrier for proper semantics */
-	return _atomic64_xchg_add_unless(v, a, u) != u;
+	return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
 }
 
 /**
@@ -220,7 +153,7 @@ static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
  */
 static inline void atomic64_set(atomic64_t *v, u64 n)
 {
-	_atomic64_xchg(v, n);
+	_atomic64_xchg(&v->counter, n);
 }
 
 #define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
index f4500c688ffa..ad220eed05fc 100644
--- a/arch/tile/include/asm/atomic_64.h
+++ b/arch/tile/include/asm/atomic_64.h
@@ -32,25 +32,6 @@
  * on any routine which updates memory and returns a value.
  */
 
-static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
-{
-	int val;
-	__insn_mtspr(SPR_CMPEXCH_VALUE, o);
-	smp_mb();  /* barrier for proper semantics */
-	val = __insn_cmpexch4((void *)&v->counter, n);
-	smp_mb();  /* barrier for proper semantics */
-	return val;
-}
-
-static inline int atomic_xchg(atomic_t *v, int n)
-{
-	int val;
-	smp_mb();  /* barrier for proper semantics */
-	val = __insn_exch4((void *)&v->counter, n);
-	smp_mb();  /* barrier for proper semantics */
-	return val;
-}
-
 static inline void atomic_add(int i, atomic_t *v)
 {
 	__insn_fetchadd4((void *)&v->counter, i);
@@ -72,7 +53,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 		if (oldval == u)
 			break;
 		guess = oldval;
-		oldval = atomic_cmpxchg(v, guess, guess + a);
+		oldval = cmpxchg(&v->counter, guess, guess + a);
 	} while (guess != oldval);
 	return oldval;
 }
@@ -84,25 +65,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 #define atomic64_read(v)		((v)->counter)
 #define atomic64_set(v, i)		((v)->counter = (i))
 
-static inline long atomic64_cmpxchg(atomic64_t *v, long o, long n)
-{
-	long val;
-	smp_mb();  /* barrier for proper semantics */
-	__insn_mtspr(SPR_CMPEXCH_VALUE, o);
-	val = __insn_cmpexch((void *)&v->counter, n);
-	smp_mb();  /* barrier for proper semantics */
-	return val;
-}
-
-static inline long atomic64_xchg(atomic64_t *v, long n)
-{
-	long val;
-	smp_mb();  /* barrier for proper semantics */
-	val = __insn_exch((void *)&v->counter, n);
-	smp_mb();  /* barrier for proper semantics */
-	return val;
-}
-
 static inline void atomic64_add(long i, atomic64_t *v)
 {
 	__insn_fetchadd((void *)&v->counter, i);
@@ -124,7 +86,7 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
 		if (oldval == u)
 			break;
 		guess = oldval;
-		oldval = atomic64_cmpxchg(v, guess, guess + a);
+		oldval = cmpxchg(&v->counter, guess, guess + a);
 	} while (guess != oldval);
 	return oldval != u;
 }
diff --git a/arch/tile/include/asm/bitops_32.h b/arch/tile/include/asm/bitops_32.h
index ddc4c1efde43..386865ad2f55 100644
--- a/arch/tile/include/asm/bitops_32.h
+++ b/arch/tile/include/asm/bitops_32.h
@@ -16,7 +16,7 @@
 #define _ASM_TILE_BITOPS_32_H
 
 #include <linux/compiler.h>
-#include <linux/atomic.h>
+#include <asm/barrier.h>
 
 /* Tile-specific routines to support <asm/bitops.h>. */
 unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask);
diff --git a/arch/tile/include/asm/bitops_64.h b/arch/tile/include/asm/bitops_64.h
index 60b87ee54fb8..ad34cd056085 100644
--- a/arch/tile/include/asm/bitops_64.h
+++ b/arch/tile/include/asm/bitops_64.h
@@ -16,7 +16,7 @@
 #define _ASM_TILE_BITOPS_64_H
 
 #include <linux/compiler.h>
-#include <linux/atomic.h>
+#include <asm/cmpxchg.h>
 
 /* See <asm/bitops.h> for API comments. */
 
@@ -44,8 +44,7 @@ static inline void change_bit(unsigned nr, volatile unsigned long *addr)
 	oldval = *addr;
 	do {
 		guess = oldval;
-		oldval = atomic64_cmpxchg((atomic64_t *)addr,
-					  guess, guess ^ mask);
+		oldval = cmpxchg(addr, guess, guess ^ mask);
 	} while (guess != oldval);
 }
 
@@ -90,8 +89,7 @@ static inline int test_and_change_bit(unsigned nr,
 	oldval = *addr;
 	do {
 		guess = oldval;
-		oldval = atomic64_cmpxchg((atomic64_t *)addr,
-					  guess, guess ^ mask);
+		oldval = cmpxchg(addr, guess, guess ^ mask);
 	} while (guess != oldval);
 	return (oldval & mask) != 0;
 }
diff --git a/arch/tile/include/asm/cmpxchg.h b/arch/tile/include/asm/cmpxchg.h
index 1da5bfbd8c61..4001d5eab4bb 100644
--- a/arch/tile/include/asm/cmpxchg.h
+++ b/arch/tile/include/asm/cmpxchg.h
@@ -20,59 +20,108 @@
 
 #ifndef __ASSEMBLY__
 
-/* Nonexistent functions intended to cause link errors. */
-extern unsigned long __xchg_called_with_bad_pointer(void);
-extern unsigned long __cmpxchg_called_with_bad_pointer(void);
+#include <asm/barrier.h>
 
-#define xchg(ptr, x) \
+/* Nonexistent functions intended to cause compile errors. */
+extern void __xchg_called_with_bad_pointer(void)
+	__compiletime_error("Bad argument size for xchg");
+extern void __cmpxchg_called_with_bad_pointer(void)
+	__compiletime_error("Bad argument size for cmpxchg");
+
+#ifndef __tilegx__
+
+/* Note the _atomic_xxx() routines include a final mb(). */
+int _atomic_xchg(int *ptr, int n);
+int _atomic_xchg_add(int *v, int i);
+int _atomic_xchg_add_unless(int *v, int a, int u);
+int _atomic_cmpxchg(int *ptr, int o, int n);
+u64 _atomic64_xchg(u64 *v, u64 n);
+u64 _atomic64_xchg_add(u64 *v, u64 i);
+u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u);
+u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
+
+#define xchg(ptr, n) \
+	({ \
+		if (sizeof(*(ptr)) != 4) \
+			__xchg_called_with_bad_pointer(); \
+		smp_mb(); \
+		(typeof(*(ptr)))_atomic_xchg((int *)(ptr), (int)(n)); \
+	})
+
+#define cmpxchg(ptr, o, n) \
+	({ \
+		if (sizeof(*(ptr)) != 4) \
+			__cmpxchg_called_with_bad_pointer(); \
+		smp_mb(); \
+		(typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, (int)n); \
+	})
+
+#define xchg64(ptr, n) \
+	({ \
+		if (sizeof(*(ptr)) != 8) \
+			__xchg_called_with_bad_pointer(); \
+		smp_mb(); \
+		(typeof(*(ptr)))_atomic64_xchg((u64 *)(ptr), (u64)(n)); \
+	})
+
+#define cmpxchg64(ptr, o, n) \
+	({ \
+		if (sizeof(*(ptr)) != 8) \
+			__cmpxchg_called_with_bad_pointer(); \
+		smp_mb(); \
+		(typeof(*(ptr)))_atomic64_cmpxchg((u64 *)ptr, (u64)o, (u64)n); \
+	})
+
+#else
+
+#define xchg(ptr, n) \
 	({ \
 		typeof(*(ptr)) __x; \
+		smp_mb(); \
 		switch (sizeof(*(ptr))) { \
 		case 4: \
-			__x = (typeof(__x))(typeof(__x-__x))atomic_xchg( \
-				(atomic_t *)(ptr), \
-				(u32)(typeof((x)-(x)))(x)); \
+			__x = (typeof(__x))(unsigned long) \
+				__insn_exch4((ptr), (u32)(unsigned long)(n)); \
 			break; \
 		case 8: \
-			__x = (typeof(__x))(typeof(__x-__x))atomic64_xchg( \
-				(atomic64_t *)(ptr), \
-				(u64)(typeof((x)-(x)))(x)); \
+			__x = (typeof(__x)) \
+				__insn_exch((ptr), (unsigned long)(n)); \
 			break; \
 		default: \
 			__xchg_called_with_bad_pointer(); \
+			break; \
 		} \
+		smp_mb(); \
 		__x; \
 	})
 
 #define cmpxchg(ptr, o, n) \
 	({ \
 		typeof(*(ptr)) __x; \
+		__insn_mtspr(SPR_CMPEXCH_VALUE, (unsigned long)(o)); \
+		smp_mb(); \
 		switch (sizeof(*(ptr))) { \
 		case 4: \
-			__x = (typeof(__x))(typeof(__x-__x))atomic_cmpxchg( \
-				(atomic_t *)(ptr), \
-				(u32)(typeof((o)-(o)))(o), \
-				(u32)(typeof((n)-(n)))(n)); \
+			__x = (typeof(__x))(unsigned long) \
+				__insn_cmpexch4((ptr), (u32)(unsigned long)(n)); \
 			break; \
 		case 8: \
-			__x = (typeof(__x))(typeof(__x-__x))atomic64_cmpxchg( \
-				(atomic64_t *)(ptr), \
-				(u64)(typeof((o)-(o)))(o), \
-				(u64)(typeof((n)-(n)))(n)); \
+			__x = (typeof(__x))__insn_cmpexch((ptr), (u64)(n)); \
 			break; \
 		default: \
 			__cmpxchg_called_with_bad_pointer(); \
+			break; \
 		} \
+		smp_mb(); \
 		__x; \
 	})
 
-#define tas(ptr) (xchg((ptr), 1))
+#define xchg64 xchg
+#define cmpxchg64 cmpxchg
 
-#define cmpxchg64(ptr, o, n) \
-({ \
-	BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
-	cmpxchg((ptr), (o), (n)); \
-})
+#endif
+
+#define tas(ptr) xchg((ptr), 1)
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index 5d91d1860640..759efa337be8 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -59,33 +59,32 @@ static inline int *__atomic_setup(volatile void *v)
 	return __atomic_hashed_lock(v);
 }
 
-int _atomic_xchg(atomic_t *v, int n)
+int _atomic_xchg(int *v, int n)
 {
-	return __atomic_xchg(&v->counter, __atomic_setup(v), n).val;
+	return __atomic_xchg(v, __atomic_setup(v), n).val;
 }
 EXPORT_SYMBOL(_atomic_xchg);
 
-int _atomic_xchg_add(atomic_t *v, int i)
+int _atomic_xchg_add(int *v, int i)
 {
-	return __atomic_xchg_add(&v->counter, __atomic_setup(v), i).val;
+	return __atomic_xchg_add(v, __atomic_setup(v), i).val;
 }
 EXPORT_SYMBOL(_atomic_xchg_add);
 
-int _atomic_xchg_add_unless(atomic_t *v, int a, int u)
+int _atomic_xchg_add_unless(int *v, int a, int u)
 {
 	/*
 	 * Note: argument order is switched here since it is easier
 	 * to use the first argument consistently as the "old value"
 	 * in the assembly, as is done for _atomic_cmpxchg().
 	 */
-	return __atomic_xchg_add_unless(&v->counter, __atomic_setup(v), u, a)
-		.val;
+	return __atomic_xchg_add_unless(v, __atomic_setup(v), u, a).val;
 }
 EXPORT_SYMBOL(_atomic_xchg_add_unless);
 
-int _atomic_cmpxchg(atomic_t *v, int o, int n)
+int _atomic_cmpxchg(int *v, int o, int n)
 {
-	return __atomic_cmpxchg(&v->counter, __atomic_setup(v), o, n).val;
+	return __atomic_cmpxchg(v, __atomic_setup(v), o, n).val;
 }
 EXPORT_SYMBOL(_atomic_cmpxchg);
 
@@ -108,33 +107,32 @@ unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask)
 EXPORT_SYMBOL(_atomic_xor);
 
 
-u64 _atomic64_xchg(atomic64_t *v, u64 n)
+u64 _atomic64_xchg(u64 *v, u64 n)
 {
-	return __atomic64_xchg(&v->counter, __atomic_setup(v), n);
+	return __atomic64_xchg(v, __atomic_setup(v), n);
 }
 EXPORT_SYMBOL(_atomic64_xchg);
 
-u64 _atomic64_xchg_add(atomic64_t *v, u64 i)
+u64 _atomic64_xchg_add(u64 *v, u64 i)
 {
-	return __atomic64_xchg_add(&v->counter, __atomic_setup(v), i);
+	return __atomic64_xchg_add(v, __atomic_setup(v), i);
 }
 EXPORT_SYMBOL(_atomic64_xchg_add);
 
-u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u)
+u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u)
 {
 	/*
 	 * Note: argument order is switched here since it is easier
 	 * to use the first argument consistently as the "old value"
 	 * in the assembly, as is done for _atomic_cmpxchg().
 	 */
-	return __atomic64_xchg_add_unless(&v->counter, __atomic_setup(v),
-					  u, a);
+	return __atomic64_xchg_add_unless(v, __atomic_setup(v), u, a);
 }
 EXPORT_SYMBOL(_atomic64_xchg_add_unless);
 
-u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
+u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n)
 {
-	return __atomic64_cmpxchg(&v->counter, __atomic_setup(v), o, n);
+	return __atomic64_cmpxchg(v, __atomic_setup(v), o, n);
 }
 EXPORT_SYMBOL(_atomic64_cmpxchg);
 