Diffstat (limited to 'arch/tile')
 arch/tile/include/asm/atomic.h    |  5
 arch/tile/include/asm/atomic_32.h | 27
 arch/tile/include/asm/cmpxchg.h   | 28
 arch/tile/lib/atomic_32.c         |  8
 4 files changed, 39 insertions(+), 29 deletions(-)
diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h
index d385eaadece7..709798460763 100644
--- a/arch/tile/include/asm/atomic.h
+++ b/arch/tile/include/asm/atomic.h
@@ -166,7 +166,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
  *
  * Atomically sets @v to @i and returns old @v
  */
-static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
+static inline long long atomic64_xchg(atomic64_t *v, long long n)
 {
 	return xchg64(&v->counter, n);
 }
@@ -180,7 +180,8 @@ static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
  * Atomically checks if @v holds @o and replaces it with @n if so.
  * Returns the old value at @v.
  */
-static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
+static inline long long atomic64_cmpxchg(atomic64_t *v, long long o,
+					  long long n)
 {
 	return cmpxchg64(&v->counter, o, n);
 }
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 0d0395b1b152..1ad4a1f7d42b 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -80,7 +80,7 @@ static inline void atomic_set(atomic_t *v, int n)
 /* A 64bit atomic type */
 
 typedef struct {
-	u64 __aligned(8) counter;
+	long long counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(val) { (val) }
@@ -91,14 +91,14 @@ typedef struct {
  *
  * Atomically reads the value of @v.
  */
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
 	/*
 	 * Requires an atomic op to read both 32-bit parts consistently.
 	 * Casting away const is safe since the atomic support routines
 	 * do not write to memory if the value has not been modified.
 	 */
-	return _atomic64_xchg_add((u64 *)&v->counter, 0);
+	return _atomic64_xchg_add((long long *)&v->counter, 0);
 }
 
 /**
@@ -108,7 +108,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
  *
  * Atomically adds @i to @v.
  */
-static inline void atomic64_add(u64 i, atomic64_t *v)
+static inline void atomic64_add(long long i, atomic64_t *v)
 {
 	_atomic64_xchg_add(&v->counter, i);
 }
@@ -120,7 +120,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
  *
  * Atomically adds @i to @v and returns @i + @v
  */
-static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
 	smp_mb();  /* barrier for proper semantics */
 	return _atomic64_xchg_add(&v->counter, i) + i;
@@ -135,7 +135,8 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
  * Atomically adds @a to @v, so long as @v was not already @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+static inline long long atomic64_add_unless(atomic64_t *v, long long a,
+					    long long u)
 {
 	smp_mb();  /* barrier for proper semantics */
 	return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
@@ -151,7 +152,7 @@ static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
  * atomic64_set() can't be just a raw store, since it would be lost if it
  * fell between the load and store of one of the other atomic ops.
  */
-static inline void atomic64_set(atomic64_t *v, u64 n)
+static inline void atomic64_set(atomic64_t *v, long long n)
 {
 	_atomic64_xchg(&v->counter, n);
 }
@@ -236,11 +237,13 @@ extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
 extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
-extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n);
-extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);
-extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
-extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
-				      int *lock, u64 o, u64 n);
+extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
+				    long long o, long long n);
+extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
+				     long long n);
+extern long long __atomic64_xchg_add_unless(volatile long long *p,
+					    int *lock, long long o, long long n);
 
 /* Return failure from the atomic wrappers. */
 struct __get_user __atomic_bad_address(int __user *addr);
diff --git a/arch/tile/include/asm/cmpxchg.h b/arch/tile/include/asm/cmpxchg.h
index 4001d5eab4bb..0ccda3c425be 100644
--- a/arch/tile/include/asm/cmpxchg.h
+++ b/arch/tile/include/asm/cmpxchg.h
@@ -35,10 +35,10 @@ int _atomic_xchg(int *ptr, int n);
 int _atomic_xchg_add(int *v, int i);
 int _atomic_xchg_add_unless(int *v, int a, int u);
 int _atomic_cmpxchg(int *ptr, int o, int n);
-u64 _atomic64_xchg(u64 *v, u64 n);
-u64 _atomic64_xchg_add(u64 *v, u64 i);
-u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u);
-u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
+long long _atomic64_xchg(long long *v, long long n);
+long long _atomic64_xchg_add(long long *v, long long i);
+long long _atomic64_xchg_add_unless(long long *v, long long a, long long u);
+long long _atomic64_cmpxchg(long long *v, long long o, long long n);
 
 #define xchg(ptr, n) \
 	({ \
@@ -53,7 +53,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
 		if (sizeof(*(ptr)) != 4) \
 			__cmpxchg_called_with_bad_pointer(); \
 		smp_mb(); \
-		(typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, (int)n); \
+		(typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, \
+						(int)n); \
 	})
 
 #define xchg64(ptr, n) \
@@ -61,7 +62,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
 		if (sizeof(*(ptr)) != 8) \
 			__xchg_called_with_bad_pointer(); \
 		smp_mb(); \
-		(typeof(*(ptr)))_atomic64_xchg((u64 *)(ptr), (u64)(n)); \
+		(typeof(*(ptr)))_atomic64_xchg((long long *)(ptr), \
+					       (long long)(n)); \
 	})
 
 #define cmpxchg64(ptr, o, n) \
@@ -69,7 +71,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
 		if (sizeof(*(ptr)) != 8) \
 			__cmpxchg_called_with_bad_pointer(); \
 		smp_mb(); \
-		(typeof(*(ptr)))_atomic64_cmpxchg((u64 *)ptr, (u64)o, (u64)n); \
+		(typeof(*(ptr)))_atomic64_cmpxchg((long long *)ptr, \
+						  (long long)o, (long long)n); \
 	})
 
 #else
@@ -81,10 +84,11 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
 		switch (sizeof(*(ptr))) { \
 		case 4: \
 			__x = (typeof(__x))(unsigned long) \
-				__insn_exch4((ptr), (u32)(unsigned long)(n)); \
+				__insn_exch4((ptr), \
+					(u32)(unsigned long)(n)); \
 			break; \
 		case 8: \
 			__x = (typeof(__x)) \
 				__insn_exch((ptr), (unsigned long)(n)); \
 			break; \
 		default: \
@@ -103,10 +107,12 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
 		switch (sizeof(*(ptr))) { \
 		case 4: \
 			__x = (typeof(__x))(unsigned long) \
-				__insn_cmpexch4((ptr), (u32)(unsigned long)(n)); \
+				__insn_cmpexch4((ptr), \
+					(u32)(unsigned long)(n)); \
 			break; \
 		case 8: \
-			__x = (typeof(__x))__insn_cmpexch((ptr), (u64)(n)); \
+			__x = (typeof(__x))__insn_cmpexch((ptr), \
+					(long long)(n)); \
 			break; \
 		default: \
 			__cmpxchg_called_with_bad_pointer(); \
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index 759efa337be8..c89b211fd9e7 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -107,19 +107,19 @@ unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask)
 EXPORT_SYMBOL(_atomic_xor);
 
 
-u64 _atomic64_xchg(u64 *v, u64 n)
+long long _atomic64_xchg(long long *v, long long n)
 {
 	return __atomic64_xchg(v, __atomic_setup(v), n);
 }
 EXPORT_SYMBOL(_atomic64_xchg);
 
-u64 _atomic64_xchg_add(u64 *v, u64 i)
+long long _atomic64_xchg_add(long long *v, long long i)
 {
 	return __atomic64_xchg_add(v, __atomic_setup(v), i);
 }
 EXPORT_SYMBOL(_atomic64_xchg_add);
 
-u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u)
+long long _atomic64_xchg_add_unless(long long *v, long long a, long long u)
 {
 	/*
 	 * Note: argument order is switched here since it is easier
@@ -130,7 +130,7 @@ u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u)
 }
 EXPORT_SYMBOL(_atomic64_xchg_add_unless);
 
-u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n)
+long long _atomic64_cmpxchg(long long *v, long long o, long long n)
 {
 	return __atomic64_cmpxchg(v, __atomic_setup(v), o, n);
 }
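
For reference, a minimal caller sketch of the converted interface (hypothetical example code, not part of the patch): after this change, every value passed to or returned from the 32-bit tile atomic64 helpers is a plain long long rather than a u64.

/* Hypothetical usage sketch; exercises the long long-typed atomic64 API. */
#include <linux/atomic.h>

static atomic64_t sample_counter = ATOMIC64_INIT(0);

static void sample_use(void)
{
	long long old, now;

	/* atomic64_set() is a real atomic op here, not a raw store. */
	atomic64_set(&sample_counter, 100LL);
	atomic64_add(5LL, &sample_counter);              /* counter = 105 */
	now = atomic64_add_return(1LL, &sample_counter); /* now = 106 */

	/* Swap in a new value, returning the previous one. */
	old = atomic64_xchg(&sample_counter, 0LL);       /* old = 106 */

	/* Replace the value only if it still equals 0. */
	old = atomic64_cmpxchg(&sample_counter, 0LL, now);

	/* Add 1 unless the counter is already 42; non-zero means it added. */
	if (atomic64_add_unless(&sample_counter, 1LL, 42LL))
		old = atomic64_read(&sample_counter);
}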