Diffstat (limited to 'arch/tile/include/asm/atomic_32.h')
-rw-r--r-- | arch/tile/include/asm/atomic_32.h | 27 | +++++++++++++++------------
1 file changed, 15 insertions(+), 12 deletions(-)
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 0d0395b1b152..1ad4a1f7d42b 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -80,7 +80,7 @@ static inline void atomic_set(atomic_t *v, int n)
 /* A 64bit atomic type */
 
 typedef struct {
-	u64 __aligned(8) counter;
+	long long counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(val) { (val) }
@@ -91,14 +91,14 @@ typedef struct {
  *
  * Atomically reads the value of @v.
  */
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
 	/*
 	 * Requires an atomic op to read both 32-bit parts consistently.
 	 * Casting away const is safe since the atomic support routines
 	 * do not write to memory if the value has not been modified.
 	 */
-	return _atomic64_xchg_add((u64 *)&v->counter, 0);
+	return _atomic64_xchg_add((long long *)&v->counter, 0);
 }
 
 /**
@@ -108,7 +108,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
  *
  * Atomically adds @i to @v.
  */
-static inline void atomic64_add(u64 i, atomic64_t *v)
+static inline void atomic64_add(long long i, atomic64_t *v)
 {
 	_atomic64_xchg_add(&v->counter, i);
 }
@@ -120,7 +120,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
  *
  * Atomically adds @i to @v and returns @i + @v
  */
-static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
 	smp_mb(); /* barrier for proper semantics */
 	return _atomic64_xchg_add(&v->counter, i) + i;
@@ -135,7 +135,8 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
  * Atomically adds @a to @v, so long as @v was not already @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+static inline long long atomic64_add_unless(atomic64_t *v, long long a,
+					    long long u)
 {
 	smp_mb(); /* barrier for proper semantics */
 	return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
@@ -151,7 +152,7 @@ static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
 * atomic64_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
-static inline void atomic64_set(atomic64_t *v, u64 n)
+static inline void atomic64_set(atomic64_t *v, long long n)
 {
 	_atomic64_xchg(&v->counter, n);
 }
@@ -236,11 +237,13 @@ extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
 extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
-extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n);
-extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);
-extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
-extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
-				      int *lock, u64 o, u64 n);
+extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
+				    long long o, long long n);
+extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
+				     long long n);
+extern long long __atomic64_xchg_add_unless(volatile long long *p,
+					    int *lock, long long o, long long n);
 
 /* Return failure from the atomic wrappers. */
 struct __get_user __atomic_bad_address(int __user *addr);