 include/asm-x86/atomic_64.h | 251 +++++++++++++++++++----------------------
 1 file changed, 119 insertions(+), 132 deletions(-)

diff --git a/include/asm-x86/atomic_64.h b/include/asm-x86/atomic_64.h
index 2d20a7a19f62..3e0cd7d38335 100644
--- a/include/asm-x86/atomic_64.h
+++ b/include/asm-x86/atomic_64.h
@@ -22,140 +22,135 @@
  * on us. We need to use _exactly_ the address the user gave us,
  * not some alias that contains the same information.
  */
-typedef struct { int counter; } atomic_t;
+typedef struct {
+	int counter;
+} atomic_t;
 
 #define ATOMIC_INIT(i)	{ (i) }
 
 /**
  * atomic_read - read atomic variable
  * @v: pointer of type atomic_t
  *
  * Atomically reads the value of @v.
  */
 #define atomic_read(v)		((v)->counter)
 
 /**
  * atomic_set - set atomic variable
  * @v: pointer of type atomic_t
  * @i: required value
  *
  * Atomically sets the value of @v to @i.
  */
-#define atomic_set(v,i)		(((v)->counter) = (i))
+#define atomic_set(v, i)	(((v)->counter) = (i))
 
 /**
  * atomic_add - add integer to atomic variable
  * @i: integer value to add
  * @v: pointer of type atomic_t
  *
  * Atomically adds @i to @v.
  */
-static __inline__ void atomic_add(int i, atomic_t *v)
+static inline void atomic_add(int i, atomic_t *v)
 {
-	__asm__ __volatile__(
-		LOCK_PREFIX "addl %1,%0"
-		:"=m" (v->counter)
-		:"ir" (i), "m" (v->counter));
+	asm volatile(LOCK_PREFIX "addl %1,%0"
+		     : "=m" (v->counter)
+		     : "ir" (i), "m" (v->counter));
 }
 
 /**
  * atomic_sub - subtract the atomic variable
  * @i: integer value to subtract
  * @v: pointer of type atomic_t
  *
  * Atomically subtracts @i from @v.
  */
-static __inline__ void atomic_sub(int i, atomic_t *v)
+static inline void atomic_sub(int i, atomic_t *v)
 {
-	__asm__ __volatile__(
-		LOCK_PREFIX "subl %1,%0"
-		:"=m" (v->counter)
-		:"ir" (i), "m" (v->counter));
+	asm volatile(LOCK_PREFIX "subl %1,%0"
+		     : "=m" (v->counter)
+		     : "ir" (i), "m" (v->counter));
 }
 
 /**
  * atomic_sub_and_test - subtract value from variable and test result
  * @i: integer value to subtract
  * @v: pointer of type atomic_t
  *
  * Atomically subtracts @i from @v and returns
  * true if the result is zero, or false for all
  * other cases.
  */
-static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
+static inline int atomic_sub_and_test(int i, atomic_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		LOCK_PREFIX "subl %2,%0; sete %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"ir" (i), "m" (v->counter) : "memory");
+	asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
+		     : "=m" (v->counter), "=qm" (c)
+		     : "ir" (i), "m" (v->counter) : "memory");
 	return c;
 }
 
 /**
  * atomic_inc - increment atomic variable
  * @v: pointer of type atomic_t
  *
  * Atomically increments @v by 1.
  */
-static __inline__ void atomic_inc(atomic_t *v)
+static inline void atomic_inc(atomic_t *v)
 {
-	__asm__ __volatile__(
-		LOCK_PREFIX "incl %0"
-		:"=m" (v->counter)
-		:"m" (v->counter));
+	asm volatile(LOCK_PREFIX "incl %0"
+		     : "=m" (v->counter)
+		     : "m" (v->counter));
 }
 
 /**
  * atomic_dec - decrement atomic variable
  * @v: pointer of type atomic_t
  *
  * Atomically decrements @v by 1.
  */
-static __inline__ void atomic_dec(atomic_t *v)
+static inline void atomic_dec(atomic_t *v)
 {
-	__asm__ __volatile__(
-		LOCK_PREFIX "decl %0"
-		:"=m" (v->counter)
-		:"m" (v->counter));
+	asm volatile(LOCK_PREFIX "decl %0"
+		     : "=m" (v->counter)
+		     : "m" (v->counter));
 }
 
 /**
  * atomic_dec_and_test - decrement and test
  * @v: pointer of type atomic_t
  *
  * Atomically decrements @v by 1 and
  * returns true if the result is 0, or false for all other
  * cases.
  */
-static __inline__ int atomic_dec_and_test(atomic_t *v)
+static inline int atomic_dec_and_test(atomic_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		LOCK_PREFIX "decl %0; sete %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"m" (v->counter) : "memory");
+	asm volatile(LOCK_PREFIX "decl %0; sete %1"
+		     : "=m" (v->counter), "=qm" (c)
+		     : "m" (v->counter) : "memory");
 	return c != 0;
 }
 
 /**
  * atomic_inc_and_test - increment and test
  * @v: pointer of type atomic_t
  *
  * Atomically increments @v by 1
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-static __inline__ int atomic_inc_and_test(atomic_t *v)
+static inline int atomic_inc_and_test(atomic_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		LOCK_PREFIX "incl %0; sete %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"m" (v->counter) : "memory");
+	asm volatile(LOCK_PREFIX "incl %0; sete %1"
+		     : "=m" (v->counter), "=qm" (c)
+		     : "m" (v->counter) : "memory");
 	return c != 0;
 }
 
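[Note] Everything in this hunk is mechanical: __inline__ becomes inline, __asm__ __volatile__ becomes asm volatile, constraint lists gain a space after each ":", and the one-line typedef is split. For readers new to these primitives, a minimal usage sketch of the refcount idiom they back; the struct conn type and helper names are hypothetical, not part of this patch:

	/* Hypothetical usage sketch -- type and helpers are illustrative only;
	 * assumes <linux/slab.h> for kfree(). */
	struct conn {
		atomic_t refcount;
		/* ... payload ... */
	};

	static void conn_get(struct conn *c)
	{
		atomic_inc(&c->refcount);	/* locked incl, SMP-safe */
	}

	static void conn_put(struct conn *c)
	{
		/* atomic_dec_and_test() returns true only for the final put */
		if (atomic_dec_and_test(&c->refcount))
			kfree(c);
	}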
@@ -163,19 +158,18 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
  * atomic_add_negative - add and test if negative
  * @i: integer value to add
  * @v: pointer of type atomic_t
  *
  * Atomically adds @i to @v and returns true
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-static __inline__ int atomic_add_negative(int i, atomic_t *v)
+static inline int atomic_add_negative(int i, atomic_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		LOCK_PREFIX "addl %2,%0; sets %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"ir" (i), "m" (v->counter) : "memory");
+	asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
+		     : "=m" (v->counter), "=qm" (c)
+		     : "ir" (i), "m" (v->counter) : "memory");
 	return c;
 }
 
@@ -186,27 +180,28 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v)
  *
  * Atomically adds @i to @v and returns @i + @v
  */
-static __inline__ int atomic_add_return(int i, atomic_t *v)
+static inline int atomic_add_return(int i, atomic_t *v)
 {
 	int __i = i;
-	__asm__ __volatile__(
-		LOCK_PREFIX "xaddl %0, %1"
-		:"+r" (i), "+m" (v->counter)
-		: : "memory");
+	asm volatile(LOCK_PREFIX "xaddl %0, %1"
+		     : "+r" (i), "+m" (v->counter)
+		     : : "memory");
 	return i + __i;
 }
 
-static __inline__ int atomic_sub_return(int i, atomic_t *v)
+static inline int atomic_sub_return(int i, atomic_t *v)
 {
-	return atomic_add_return(-i,v);
+	return atomic_add_return(-i, v);
 }
 
-#define atomic_inc_return(v)  (atomic_add_return(1,v))
-#define atomic_dec_return(v)  (atomic_sub_return(1,v))
+#define atomic_inc_return(v)  (atomic_add_return(1, v))
+#define atomic_dec_return(v)  (atomic_sub_return(1, v))
 
 /* An 64bit atomic type */
 
-typedef struct { long counter; } atomic64_t;
+typedef struct {
+	long counter;
+} atomic64_t;
 
 #define ATOMIC64_INIT(i)	{ (i) }
 
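[Note] One non-obvious bit above: "xaddl %0, %1" writes the counter's old value back into the register operand, and i is declared "+r" (read-write), so after the asm i holds the old counter while __i still holds the addend; i + __i is therefore the post-add value. A rough portable rendering of the same semantics, using GCC's __sync_fetch_and_add() builtin purely as a stand-in (not what the kernel does here):

	/* Illustrative equivalent of atomic_add_return(); not the kernel's code. */
	static inline int atomic_add_return_sketch(int i, atomic_t *v)
	{
		int old = __sync_fetch_and_add(&v->counter, i);	/* atomic; returns prior value */
		return old + i;		/* what "lock xaddl" plus "i + __i" computes */
	}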
@@ -226,7 +221,7 @@ typedef struct { long counter; } atomic64_t;
  *
  * Atomically sets the value of @v to @i.
  */
-#define atomic64_set(v,i)		(((v)->counter) = (i))
+#define atomic64_set(v, i)	(((v)->counter) = (i))
 
 /**
  * atomic64_add - add integer to atomic64 variable
@@ -235,12 +230,11 @@ typedef struct { long counter; } atomic64_t;
  *
  * Atomically adds @i to @v.
  */
-static __inline__ void atomic64_add(long i, atomic64_t *v)
+static inline void atomic64_add(long i, atomic64_t *v)
 {
-	__asm__ __volatile__(
-		LOCK_PREFIX "addq %1,%0"
-		:"=m" (v->counter)
-		:"ir" (i), "m" (v->counter));
+	asm volatile(LOCK_PREFIX "addq %1,%0"
+		     : "=m" (v->counter)
+		     : "ir" (i), "m" (v->counter));
 }
 
 /**
@@ -250,12 +244,11 @@ static __inline__ void atomic64_add(long i, atomic64_t *v)
  *
  * Atomically subtracts @i from @v.
  */
-static __inline__ void atomic64_sub(long i, atomic64_t *v)
+static inline void atomic64_sub(long i, atomic64_t *v)
 {
-	__asm__ __volatile__(
-		LOCK_PREFIX "subq %1,%0"
-		:"=m" (v->counter)
-		:"ir" (i), "m" (v->counter));
+	asm volatile(LOCK_PREFIX "subq %1,%0"
+		     : "=m" (v->counter)
+		     : "ir" (i), "m" (v->counter));
 }
 
 /**
@@ -267,14 +260,13 @@ static __inline__ void atomic64_sub(long i, atomic64_t *v)
  * true if the result is zero, or false for all
  * other cases.
  */
-static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v)
+static inline int atomic64_sub_and_test(long i, atomic64_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		LOCK_PREFIX "subq %2,%0; sete %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"ir" (i), "m" (v->counter) : "memory");
+	asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
+		     : "=m" (v->counter), "=qm" (c)
+		     : "ir" (i), "m" (v->counter) : "memory");
 	return c;
 }
 
@@ -284,12 +276,11 @@ static __inline__ int atomic64_sub_and_test(long i, atomic64_t *v)
  *
  * Atomically increments @v by 1.
  */
-static __inline__ void atomic64_inc(atomic64_t *v)
+static inline void atomic64_inc(atomic64_t *v)
 {
-	__asm__ __volatile__(
-		LOCK_PREFIX "incq %0"
-		:"=m" (v->counter)
-		:"m" (v->counter));
+	asm volatile(LOCK_PREFIX "incq %0"
+		     : "=m" (v->counter)
+		     : "m" (v->counter));
 }
 
 /**
@@ -298,12 +289,11 @@ static __inline__ void atomic64_inc(atomic64_t *v)
  *
  * Atomically decrements @v by 1.
  */
-static __inline__ void atomic64_dec(atomic64_t *v)
+static inline void atomic64_dec(atomic64_t *v)
 {
-	__asm__ __volatile__(
-		LOCK_PREFIX "decq %0"
-		:"=m" (v->counter)
-		:"m" (v->counter));
+	asm volatile(LOCK_PREFIX "decq %0"
+		     : "=m" (v->counter)
+		     : "m" (v->counter));
 }
 
 /**
@@ -314,14 +304,13 @@ static __inline__ void atomic64_dec(atomic64_t *v)
  * returns true if the result is 0, or false for all other
  * cases.
  */
-static __inline__ int atomic64_dec_and_test(atomic64_t *v)
+static inline int atomic64_dec_and_test(atomic64_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		LOCK_PREFIX "decq %0; sete %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"m" (v->counter) : "memory");
+	asm volatile(LOCK_PREFIX "decq %0; sete %1"
+		     : "=m" (v->counter), "=qm" (c)
+		     : "m" (v->counter) : "memory");
 	return c != 0;
 }
 
@@ -333,14 +322,13 @@ static __inline__ int atomic64_dec_and_test(atomic64_t *v)
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-static __inline__ int atomic64_inc_and_test(atomic64_t *v)
+static inline int atomic64_inc_and_test(atomic64_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		LOCK_PREFIX "incq %0; sete %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"m" (v->counter) : "memory");
+	asm volatile(LOCK_PREFIX "incq %0; sete %1"
+		     : "=m" (v->counter), "=qm" (c)
+		     : "m" (v->counter) : "memory");
 	return c != 0;
 }
 
@@ -353,14 +341,13 @@ static __inline__ int atomic64_inc_and_test(atomic64_t *v)
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-static __inline__ int atomic64_add_negative(long i, atomic64_t *v)
+static inline int atomic64_add_negative(long i, atomic64_t *v)
 {
 	unsigned char c;
 
-	__asm__ __volatile__(
-		LOCK_PREFIX "addq %2,%0; sets %1"
-		:"=m" (v->counter), "=qm" (c)
-		:"ir" (i), "m" (v->counter) : "memory");
+	asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
+		     : "=m" (v->counter), "=qm" (c)
+		     : "ir" (i), "m" (v->counter) : "memory");
 	return c;
 }
 
@@ -371,29 +358,28 @@ static __inline__ int atomic64_add_negative(long i, atomic64_t *v)
  *
  * Atomically adds @i to @v and returns @i + @v
  */
-static __inline__ long atomic64_add_return(long i, atomic64_t *v)
+static inline long atomic64_add_return(long i, atomic64_t *v)
 {
 	long __i = i;
-	__asm__ __volatile__(
-		LOCK_PREFIX "xaddq %0, %1;"
-		:"+r" (i), "+m" (v->counter)
-		: : "memory");
+	asm volatile(LOCK_PREFIX "xaddq %0, %1;"
+		     : "+r" (i), "+m" (v->counter)
+		     : : "memory");
 	return i + __i;
 }
 
-static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
+static inline long atomic64_sub_return(long i, atomic64_t *v)
 {
-	return atomic64_add_return(-i,v);
+	return atomic64_add_return(-i, v);
 }
 
-#define atomic64_inc_return(v)  (atomic64_add_return(1,v))
-#define atomic64_dec_return(v)  (atomic64_sub_return(1,v))
+#define atomic64_inc_return(v)  (atomic64_add_return(1, (v)))
+#define atomic64_dec_return(v)  (atomic64_sub_return(1, (v)))
 
-#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
+#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
 
 /**
  * atomic_add_unless - add unless the number is a given value
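[Note] The extra parentheses around old and new make the macro arguments expansion-safe. As a reminder of the semantics these wrap (hedged sketch, values chosen purely for illustration): cmpxchg() stores new only if the current value equals old, and in either case returns the value it found, so success is detected by comparing the return value with the expected one.

	/* Illustrative only: success is (ret == expected). */
	static void cmpxchg_demo(void)
	{
		atomic_t v = ATOMIC_INIT(3);
		int ret;

		ret = atomic_cmpxchg(&v, 3, 5);	/* matched: ret == 3, counter is now 5 */
		ret = atomic_cmpxchg(&v, 3, 7);	/* no match: ret == 5, counter stays 5 */
		(void)ret;
	}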
@@ -404,7 +390,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
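[Note] The hunk shows only the head of atomic_add_unless(); the rest of the body is unchanged and therefore elided by the diff. In trees of this vintage it is the standard cmpxchg() retry loop, roughly as below -- a reconstruction for orientation, not a quotation from this patch:

	for (;;) {
		if (unlikely(c == (u)))
			break;			/* hit the forbidden value: do not add */
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;			/* our update won the race */
		c = old;			/* someone else changed it: retry */
	}
	return c != (u);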
@@ -430,7 +416,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
 {
 	long c, old;
 	c = atomic64_read(v);
@@ -448,13 +434,14 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
 /* These are x86-specific, used by some header files */
 #define atomic_clear_mask(mask, addr) \
-__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
-: : "r" (~(mask)),"m" (*addr) : "memory")
+	asm volatile(LOCK_PREFIX "andl %0,%1" \
+		     : : "r" (~(mask)), "m" (*(addr)) : "memory")
 
 #define atomic_set_mask(mask, addr) \
-__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
-: : "r" ((unsigned)mask),"m" (*(addr)) : "memory")
+	asm volatile(LOCK_PREFIX "orl %0,%1" \
+		     : : "r" ((unsigned)(mask)), "m" (*(addr)) \
+		     : "memory")
 
 /* Atomic operations are already serializing on x86 */
 #define smp_mb__before_atomic_dec()	barrier()
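[Note] Unlike the functions above, atomic_clear_mask()/atomic_set_mask() act on an ordinary unsigned int through a pointer rather than on an atomic_t, which is why they take an address argument. A hedged usage sketch; the flag bit and variable below are made up for illustration:

	/* Illustrative only -- FLAG_PENDING and hw_flags are hypothetical. */
	#define FLAG_PENDING	0x00000001

	static unsigned int hw_flags;

	static void mark_pending(void)
	{
		atomic_set_mask(FLAG_PENDING, &hw_flags);	/* lock orl %0,%1 */
	}

	static void clear_pending(void)
	{
		atomic_clear_mask(FLAG_PENDING, &hw_flags);	/* lock andl with ~mask */
	}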