-rw-r--r--   arch/x86/include/asm/atomic64_32.h | 278
-rw-r--r--   arch/x86/lib/Makefile              |   3
-rw-r--r--   arch/x86/lib/atomic64_32.c         | 273
-rw-r--r--   arch/x86/lib/atomic64_386_32.S     | 175
-rw-r--r--   arch/x86/lib/atomic64_cx8_32.S     | 225
5 files changed, 664 insertions, 290 deletions
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 03027bf28de5..2a934aa19a43 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -14,109 +14,193 @@ typedef struct {
| 14 | 14 | ||
| 15 | #define ATOMIC64_INIT(val) { (val) } | 15 | #define ATOMIC64_INIT(val) { (val) } |
| 16 | 16 | ||
| 17 | extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val); | 17 | #ifdef CONFIG_X86_CMPXCHG64 |
| 18 | #define ATOMIC64_ALTERNATIVE_(f, g) "call atomic64_" #g "_cx8" | ||
| 19 | #else | ||
| 20 | #define ATOMIC64_ALTERNATIVE_(f, g) ALTERNATIVE("call atomic64_" #f "_386", "call atomic64_" #g "_cx8", X86_FEATURE_CX8) | ||
| 21 | #endif | ||
| 22 | |||
| 23 | #define ATOMIC64_ALTERNATIVE(f) ATOMIC64_ALTERNATIVE_(f, f) | ||
| 24 | |||
| 25 | /** | ||
| 26 | * atomic64_cmpxchg - cmpxchg atomic64 variable | ||
| 28 | * @v: pointer to type atomic64_t | ||
| 28 | * @o: expected value | ||
| 29 | * @n: new value | ||
| 30 | * | ||
| 31 | * Atomically sets @v to @n if it was equal to @o and returns | ||
| 32 | * the old value. | ||
| 33 | */ | ||
| 34 | |||
| 35 | static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n) | ||
| 36 | { | ||
| 37 | return cmpxchg64(&v->counter, o, n); | ||
| 38 | } | ||
| 18 | 39 | ||
| 19 | /** | 40 | /** |
| 20 | * atomic64_xchg - xchg atomic64 variable | 41 | * atomic64_xchg - xchg atomic64 variable |
| 21 | * @ptr: pointer to type atomic64_t | 42 | * @v: pointer to type atomic64_t |
| 22 | * @new_val: value to assign | 43 | * @n: value to assign |
| 23 | * | 44 | * |
| 24 | * Atomically xchgs the value of @ptr to @new_val and returns | 45 | * Atomically xchgs the value of @v to @n and returns |
| 25 | * the old value. | 46 | * the old value. |
| 26 | */ | 47 | */ |
| 27 | extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val); | 48 | static inline long long atomic64_xchg(atomic64_t *v, long long n) |
| 49 | { | ||
| 50 | long long o; | ||
| 51 | unsigned high = (unsigned)(n >> 32); | ||
| 52 | unsigned low = (unsigned)n; | ||
| 53 | asm volatile(ATOMIC64_ALTERNATIVE(xchg) | ||
| 54 | : "=A" (o), "+b" (low), "+c" (high) | ||
| 55 | : "S" (v) | ||
| 56 | : "memory" | ||
| 57 | ); | ||
| 58 | return o; | ||
| 59 | } | ||
| 28 | 60 | ||
| 29 | /** | 61 | /** |
| 30 | * atomic64_set - set atomic64 variable | 62 | * atomic64_set - set atomic64 variable |
| 31 | * @ptr: pointer to type atomic64_t | 63 | * @v: pointer to type atomic64_t |
| 32 | * @new_val: value to assign | 64 | * @i: value to assign |
| 33 | * | 65 | * |
| 34 | * Atomically sets the value of @ptr to @new_val. | 66 | * Atomically sets the value of @v to @i. |
| 35 | */ | 67 | */ |
| 36 | extern void atomic64_set(atomic64_t *ptr, u64 new_val); | 68 | static inline void atomic64_set(atomic64_t *v, long long i) |
| 69 | { | ||
| 70 | unsigned high = (unsigned)(i >> 32); | ||
| 71 | unsigned low = (unsigned)i; | ||
| 72 | asm volatile(ATOMIC64_ALTERNATIVE(set) | ||
| 73 | : "+b" (low), "+c" (high) | ||
| 74 | : "S" (v) | ||
| 75 | : "eax", "edx", "memory" | ||
| 76 | ); | ||
| 77 | } | ||
| 37 | 78 | ||
| 38 | /** | 79 | /** |
| 39 | * atomic64_read - read atomic64 variable | 80 | * atomic64_read - read atomic64 variable |
| 40 | * @ptr: pointer to type atomic64_t | 81 | * @v: pointer to type atomic64_t |
| 41 | * | 82 | * |
| 42 | * Atomically reads the value of @ptr and returns it. | 83 | * Atomically reads the value of @v and returns it. |
| 43 | */ | 84 | */ |
| 44 | static inline u64 atomic64_read(atomic64_t *ptr) | 85 | static inline long long atomic64_read(atomic64_t *v) |
| 45 | { | 86 | { |
| 46 | u64 res; | 87 | long long r; |
| 47 | 88 | asm volatile(ATOMIC64_ALTERNATIVE(read) | |
| 48 | /* | 89 | : "=A" (r), "+c" (v) |
| 49 | * Note, we inline this atomic64_t primitive because | 90 | : : "memory" |
| 50 | * it only clobbers EAX/EDX and leaves the others | 91 | ); |
| 51 | * untouched. We also (somewhat subtly) rely on the | 92 | return r; |
| 52 | * fact that cmpxchg8b returns the current 64-bit value | 93 | } |
| 53 | * of the memory location we are touching: | ||
| 54 | */ | ||
| 55 | asm volatile( | ||
| 56 | "mov %%ebx, %%eax\n\t" | ||
| 57 | "mov %%ecx, %%edx\n\t" | ||
| 58 | LOCK_PREFIX "cmpxchg8b %1\n" | ||
| 59 | : "=&A" (res) | ||
| 60 | : "m" (*ptr) | ||
| 61 | ); | ||
| 62 | |||
| 63 | return res; | ||
| 64 | } | ||
| 65 | |||
| 66 | extern u64 atomic64_read(atomic64_t *ptr); | ||
| 67 | 94 | ||
| 68 | /** | 95 | /** |
| 69 | * atomic64_add_return - add and return | 96 | * atomic64_add_return - add and return |
| 70 | * @delta: integer value to add | 97 | * @i: integer value to add |
| 71 | * @ptr: pointer to type atomic64_t | 98 | * @v: pointer to type atomic64_t |
| 72 | * | 99 | * |
| 73 | * Atomically adds @delta to @ptr and returns @delta + *@ptr | 100 | * Atomically adds @i to @v and returns @i + *@v |
| 74 | */ | 101 | */ |
| 75 | extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr); | 102 | static inline long long atomic64_add_return(long long i, atomic64_t *v) |
| 103 | { | ||
| 104 | asm volatile(ATOMIC64_ALTERNATIVE(add_return) | ||
| 105 | : "+A" (i), "+c" (v) | ||
| 106 | : : "memory" | ||
| 107 | ); | ||
| 108 | return i; | ||
| 109 | } | ||
| 76 | 110 | ||
| 77 | /* | 111 | /* |
| 78 | * Other variants with different arithmetic operators: | 112 | * Other variants with different arithmetic operators: |
| 79 | */ | 113 | */ |
| 80 | extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr); | 114 | static inline long long atomic64_sub_return(long long i, atomic64_t *v) |
| 81 | extern u64 atomic64_inc_return(atomic64_t *ptr); | 115 | { |
| 82 | extern u64 atomic64_dec_return(atomic64_t *ptr); | 116 | asm volatile(ATOMIC64_ALTERNATIVE(sub_return) |
| 117 | : "+A" (i), "+c" (v) | ||
| 118 | : : "memory" | ||
| 119 | ); | ||
| 120 | return i; | ||
| 121 | } | ||
| 122 | |||
| 123 | static inline long long atomic64_inc_return(atomic64_t *v) | ||
| 124 | { | ||
| 125 | long long a; | ||
| 126 | asm volatile(ATOMIC64_ALTERNATIVE(inc_return) | ||
| 127 | : "=A" (a) | ||
| 128 | : "S" (v) | ||
| 129 | : "memory", "ecx" | ||
| 130 | ); | ||
| 131 | return a; | ||
| 132 | } | ||
| 133 | |||
| 134 | static inline long long atomic64_dec_return(atomic64_t *v) | ||
| 135 | { | ||
| 136 | long long a; | ||
| 137 | asm volatile(ATOMIC64_ALTERNATIVE(dec_return) | ||
| 138 | : "=A" (a) | ||
| 139 | : "S" (v) | ||
| 140 | : "memory", "ecx" | ||
| 141 | ); | ||
| 142 | return a; | ||
| 143 | } | ||
| 83 | 144 | ||
| 84 | /** | 145 | /** |
| 85 | * atomic64_add - add integer to atomic64 variable | 146 | * atomic64_add - add integer to atomic64 variable |
| 86 | * @delta: integer value to add | 147 | * @i: integer value to add |
| 87 | * @ptr: pointer to type atomic64_t | 148 | * @v: pointer to type atomic64_t |
| 88 | * | 149 | * |
| 89 | * Atomically adds @delta to @ptr. | 150 | * Atomically adds @i to @v. |
| 90 | */ | 151 | */ |
| 91 | extern void atomic64_add(u64 delta, atomic64_t *ptr); | 152 | static inline long long atomic64_add(long long i, atomic64_t *v) |
| 153 | { | ||
| 154 | asm volatile(ATOMIC64_ALTERNATIVE_(add, add_return) | ||
| 155 | : "+A" (i), "+c" (v) | ||
| 156 | : : "memory" | ||
| 157 | ); | ||
| 158 | return i; | ||
| 159 | } | ||
| 92 | 160 | ||
| 93 | /** | 161 | /** |
| 94 | * atomic64_sub - subtract the atomic64 variable | 162 | * atomic64_sub - subtract the atomic64 variable |
| 95 | * @delta: integer value to subtract | 163 | * @i: integer value to subtract |
| 96 | * @ptr: pointer to type atomic64_t | 164 | * @v: pointer to type atomic64_t |
| 97 | * | 165 | * |
| 98 | * Atomically subtracts @delta from @ptr. | 166 | * Atomically subtracts @i from @v. |
| 99 | */ | 167 | */ |
| 100 | extern void atomic64_sub(u64 delta, atomic64_t *ptr); | 168 | static inline long long atomic64_sub(long long i, atomic64_t *v) |
| 169 | { | ||
| 170 | asm volatile(ATOMIC64_ALTERNATIVE_(sub, sub_return) | ||
| 171 | : "+A" (i), "+c" (v) | ||
| 172 | : : "memory" | ||
| 173 | ); | ||
| 174 | return i; | ||
| 175 | } | ||
| 101 | 176 | ||
| 102 | /** | 177 | /** |
| 103 | * atomic64_sub_and_test - subtract value from variable and test result | 178 | * atomic64_sub_and_test - subtract value from variable and test result |
| 104 | * @delta: integer value to subtract | 179 | * @i: integer value to subtract |
| 105 | * @ptr: pointer to type atomic64_t | 180 | * @v: pointer to type atomic64_t |
| 106 | * | 181 | * |
| 107 | * Atomically subtracts @delta from @ptr and returns | 182 | * Atomically subtracts @i from @v and returns |
| 108 | * true if the result is zero, or false for all | 183 | * true if the result is zero, or false for all |
| 109 | * other cases. | 184 | * other cases. |
| 110 | */ | 185 | */ |
| 111 | extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr); | 186 | static inline int atomic64_sub_and_test(long long i, atomic64_t *v) |
| 187 | { | ||
| 188 | return atomic64_sub_return(i, v) == 0; | ||
| 189 | } | ||
| 112 | 190 | ||
| 113 | /** | 191 | /** |
| 114 | * atomic64_inc - increment atomic64 variable | 192 | * atomic64_inc - increment atomic64 variable |
| 115 | * @ptr: pointer to type atomic64_t | 193 | * @v: pointer to type atomic64_t |
| 116 | * | 194 | * |
| 117 | * Atomically increments @ptr by 1. | 195 | * Atomically increments @v by 1. |
| 118 | */ | 196 | */ |
| 119 | extern void atomic64_inc(atomic64_t *ptr); | 197 | static inline void atomic64_inc(atomic64_t *v) |
| 198 | { | ||
| 199 | asm volatile(ATOMIC64_ALTERNATIVE_(inc, inc_return) | ||
| 200 | : : "S" (v) | ||
| 201 | : "memory", "eax", "ecx", "edx" | ||
| 202 | ); | ||
| 203 | } | ||
| 120 | 204 | ||
| 121 | /** | 205 | /** |
| 122 | * atomic64_dec - decrement atomic64 variable | 206 | * atomic64_dec - decrement atomic64 variable |
@@ -124,37 +208,97 @@ extern void atomic64_inc(atomic64_t *ptr);
| 124 | * | 208 | * |
| 125 | * Atomically decrements @ptr by 1. | 209 | * Atomically decrements @ptr by 1. |
| 126 | */ | 210 | */ |
| 127 | extern void atomic64_dec(atomic64_t *ptr); | 211 | static inline void atomic64_dec(atomic64_t *v) |
| 212 | { | ||
| 213 | asm volatile(ATOMIC64_ALTERNATIVE_(dec, dec_return) | ||
| 214 | : : "S" (v) | ||
| 215 | : "memory", "eax", "ecx", "edx" | ||
| 216 | ); | ||
| 217 | } | ||
| 128 | 218 | ||
| 129 | /** | 219 | /** |
| 130 | * atomic64_dec_and_test - decrement and test | 220 | * atomic64_dec_and_test - decrement and test |
| 131 | * @ptr: pointer to type atomic64_t | 221 | * @v: pointer to type atomic64_t |
| 132 | * | 222 | * |
| 133 | * Atomically decrements @ptr by 1 and | 223 | * Atomically decrements @v by 1 and |
| 134 | * returns true if the result is 0, or false for all other | 224 | * returns true if the result is 0, or false for all other |
| 135 | * cases. | 225 | * cases. |
| 136 | */ | 226 | */ |
| 137 | extern int atomic64_dec_and_test(atomic64_t *ptr); | 227 | static inline int atomic64_dec_and_test(atomic64_t *v) |
| 228 | { | ||
| 229 | return atomic64_dec_return(v) == 0; | ||
| 230 | } | ||
| 138 | 231 | ||
| 139 | /** | 232 | /** |
| 140 | * atomic64_inc_and_test - increment and test | 233 | * atomic64_inc_and_test - increment and test |
| 141 | * @ptr: pointer to type atomic64_t | 234 | * @v: pointer to type atomic64_t |
| 142 | * | 235 | * |
| 143 | * Atomically increments @ptr by 1 | 236 | * Atomically increments @v by 1 |
| 144 | * and returns true if the result is zero, or false for all | 237 | * and returns true if the result is zero, or false for all |
| 145 | * other cases. | 238 | * other cases. |
| 146 | */ | 239 | */ |
| 147 | extern int atomic64_inc_and_test(atomic64_t *ptr); | 240 | static inline int atomic64_inc_and_test(atomic64_t *v) |
| 241 | { | ||
| 242 | return atomic64_inc_return(v) == 0; | ||
| 243 | } | ||
| 148 | 244 | ||
| 149 | /** | 245 | /** |
| 150 | * atomic64_add_negative - add and test if negative | 246 | * atomic64_add_negative - add and test if negative |
| 151 | * @delta: integer value to add | 247 | * @i: integer value to add |
| 152 | * @ptr: pointer to type atomic64_t | 248 | * @v: pointer to type atomic64_t |
| 153 | * | 249 | * |
| 154 | * Atomically adds @delta to @ptr and returns true | 250 | * Atomically adds @i to @v and returns true |
| 155 | * if the result is negative, or false when | 251 | * if the result is negative, or false when |
| 156 | * result is greater than or equal to zero. | 252 | * result is greater than or equal to zero. |
| 157 | */ | 253 | */ |
| 158 | extern int atomic64_add_negative(u64 delta, atomic64_t *ptr); | 254 | static inline int atomic64_add_negative(long long i, atomic64_t *v) |
| 255 | { | ||
| 256 | return atomic64_add_return(i, v) < 0; | ||
| 257 | } | ||
| 258 | |||
| 259 | /** | ||
| 260 | * atomic64_add_unless - add unless the number is a given value | ||
| 261 | * @v: pointer of type atomic64_t | ||
| 262 | * @a: the amount to add to v... | ||
| 263 | * @u: ...unless v is equal to u. | ||
| 264 | * | ||
| 265 | * Atomically adds @a to @v, so long as it was not @u. | ||
| 266 | * Returns non-zero if @v was not @u, and zero otherwise. | ||
| 267 | */ | ||
| 268 | static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) | ||
| 269 | { | ||
| 270 | unsigned low = (unsigned)u; | ||
| 271 | unsigned high = (unsigned)(u >> 32); | ||
| 272 | asm volatile(ATOMIC64_ALTERNATIVE(add_unless) "\n\t" | ||
| 273 | : "+A" (a), "+c" (v), "+S" (low), "+D" (high) | ||
| 274 | : : "memory"); | ||
| 275 | return (int)a; | ||
| 276 | } | ||
| 277 | |||
| 278 | |||
| 279 | static inline int atomic64_inc_not_zero(atomic64_t *v) | ||
| 280 | { | ||
| 281 | int r; | ||
| 282 | asm volatile(ATOMIC64_ALTERNATIVE(inc_not_zero) | ||
| 283 | : "=a" (r) | ||
| 284 | : "S" (v) | ||
| 285 | : "ecx", "edx", "memory" | ||
| 286 | ); | ||
| 287 | return r; | ||
| 288 | } | ||
| 289 | |||
| 290 | static inline long long atomic64_dec_if_positive(atomic64_t *v) | ||
| 291 | { | ||
| 292 | long long r; | ||
| 293 | asm volatile(ATOMIC64_ALTERNATIVE(dec_if_positive) | ||
| 294 | : "=A" (r) | ||
| 295 | : "S" (v) | ||
| 296 | : "ecx", "memory" | ||
| 297 | ); | ||
| 298 | return r; | ||
| 299 | } | ||
| 300 | |||
| 301 | #undef ATOMIC64_ALTERNATIVE | ||
| 302 | #undef ATOMIC64_ALTERNATIVE_ | ||
| 159 | 303 | ||
| 160 | #endif /* _ASM_X86_ATOMIC64_32_H */ | 304 | #endif /* _ASM_X86_ATOMIC64_32_H */ |
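
For orientation (not part of the patch): a minimal usage sketch of the interface the header above now provides as inlines. Only atomic64_t, ATOMIC64_INIT() and the atomic64_*() operations come from the header; the counter names and helper functions here are hypothetical.

	#include <asm/atomic.h>		/* atomic64_t and the operations declared above */

	/* hypothetical 64-bit statistics counter shared across CPUs/contexts */
	static atomic64_t bytes_transferred = ATOMIC64_INIT(0);

	static long long account_bytes(long long n)
	{
		/* compiles to a call to atomic64_add_return_cx8, or to the
		 * _386 fallback when the kernel must run on pre-CX8 CPUs */
		return atomic64_add_return(n, &bytes_transferred);
	}

	static int try_get_ref(atomic64_t *refcount)
	{
		/* take a reference only while the object is still live */
		return atomic64_inc_not_zero(refcount);
	}
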
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index cffd754f3039..05d686bbbe9f 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -26,11 +26,12 @@ obj-y += msr.o msr-reg.o msr-reg-export.o
| 26 | 26 | ||
| 27 | ifeq ($(CONFIG_X86_32),y) | 27 | ifeq ($(CONFIG_X86_32),y) |
| 28 | obj-y += atomic64_32.o | 28 | obj-y += atomic64_32.o |
| 29 | lib-y += atomic64_cx8_32.o | ||
| 29 | lib-y += checksum_32.o | 30 | lib-y += checksum_32.o |
| 30 | lib-y += strstr_32.o | 31 | lib-y += strstr_32.o |
| 31 | lib-y += semaphore_32.o string_32.o | 32 | lib-y += semaphore_32.o string_32.o |
| 32 | ifneq ($(CONFIG_X86_CMPXCHG64),y) | 33 | ifneq ($(CONFIG_X86_CMPXCHG64),y) |
| 33 | lib-y += cmpxchg8b_emu.o | 34 | lib-y += cmpxchg8b_emu.o atomic64_386_32.o |
| 34 | endif | 35 | endif |
| 35 | lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o | 36 | lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o |
| 36 | else | 37 | else |
diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
index 824fa0be55a3..540179e8e9fa 100644
--- a/arch/x86/lib/atomic64_32.c
+++ b/arch/x86/lib/atomic64_32.c
@@ -6,225 +6,54 @@
| 6 | #include <asm/cmpxchg.h> | 6 | #include <asm/cmpxchg.h> |
| 7 | #include <asm/atomic.h> | 7 | #include <asm/atomic.h> |
| 8 | 8 | ||
| 9 | static noinline u64 cmpxchg8b(u64 *ptr, u64 old, u64 new) | 9 | long long atomic64_read_cx8(long long, const atomic64_t *v); |
| 10 | { | 10 | EXPORT_SYMBOL(atomic64_read_cx8); |
| 11 | u32 low = new; | 11 | long long atomic64_set_cx8(long long, const atomic64_t *v); |
| 12 | u32 high = new >> 32; | 12 | EXPORT_SYMBOL(atomic64_set_cx8); |
| 13 | 13 | long long atomic64_xchg_cx8(long long, unsigned high); | |
| 14 | asm volatile( | 14 | EXPORT_SYMBOL(atomic64_xchg_cx8); |
| 15 | LOCK_PREFIX "cmpxchg8b %1\n" | 15 | long long atomic64_add_return_cx8(long long a, atomic64_t *v); |
| 16 | : "+A" (old), "+m" (*ptr) | 16 | EXPORT_SYMBOL(atomic64_add_return_cx8); |
| 17 | : "b" (low), "c" (high) | 17 | long long atomic64_sub_return_cx8(long long a, atomic64_t *v); |
| 18 | ); | 18 | EXPORT_SYMBOL(atomic64_sub_return_cx8); |
| 19 | return old; | 19 | long long atomic64_inc_return_cx8(long long a, atomic64_t *v); |
| 20 | } | 20 | EXPORT_SYMBOL(atomic64_inc_return_cx8); |
| 21 | 21 | long long atomic64_dec_return_cx8(long long a, atomic64_t *v); | |
| 22 | u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val) | 22 | EXPORT_SYMBOL(atomic64_dec_return_cx8); |
| 23 | { | 23 | long long atomic64_dec_if_positive_cx8(atomic64_t *v); |
| 24 | return cmpxchg8b(&ptr->counter, old_val, new_val); | 24 | EXPORT_SYMBOL(atomic64_dec_if_positive_cx8); |
| 25 | } | 25 | int atomic64_inc_not_zero_cx8(atomic64_t *v); |
| 26 | EXPORT_SYMBOL(atomic64_cmpxchg); | 26 | EXPORT_SYMBOL(atomic64_inc_not_zero_cx8); |
| 27 | 27 | int atomic64_add_unless_cx8(atomic64_t *v, long long a, long long u); | |
| 28 | /** | 28 | EXPORT_SYMBOL(atomic64_add_unless_cx8); |
| 29 | * atomic64_xchg - xchg atomic64 variable | 29 | |
| 30 | * @ptr: pointer to type atomic64_t | 30 | #ifndef CONFIG_X86_CMPXCHG64 |
| 31 | * @new_val: value to assign | 31 | long long atomic64_read_386(long long, const atomic64_t *v); |
| 32 | * | 32 | EXPORT_SYMBOL(atomic64_read_386); |
| 33 | * Atomically xchgs the value of @ptr to @new_val and returns | 33 | long long atomic64_set_386(long long, const atomic64_t *v); |
| 34 | * the old value. | 34 | EXPORT_SYMBOL(atomic64_set_386); |
| 35 | */ | 35 | long long atomic64_xchg_386(long long, unsigned high); |
| 36 | u64 atomic64_xchg(atomic64_t *ptr, u64 new_val) | 36 | EXPORT_SYMBOL(atomic64_xchg_386); |
| 37 | { | 37 | long long atomic64_add_return_386(long long a, atomic64_t *v); |
| 38 | /* | 38 | EXPORT_SYMBOL(atomic64_add_return_386); |
| 39 | * Try first with a (possibly incorrect) assumption about | 39 | long long atomic64_sub_return_386(long long a, atomic64_t *v); |
| 40 | * what we have there. We'll do two loops most likely, | 40 | EXPORT_SYMBOL(atomic64_sub_return_386); |
| 41 | * but we'll get an ownership MESI transaction straight away | 41 | long long atomic64_inc_return_386(long long a, atomic64_t *v); |
| 42 | * instead of a read transaction followed by a | 42 | EXPORT_SYMBOL(atomic64_inc_return_386); |
| 43 | * flush-for-ownership transaction: | 43 | long long atomic64_dec_return_386(long long a, atomic64_t *v); |
| 44 | */ | 44 | EXPORT_SYMBOL(atomic64_dec_return_386); |
| 45 | u64 old_val, real_val = 0; | 45 | long long atomic64_add_386(long long a, atomic64_t *v); |
| 46 | 46 | EXPORT_SYMBOL(atomic64_add_386); | |
| 47 | do { | 47 | long long atomic64_sub_386(long long a, atomic64_t *v); |
| 48 | old_val = real_val; | 48 | EXPORT_SYMBOL(atomic64_sub_386); |
| 49 | 49 | long long atomic64_inc_386(long long a, atomic64_t *v); | |
| 50 | real_val = atomic64_cmpxchg(ptr, old_val, new_val); | 50 | EXPORT_SYMBOL(atomic64_inc_386); |
| 51 | 51 | long long atomic64_dec_386(long long a, atomic64_t *v); | |
| 52 | } while (real_val != old_val); | 52 | EXPORT_SYMBOL(atomic64_dec_386); |
| 53 | 53 | long long atomic64_dec_if_positive_386(atomic64_t *v); | |
| 54 | return old_val; | 54 | EXPORT_SYMBOL(atomic64_dec_if_positive_386); |
| 55 | } | 55 | int atomic64_inc_not_zero_386(atomic64_t *v); |
| 56 | EXPORT_SYMBOL(atomic64_xchg); | 56 | EXPORT_SYMBOL(atomic64_inc_not_zero_386); |
| 57 | 57 | int atomic64_add_unless_386(atomic64_t *v, long long a, long long u); | |
| 58 | /** | 58 | EXPORT_SYMBOL(atomic64_add_unless_386); |
| 59 | * atomic64_set - set atomic64 variable | 59 | #endif |
| 60 | * @ptr: pointer to type atomic64_t | ||
| 61 | * @new_val: value to assign | ||
| 62 | * | ||
| 63 | * Atomically sets the value of @ptr to @new_val. | ||
| 64 | */ | ||
| 65 | void atomic64_set(atomic64_t *ptr, u64 new_val) | ||
| 66 | { | ||
| 67 | atomic64_xchg(ptr, new_val); | ||
| 68 | } | ||
| 69 | EXPORT_SYMBOL(atomic64_set); | ||
| 70 | |||
| 71 | /** | ||
| 72 | EXPORT_SYMBOL(atomic64_read); | ||
| 73 | * atomic64_add_return - add and return | ||
| 74 | * @delta: integer value to add | ||
| 75 | * @ptr: pointer to type atomic64_t | ||
| 76 | * | ||
| 77 | * Atomically adds @delta to @ptr and returns @delta + *@ptr | ||
| 78 | */ | ||
| 79 | noinline u64 atomic64_add_return(u64 delta, atomic64_t *ptr) | ||
| 80 | { | ||
| 81 | /* | ||
| 82 | * Try first with a (possibly incorrect) assumption about | ||
| 83 | * what we have there. We'll do two loops most likely, | ||
| 84 | * but we'll get an ownership MESI transaction straight away | ||
| 85 | * instead of a read transaction followed by a | ||
| 86 | * flush-for-ownership transaction: | ||
| 87 | */ | ||
| 88 | u64 old_val, new_val, real_val = 0; | ||
| 89 | |||
| 90 | do { | ||
| 91 | old_val = real_val; | ||
| 92 | new_val = old_val + delta; | ||
| 93 | |||
| 94 | real_val = atomic64_cmpxchg(ptr, old_val, new_val); | ||
| 95 | |||
| 96 | } while (real_val != old_val); | ||
| 97 | |||
| 98 | return new_val; | ||
| 99 | } | ||
| 100 | EXPORT_SYMBOL(atomic64_add_return); | ||
| 101 | |||
| 102 | u64 atomic64_sub_return(u64 delta, atomic64_t *ptr) | ||
| 103 | { | ||
| 104 | return atomic64_add_return(-delta, ptr); | ||
| 105 | } | ||
| 106 | EXPORT_SYMBOL(atomic64_sub_return); | ||
| 107 | |||
| 108 | u64 atomic64_inc_return(atomic64_t *ptr) | ||
| 109 | { | ||
| 110 | return atomic64_add_return(1, ptr); | ||
| 111 | } | ||
| 112 | EXPORT_SYMBOL(atomic64_inc_return); | ||
| 113 | |||
| 114 | u64 atomic64_dec_return(atomic64_t *ptr) | ||
| 115 | { | ||
| 116 | return atomic64_sub_return(1, ptr); | ||
| 117 | } | ||
| 118 | EXPORT_SYMBOL(atomic64_dec_return); | ||
| 119 | |||
| 120 | /** | ||
| 121 | * atomic64_add - add integer to atomic64 variable | ||
| 122 | * @delta: integer value to add | ||
| 123 | * @ptr: pointer to type atomic64_t | ||
| 124 | * | ||
| 125 | * Atomically adds @delta to @ptr. | ||
| 126 | */ | ||
| 127 | void atomic64_add(u64 delta, atomic64_t *ptr) | ||
| 128 | { | ||
| 129 | atomic64_add_return(delta, ptr); | ||
| 130 | } | ||
| 131 | EXPORT_SYMBOL(atomic64_add); | ||
| 132 | |||
| 133 | /** | ||
| 134 | * atomic64_sub - subtract the atomic64 variable | ||
| 135 | * @delta: integer value to subtract | ||
| 136 | * @ptr: pointer to type atomic64_t | ||
| 137 | * | ||
| 138 | * Atomically subtracts @delta from @ptr. | ||
| 139 | */ | ||
| 140 | void atomic64_sub(u64 delta, atomic64_t *ptr) | ||
| 141 | { | ||
| 142 | atomic64_add(-delta, ptr); | ||
| 143 | } | ||
| 144 | EXPORT_SYMBOL(atomic64_sub); | ||
| 145 | |||
| 146 | /** | ||
| 147 | * atomic64_sub_and_test - subtract value from variable and test result | ||
| 148 | * @delta: integer value to subtract | ||
| 149 | * @ptr: pointer to type atomic64_t | ||
| 150 | * | ||
| 151 | * Atomically subtracts @delta from @ptr and returns | ||
| 152 | * true if the result is zero, or false for all | ||
| 153 | * other cases. | ||
| 154 | */ | ||
| 155 | int atomic64_sub_and_test(u64 delta, atomic64_t *ptr) | ||
| 156 | { | ||
| 157 | u64 new_val = atomic64_sub_return(delta, ptr); | ||
| 158 | |||
| 159 | return new_val == 0; | ||
| 160 | } | ||
| 161 | EXPORT_SYMBOL(atomic64_sub_and_test); | ||
| 162 | |||
| 163 | /** | ||
| 164 | * atomic64_inc - increment atomic64 variable | ||
| 165 | * @ptr: pointer to type atomic64_t | ||
| 166 | * | ||
| 167 | * Atomically increments @ptr by 1. | ||
| 168 | */ | ||
| 169 | void atomic64_inc(atomic64_t *ptr) | ||
| 170 | { | ||
| 171 | atomic64_add(1, ptr); | ||
| 172 | } | ||
| 173 | EXPORT_SYMBOL(atomic64_inc); | ||
| 174 | |||
| 175 | /** | ||
| 176 | * atomic64_dec - decrement atomic64 variable | ||
| 177 | * @ptr: pointer to type atomic64_t | ||
| 178 | * | ||
| 179 | * Atomically decrements @ptr by 1. | ||
| 180 | */ | ||
| 181 | void atomic64_dec(atomic64_t *ptr) | ||
| 182 | { | ||
| 183 | atomic64_sub(1, ptr); | ||
| 184 | } | ||
| 185 | EXPORT_SYMBOL(atomic64_dec); | ||
| 186 | |||
| 187 | /** | ||
| 188 | * atomic64_dec_and_test - decrement and test | ||
| 189 | * @ptr: pointer to type atomic64_t | ||
| 190 | * | ||
| 191 | * Atomically decrements @ptr by 1 and | ||
| 192 | * returns true if the result is 0, or false for all other | ||
| 193 | * cases. | ||
| 194 | */ | ||
| 195 | int atomic64_dec_and_test(atomic64_t *ptr) | ||
| 196 | { | ||
| 197 | return atomic64_sub_and_test(1, ptr); | ||
| 198 | } | ||
| 199 | EXPORT_SYMBOL(atomic64_dec_and_test); | ||
| 200 | |||
| 201 | /** | ||
| 202 | * atomic64_inc_and_test - increment and test | ||
| 203 | * @ptr: pointer to type atomic64_t | ||
| 204 | * | ||
| 205 | * Atomically increments @ptr by 1 | ||
| 206 | * and returns true if the result is zero, or false for all | ||
| 207 | * other cases. | ||
| 208 | */ | ||
| 209 | int atomic64_inc_and_test(atomic64_t *ptr) | ||
| 210 | { | ||
| 211 | return atomic64_sub_and_test(-1, ptr); | ||
| 212 | } | ||
| 213 | EXPORT_SYMBOL(atomic64_inc_and_test); | ||
| 214 | |||
| 215 | /** | ||
| 216 | * atomic64_add_negative - add and test if negative | ||
| 217 | * @delta: integer value to add | ||
| 218 | * @ptr: pointer to type atomic64_t | ||
| 219 | * | ||
| 220 | * Atomically adds @delta to @ptr and returns true | ||
| 221 | * if the result is negative, or false when | ||
| 222 | * result is greater than or equal to zero. | ||
| 223 | */ | ||
| 224 | int atomic64_add_negative(u64 delta, atomic64_t *ptr) | ||
| 225 | { | ||
| 226 | s64 new_val = atomic64_add_return(delta, ptr); | ||
| 227 | |||
| 228 | return new_val < 0; | ||
| 229 | } | ||
| 230 | EXPORT_SYMBOL(atomic64_add_negative); | ||
diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
new file mode 100644
index 000000000000..5db07fe4a0ca
--- /dev/null
+++ b/arch/x86/lib/atomic64_386_32.S
@@ -0,0 +1,175 @@
| 1 | /* | ||
| 2 | * atomic64_t for 386/486 | ||
| 3 | * | ||
| 4 | * Copyright © 2010 Luca Barbieri | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License as published by | ||
| 8 | * the Free Software Foundation; either version 2 of the License, or | ||
| 9 | * (at your option) any later version. | ||
| 10 | */ | ||
| 11 | |||
| 12 | #include <linux/linkage.h> | ||
| 13 | #include <asm/alternative-asm.h> | ||
| 14 | #include <asm/dwarf2.h> | ||
| 15 | |||
| 16 | /* if you want SMP support, implement these with real spinlocks */ | ||
| 17 | .macro LOCK reg | ||
| 18 | pushfl | ||
| 19 | CFI_ADJUST_CFA_OFFSET 4 | ||
| 20 | cli | ||
| 21 | .endm | ||
| 22 | |||
| 23 | .macro UNLOCK reg | ||
| 24 | popfl | ||
| 25 | CFI_ADJUST_CFA_OFFSET -4 | ||
| 26 | .endm | ||
| 27 | |||
| 28 | .macro BEGIN func reg | ||
| 29 | $v = \reg | ||
| 30 | |||
| 31 | ENTRY(atomic64_\func\()_386) | ||
| 32 | CFI_STARTPROC | ||
| 33 | LOCK $v | ||
| 34 | |||
| 35 | .macro RETURN | ||
| 36 | UNLOCK $v | ||
| 37 | ret | ||
| 38 | .endm | ||
| 39 | |||
| 40 | .macro END_ | ||
| 41 | CFI_ENDPROC | ||
| 42 | ENDPROC(atomic64_\func\()_386) | ||
| 43 | .purgem RETURN | ||
| 44 | .purgem END_ | ||
| 45 | .purgem END | ||
| 46 | .endm | ||
| 47 | |||
| 48 | .macro END | ||
| 49 | RETURN | ||
| 50 | END_ | ||
| 51 | .endm | ||
| 52 | .endm | ||
| 53 | |||
| 54 | BEGIN read %ecx | ||
| 55 | movl ($v), %eax | ||
| 56 | movl 4($v), %edx | ||
| 57 | END | ||
| 58 | |||
| 59 | BEGIN set %esi | ||
| 60 | movl %ebx, ($v) | ||
| 61 | movl %ecx, 4($v) | ||
| 62 | END | ||
| 63 | |||
| 64 | BEGIN xchg %esi | ||
| 65 | movl ($v), %eax | ||
| 66 | movl 4($v), %edx | ||
| 67 | movl %ebx, ($v) | ||
| 68 | movl %ecx, 4($v) | ||
| 69 | END | ||
| 70 | |||
| 71 | BEGIN add %ecx | ||
| 72 | addl %eax, ($v) | ||
| 73 | adcl %edx, 4($v) | ||
| 74 | END | ||
| 75 | |||
| 76 | BEGIN add_return %ecx | ||
| 77 | addl ($v), %eax | ||
| 78 | adcl 4($v), %edx | ||
| 79 | movl %eax, ($v) | ||
| 80 | movl %edx, 4($v) | ||
| 81 | END | ||
| 82 | |||
| 83 | BEGIN sub %ecx | ||
| 84 | subl %eax, ($v) | ||
| 85 | sbbl %edx, 4($v) | ||
| 86 | END | ||
| 87 | |||
| 88 | BEGIN sub_return %ecx | ||
| 89 | negl %edx | ||
| 90 | negl %eax | ||
| 91 | sbbl $0, %edx | ||
| 92 | addl ($v), %eax | ||
| 93 | adcl 4($v), %edx | ||
| 94 | movl %eax, ($v) | ||
| 95 | movl %edx, 4($v) | ||
| 96 | END | ||
| 97 | |||
| 98 | BEGIN inc %esi | ||
| 99 | addl $1, ($v) | ||
| 100 | adcl $0, 4($v) | ||
| 101 | END | ||
| 102 | |||
| 103 | BEGIN inc_return %esi | ||
| 104 | movl ($v), %eax | ||
| 105 | movl 4($v), %edx | ||
| 106 | addl $1, %eax | ||
| 107 | adcl $0, %edx | ||
| 108 | movl %eax, ($v) | ||
| 109 | movl %edx, 4($v) | ||
| 110 | END | ||
| 111 | |||
| 112 | BEGIN dec %esi | ||
| 113 | subl $1, ($v) | ||
| 114 | sbbl $0, 4($v) | ||
| 115 | END | ||
| 116 | |||
| 117 | BEGIN dec_return %esi | ||
| 118 | movl ($v), %eax | ||
| 119 | movl 4($v), %edx | ||
| 120 | subl $1, %eax | ||
| 121 | sbbl $0, %edx | ||
| 122 | movl %eax, ($v) | ||
| 123 | movl %edx, 4($v) | ||
| 124 | END | ||
| 125 | |||
| 126 | BEGIN add_unless %ecx | ||
| 127 | addl %eax, %esi | ||
| 128 | adcl %edx, %edi | ||
| 129 | addl ($v), %eax | ||
| 130 | adcl 4($v), %edx | ||
| 131 | cmpl %eax, %esi | ||
| 132 | je 3f | ||
| 133 | 1: | ||
| 134 | movl %eax, ($v) | ||
| 135 | movl %edx, 4($v) | ||
| 136 | xorl %eax, %eax | ||
| 137 | 2: | ||
| 138 | RETURN | ||
| 139 | 3: | ||
| 140 | cmpl %edx, %edi | ||
| 141 | jne 1b | ||
| 142 | movl $1, %eax | ||
| 143 | jmp 2b | ||
| 144 | END_ | ||
| 145 | |||
| 146 | BEGIN inc_not_zero %esi | ||
| 147 | movl ($v), %eax | ||
| 148 | movl 4($v), %edx | ||
| 149 | testl %eax, %eax | ||
| 150 | je 3f | ||
| 151 | 1: | ||
| 152 | addl $1, %eax | ||
| 153 | adcl $0, %edx | ||
| 154 | movl %eax, ($v) | ||
| 155 | movl %edx, 4($v) | ||
| 156 | xorl %eax, %eax | ||
| 157 | 2: | ||
| 158 | RETURN | ||
| 159 | 3: | ||
| 160 | testl %edx, %edx | ||
| 161 | jne 1b | ||
| 162 | movl $1, %eax | ||
| 163 | jmp 2b | ||
| 164 | END_ | ||
| 165 | |||
| 166 | BEGIN dec_if_positive %esi | ||
| 167 | movl ($v), %eax | ||
| 168 | movl 4($v), %edx | ||
| 169 | subl $1, %eax | ||
| 170 | sbbl $0, %edx | ||
| 171 | js 1f | ||
| 172 | movl %eax, ($v) | ||
| 173 | movl %edx, 4($v) | ||
| 174 | 1: | ||
| 175 | END | ||
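
For readers less used to the assembly above: each _386 routine is the interrupt-disabled equivalent of a plain 64-bit read-modify-write, which is why the file notes that SMP would need real spinlocks. A rough C rendering for illustration only; it assumes UP and uses local_irq_save()/local_irq_restore() where the assembly does pushfl/cli ... popfl.

	#include <linux/irqflags.h>	/* local_irq_save/local_irq_restore */
	#include <asm/atomic.h>		/* atomic64_t */

	/* C sketch of atomic64_add_return_386: correct only on UP, because
	 * the "lock" is nothing more than disabling local interrupts. */
	static long long atomic64_add_return_386_sketch(long long i, atomic64_t *v)
	{
		unsigned long flags;
		long long ret;

		local_irq_save(flags);		/* pushfl; cli */
		ret = v->counter + i;		/* addl/adcl on the low and high words */
		v->counter = ret;		/* movl the two halves back */
		local_irq_restore(flags);	/* popfl */
		return ret;
	}
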
diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
new file mode 100644
index 000000000000..e49c4ebca9f4
--- /dev/null
+++ b/arch/x86/lib/atomic64_cx8_32.S
@@ -0,0 +1,225 @@
| 1 | /* | ||
| 2 | * atomic64_t for 586+ | ||
| 3 | * | ||
| 4 | * Copyright © 2010 Luca Barbieri | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License as published by | ||
| 8 | * the Free Software Foundation; either version 2 of the License, or | ||
| 9 | * (at your option) any later version. | ||
| 10 | */ | ||
| 11 | |||
| 12 | #include <linux/linkage.h> | ||
| 13 | #include <asm/alternative-asm.h> | ||
| 14 | #include <asm/dwarf2.h> | ||
| 15 | |||
| 16 | .macro SAVE reg | ||
| 17 | pushl %\reg | ||
| 18 | CFI_ADJUST_CFA_OFFSET 4 | ||
| 19 | CFI_REL_OFFSET \reg, 0 | ||
| 20 | .endm | ||
| 21 | |||
| 22 | .macro RESTORE reg | ||
| 23 | popl %\reg | ||
| 24 | CFI_ADJUST_CFA_OFFSET -4 | ||
| 25 | CFI_RESTORE \reg | ||
| 26 | .endm | ||
| 27 | |||
| 28 | .macro read64 reg | ||
| 29 | movl %ebx, %eax | ||
| 30 | movl %ecx, %edx | ||
| 31 | /* we need LOCK_PREFIX since otherwise cmpxchg8b always does the write */ | ||
| 32 | LOCK_PREFIX | ||
| 33 | cmpxchg8b (\reg) | ||
| 34 | .endm | ||
| 35 | |||
| 36 | ENTRY(atomic64_read_cx8) | ||
| 37 | CFI_STARTPROC | ||
| 38 | |||
| 39 | read64 %ecx | ||
| 40 | ret | ||
| 41 | CFI_ENDPROC | ||
| 42 | ENDPROC(atomic64_read_cx8) | ||
| 43 | |||
| 44 | ENTRY(atomic64_set_cx8) | ||
| 45 | CFI_STARTPROC | ||
| 46 | |||
| 47 | 1: | ||
| 48 | /* we don't need LOCK_PREFIX since aligned 64-bit writes | ||
| 49 | * are atomic on 586 and newer */ | ||
| 50 | cmpxchg8b (%esi) | ||
| 51 | jne 1b | ||
| 52 | |||
| 53 | ret | ||
| 54 | CFI_ENDPROC | ||
| 55 | ENDPROC(atomic64_set_cx8) | ||
| 56 | |||
| 57 | ENTRY(atomic64_xchg_cx8) | ||
| 58 | CFI_STARTPROC | ||
| 59 | |||
| 60 | movl %ebx, %eax | ||
| 61 | movl %ecx, %edx | ||
| 62 | 1: | ||
| 63 | LOCK_PREFIX | ||
| 64 | cmpxchg8b (%esi) | ||
| 65 | jne 1b | ||
| 66 | |||
| 67 | ret | ||
| 68 | CFI_ENDPROC | ||
| 69 | ENDPROC(atomic64_xchg_cx8) | ||
| 70 | |||
| 71 | .macro addsub_return func ins insc | ||
| 72 | ENTRY(atomic64_\func\()_return_cx8) | ||
| 73 | CFI_STARTPROC | ||
| 74 | SAVE ebp | ||
| 75 | SAVE ebx | ||
| 76 | SAVE esi | ||
| 77 | SAVE edi | ||
| 78 | |||
| 79 | movl %eax, %esi | ||
| 80 | movl %edx, %edi | ||
| 81 | movl %ecx, %ebp | ||
| 82 | |||
| 83 | read64 %ebp | ||
| 84 | 1: | ||
| 85 | movl %eax, %ebx | ||
| 86 | movl %edx, %ecx | ||
| 87 | \ins\()l %esi, %ebx | ||
| 88 | \insc\()l %edi, %ecx | ||
| 89 | LOCK_PREFIX | ||
| 90 | cmpxchg8b (%ebp) | ||
| 91 | jne 1b | ||
| 92 | |||
| 93 | 10: | ||
| 94 | movl %ebx, %eax | ||
| 95 | movl %ecx, %edx | ||
| 96 | RESTORE edi | ||
| 97 | RESTORE esi | ||
| 98 | RESTORE ebx | ||
| 99 | RESTORE ebp | ||
| 100 | ret | ||
| 101 | CFI_ENDPROC | ||
| 102 | ENDPROC(atomic64_\func\()_return_cx8) | ||
| 103 | .endm | ||
| 104 | |||
| 105 | addsub_return add add adc | ||
| 106 | addsub_return sub sub sbb | ||
| 107 | |||
| 108 | .macro incdec_return func ins insc | ||
| 109 | ENTRY(atomic64_\func\()_return_cx8) | ||
| 110 | CFI_STARTPROC | ||
| 111 | SAVE ebx | ||
| 112 | |||
| 113 | read64 %esi | ||
| 114 | 1: | ||
| 115 | movl %eax, %ebx | ||
| 116 | movl %edx, %ecx | ||
| 117 | \ins\()l $1, %ebx | ||
| 118 | \insc\()l $0, %ecx | ||
| 119 | LOCK_PREFIX | ||
| 120 | cmpxchg8b (%esi) | ||
| 121 | jne 1b | ||
| 122 | |||
| 123 | 10: | ||
| 124 | movl %ebx, %eax | ||
| 125 | movl %ecx, %edx | ||
| 126 | RESTORE ebx | ||
| 127 | ret | ||
| 128 | CFI_ENDPROC | ||
| 129 | ENDPROC(atomic64_\func\()_return_cx8) | ||
| 130 | .endm | ||
| 131 | |||
| 132 | incdec_return inc add adc | ||
| 133 | incdec_return dec sub sbb | ||
| 134 | |||
| 135 | ENTRY(atomic64_dec_if_positive_cx8) | ||
| 136 | CFI_STARTPROC | ||
| 137 | SAVE ebx | ||
| 138 | |||
| 139 | read64 %esi | ||
| 140 | 1: | ||
| 141 | movl %eax, %ebx | ||
| 142 | movl %edx, %ecx | ||
| 143 | subl $1, %ebx | ||
| 144 | sbb $0, %ecx | ||
| 145 | js 2f | ||
| 146 | LOCK_PREFIX | ||
| 147 | cmpxchg8b (%esi) | ||
| 148 | jne 1b | ||
| 149 | |||
| 150 | 2: | ||
| 151 | movl %ebx, %eax | ||
| 152 | movl %ecx, %edx | ||
| 153 | RESTORE ebx | ||
| 154 | ret | ||
| 155 | CFI_ENDPROC | ||
| 156 | ENDPROC(atomic64_dec_if_positive_cx8) | ||
| 157 | |||
| 158 | ENTRY(atomic64_add_unless_cx8) | ||
| 159 | CFI_STARTPROC | ||
| 160 | SAVE ebp | ||
| 161 | SAVE ebx | ||
| 162 | /* these just push these two parameters on the stack */ | ||
| 163 | SAVE edi | ||
| 164 | SAVE esi | ||
| 165 | |||
| 166 | movl %ecx, %ebp | ||
| 167 | movl %eax, %esi | ||
| 168 | movl %edx, %edi | ||
| 169 | |||
| 170 | read64 %ebp | ||
| 171 | 1: | ||
| 172 | cmpl %eax, 0(%esp) | ||
| 173 | je 4f | ||
| 174 | 2: | ||
| 175 | movl %eax, %ebx | ||
| 176 | movl %edx, %ecx | ||
| 177 | addl %esi, %ebx | ||
| 178 | adcl %edi, %ecx | ||
| 179 | LOCK_PREFIX | ||
| 180 | cmpxchg8b (%ebp) | ||
| 181 | jne 1b | ||
| 182 | |||
| 183 | xorl %eax, %eax | ||
| 184 | 3: | ||
| 185 | addl $8, %esp | ||
| 186 | CFI_ADJUST_CFA_OFFSET -8 | ||
| 187 | RESTORE ebx | ||
| 188 | RESTORE ebp | ||
| 189 | ret | ||
| 190 | 4: | ||
| 191 | cmpl %edx, 4(%esp) | ||
| 192 | jne 2b | ||
| 193 | movl $1, %eax | ||
| 194 | jmp 3b | ||
| 195 | CFI_ENDPROC | ||
| 196 | ENDPROC(atomic64_add_unless_cx8) | ||
| 197 | |||
| 198 | ENTRY(atomic64_inc_not_zero_cx8) | ||
| 199 | CFI_STARTPROC | ||
| 200 | SAVE ebx | ||
| 201 | |||
| 202 | read64 %esi | ||
| 203 | 1: | ||
| 204 | testl %eax, %eax | ||
| 205 | je 4f | ||
| 206 | 2: | ||
| 207 | movl %eax, %ebx | ||
| 208 | movl %edx, %ecx | ||
| 209 | addl $1, %ebx | ||
| 210 | adcl $0, %ecx | ||
| 211 | LOCK_PREFIX | ||
| 212 | cmpxchg8b (%esi) | ||
| 213 | jne 1b | ||
| 214 | |||
| 215 | xorl %eax, %eax | ||
| 216 | 3: | ||
| 217 | RESTORE ebx | ||
| 218 | ret | ||
| 219 | 4: | ||
| 220 | testl %edx, %edx | ||
| 221 | jne 2b | ||
| 222 | movl $1, %eax | ||
| 223 | jmp 3b | ||
| 224 | CFI_ENDPROC | ||
| 225 | ENDPROC(atomic64_inc_not_zero_cx8) | ||
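
All of the _cx8 read-modify-write routines above share the same shape: read the current 64-bit value (the read64 macro), compute the new value in ebx:ecx, and retry lock cmpxchg8b until no other CPU has changed the location in the meantime. A hedged C equivalent built on cmpxchg64(); the _sketch name is illustrative, not part of the patch.

	#include <asm/atomic.h>		/* atomic64_t, atomic64_read */
	#include <asm/cmpxchg.h>	/* cmpxchg64 */

	/* C sketch of atomic64_add_return_cx8: the usual compare-and-exchange loop */
	static long long atomic64_add_return_cx8_sketch(long long i, atomic64_t *v)
	{
		long long old, new, seen;

		old = atomic64_read(v);	/* read64: lock cmpxchg8b with expected == new */
		for (;;) {
			new = old + i;
			seen = cmpxchg64(&v->counter, old, new);
			if (seen == old)
				return new;	/* exchange succeeded */
			old = seen;		/* another CPU changed it; retry with fresh value */
		}
	}
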
