Diffstat (limited to 'include/asm-generic/mutex-xchg.h')
-rw-r--r--	include/asm-generic/mutex-xchg.h | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index f169ec064785..a6b4a7bd6ac9 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -31,7 +31,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 		 * to ensure that any waiting tasks are woken up by the
 		 * unlock slow path.
 		 */
-		if (likely(atomic_xchg(count, -1) != 1))
+		if (likely(atomic_xchg_acquire(count, -1) != 1))
 			fail_fn(count);
 }
 
@@ -46,7 +46,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_lock_retval(atomic_t *count)
 {
-	if (unlikely(atomic_xchg(count, 0) != 1))
+	if (unlikely(atomic_xchg_acquire(count, 0) != 1))
 		if (likely(atomic_xchg(count, -1) != 1))
 			return -1;
 	return 0;
@@ -67,7 +67,7 @@ __mutex_fastpath_lock_retval(atomic_t *count)
 static inline void
 __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-	if (unlikely(atomic_xchg(count, 1) != 0))
+	if (unlikely(atomic_xchg_release(count, 1) != 0))
 		fail_fn(count);
 }
 
@@ -91,7 +91,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
-	int prev = atomic_xchg(count, 0);
+	int prev = atomic_xchg_acquire(count, 0);
 
 	if (unlikely(prev < 0)) {
 		/*
@@ -105,7 +105,7 @@ __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 		 * owner's unlock path needlessly, but that's not a problem
 		 * in practice. ]
 		 */
-		prev = atomic_xchg(count, prev);
+		prev = atomic_xchg_acquire(count, prev);
 		if (prev < 0)
 			prev = 0;
 	}
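
The patch replaces the fully ordered atomic_xchg() in the mutex fastpaths with atomic_xchg_acquire() on the lock/trylock side and atomic_xchg_release() on the unlock side: acquire/release ordering is all that lock acquisition and release require, and it avoids the full barrier on architectures where that is more expensive. The sketch below is a minimal user-space illustration of that acquire/release pattern using C11 <stdatomic.h>; the demo_* names, the count encoding comments, and the spinning slowpath are assumptions made for the example, not the kernel's implementation (whose slowpaths sleep and wake waiters via fail_fn()).

/*
 * Illustrative sketch only -- not the kernel code.  Count encoding
 * mirrors the header: 1 = unlocked, 0 = locked with no waiters,
 * -1 = locked and (possibly) contended.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct demo_mutex {
	atomic_int count;	/* 1: unlocked, 0: locked, -1: locked, contended */
};

/* Toy slowpath: spin, marking the lock contended, until we observe "unlocked". */
static void demo_lock_slowpath(struct demo_mutex *m)
{
	while (atomic_exchange_explicit(&m->count, -1, memory_order_acquire) != 1)
		;
}

static inline void demo_lock(struct demo_mutex *m)
{
	/*
	 * Acquire semantics: accesses inside the critical section cannot be
	 * reordered before this exchange -- the role played above by
	 * atomic_xchg_acquire().
	 */
	if (atomic_exchange_explicit(&m->count, 0, memory_order_acquire) != 1)
		demo_lock_slowpath(m);
}

static inline bool demo_trylock(struct demo_mutex *m)
{
	/*
	 * Acquire on success; a failed trylock needs no ordering.  (The
	 * kernel version additionally xchg()s the old value back so a
	 * failed trylock does not clobber the "contended" marker -- see
	 * the second __mutex_fastpath_trylock() hunk above.  Skipping
	 * that is harmless here because this sketch's waiters spin
	 * rather than sleep.)
	 */
	return atomic_exchange_explicit(&m->count, 0, memory_order_acquire) == 1;
}

static inline void demo_unlock(struct demo_mutex *m)
{
	/*
	 * Release semantics: everything done while holding the lock is
	 * visible before the lock reads as free -- the role played above
	 * by atomic_xchg_release().  A real implementation would wake a
	 * waiter here when the old value was -1.
	 */
	atomic_exchange_explicit(&m->count, 1, memory_order_release);
}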