aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--arch/x86/include/asm/mutex.h10
-rw-r--r--kernel/mutex.c19
2 files changed, 26 insertions(+), 3 deletions(-)
diff --git a/arch/x86/include/asm/mutex.h b/arch/x86/include/asm/mutex.h
index 7d3a48275394..bc2a0b0dcea6 100644
--- a/arch/x86/include/asm/mutex.h
+++ b/arch/x86/include/asm/mutex.h
@@ -3,3 +3,13 @@
 #else
 # include <asm/mutex_64.h>
 #endif
+
+#ifndef __ASM_MUTEX_H
+#define __ASM_MUTEX_H
+/*
+ * For the x86 architecture, it allows any negative number (besides -1) in
+ * the mutex count to indicate that some other threads are waiting on the
+ * mutex.
+ */
+#define __ARCH_ALLOW_ANY_NEGATIVE_MUTEX_COUNT	1
+#endif
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 262d7177adad..70ebd855d9e8 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -37,6 +37,17 @@
 # include <asm/mutex.h>
 #endif
 
+/*
+ * A mutex count of -1 indicates that waiters are sleeping waiting for the
+ * mutex. Some architectures can allow any negative number, not just -1, for
+ * this purpose.
+ */
+#ifdef __ARCH_ALLOW_ANY_NEGATIVE_MUTEX_COUNT
+#define	MUTEX_SHOW_NO_WAITER(mutex)	(atomic_read(&(mutex)->count) >= 0)
+#else
+#define	MUTEX_SHOW_NO_WAITER(mutex)	(atomic_read(&(mutex)->count) != -1)
+#endif
+
 void
 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 {
@@ -217,7 +228,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		if (owner && !mutex_spin_on_owner(lock, owner))
 			break;
 
-		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
+		if ((atomic_read(&lock->count) == 1) &&
+		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
 			lock_acquired(&lock->dep_map, ip);
 			mutex_set_owner(lock);
 			preempt_enable();
@@ -251,7 +263,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	list_add_tail(&waiter.list, &lock->wait_list);
 	waiter.task = task;
 
-	if (atomic_xchg(&lock->count, -1) == 1)
+	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1))
 		goto done;
 
 	lock_contended(&lock->dep_map, ip);
@@ -266,7 +278,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * that when we release the lock, we properly wake up the
 		 * other waiters:
 		 */
-		if (atomic_xchg(&lock->count, -1) == 1)
+		if (MUTEX_SHOW_NO_WAITER(lock) &&
+		    (atomic_xchg(&lock->count, -1) == 1))
 			break;
 
 		/*