path: root/include/linux/mutex.h
author		Peter Zijlstra <peterz@infradead.org>	2016-08-23 07:36:04 -0400
committer	Ingo Molnar <mingo@kernel.org>		2016-10-25 05:31:50 -0400
commit		3ca0ff571b092ee4d807f1168caa428d95b0173b
tree		bde73cbec0c0fb44da58291546da58830384fb14	/include/linux/mutex.h
parent		3ab7c086d5ec72585ef0158dbc265cb03ddc682a
locking/mutex: Rework mutex::owner
The current mutex implementation has an atomic lock word and a non-atomic owner field. This disparity leads to a number of issues with the current mutex code, as it means that we can have a locked mutex without an explicit owner (because the owner field has not been set, or has already been cleared).

This leads to a number of weird corner cases, esp. between the optimistic spinning and debug code. Where the optimistic spinning code needs the owner field updated inside the lock region, the debug code is more relaxed because the whole lock is serialized by the wait_lock. Also, the spinning code itself has a few corner cases where we need to deal with a held lock without an owner field.

Furthermore, it becomes even more of a problem when trying to fix starvation cases in the current code. We end up stacking special case on special case.

To solve this, rework the basic mutex implementation to be a single atomic word that contains the owner and uses the low bits for extra state. This matches how PI futexes and rt_mutex already work. By having the owner an integral part of the lock state, a lot of the problems disappear and we get a better option to deal with starvation cases: direct owner handoff.

Changing the basic mutex does however invalidate all the arch specific mutex code; this patch leaves that unused in place, a later patch will remove it.

Tested-by: Jason Low <jason.low2@hpe.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
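To make the packing concrete, here is a minimal illustrative sketch, not part of the patch: a task_struct pointer and low-bit state sharing one atomic word. The EXAMPLE_* names and helpers are placeholders of my own, since the actual flag handling is not shown in this header diff; only __mutex_owner() below uses the same masking idea.

	#include <linux/atomic.h>
	#include <linux/sched.h>

	/* Placeholder flag names, for illustration only. */
	#define EXAMPLE_FLAG_WAITERS	0x01	/* e.g. "waiters are queued" */
	#define EXAMPLE_FLAGS		0x03	/* mask covering the low state bits */

	/*
	 * task_struct pointers are at least 4-byte aligned, so the low two
	 * bits of the owner word are always zero and can carry extra state.
	 */
	static inline struct task_struct *example_owner(atomic_long_t *owner)
	{
		/* Strip the flag bits to recover the owning task pointer. */
		return (struct task_struct *)(atomic_long_read(owner) & ~EXAMPLE_FLAGS);
	}

	static inline unsigned long example_flags(atomic_long_t *owner)
	{
		/* The low bits carry the extra lock state. */
		return atomic_long_read(owner) & EXAMPLE_FLAGS;
	}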
Diffstat (limited to 'include/linux/mutex.h')
-rw-r--r--	include/linux/mutex.h	46
1 file changed, 30 insertions(+), 16 deletions(-)
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 2cb7531e7d7a..4d3bccabbea5 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -18,6 +18,7 @@
 #include <linux/atomic.h>
 #include <asm/processor.h>
 #include <linux/osq_lock.h>
+#include <linux/debug_locks.h>
 
 /*
  * Simple, straightforward mutexes with strict semantics:
@@ -48,16 +49,12 @@
  * locks and tasks (and only those tasks)
  */
 struct mutex {
-	/* 1: unlocked, 0: locked, negative: locked, possible waiters */
-	atomic_t		count;
+	atomic_long_t		owner;
 	spinlock_t		wait_lock;
-	struct list_head	wait_list;
-#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
-	struct task_struct	*owner;
-#endif
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 	struct optimistic_spin_queue osq; /* Spinner MCS lock */
 #endif
+	struct list_head	wait_list;
 #ifdef CONFIG_DEBUG_MUTEXES
 	void			*magic;
 #endif
@@ -66,6 +63,11 @@ struct mutex {
 #endif
 };
 
+static inline struct task_struct *__mutex_owner(struct mutex *lock)
+{
+	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x03);
+}
+
 /*
  * This is the control structure for tasks blocked on mutex,
  * which resides on the blocked task's kernel stack:
@@ -79,9 +81,20 @@ struct mutex_waiter {
 };
 
 #ifdef CONFIG_DEBUG_MUTEXES
-# include <linux/mutex-debug.h>
+
+#define __DEBUG_MUTEX_INITIALIZER(lockname)				\
+	, .magic = &lockname
+
+extern void mutex_destroy(struct mutex *lock);
+
 #else
+
 # define __DEBUG_MUTEX_INITIALIZER(lockname)
+
+static inline void mutex_destroy(struct mutex *lock) {}
+
+#endif
+
 /**
  * mutex_init - initialize the mutex
  * @mutex: the mutex to be initialized
@@ -90,14 +103,12 @@ struct mutex_waiter {
  *
  * It is not allowed to initialize an already locked mutex.
  */
-# define mutex_init(mutex) \
+#define mutex_init(mutex)						\
 do {									\
 	static struct lock_class_key __key;				\
 									\
 	__mutex_init((mutex), #mutex, &__key);				\
 } while (0)
-static inline void mutex_destroy(struct mutex *lock) {}
-#endif
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
@@ -107,7 +118,7 @@ static inline void mutex_destroy(struct mutex *lock) {}
 #endif
 
 #define __MUTEX_INITIALIZER(lockname) \
-		{ .count = ATOMIC_INIT(1) \
+		{ .owner = ATOMIC_LONG_INIT(0) \
 		, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
 		, .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
 		__DEBUG_MUTEX_INITIALIZER(lockname) \
@@ -127,7 +138,10 @@ extern void __mutex_init(struct mutex *lock, const char *name,
  */
 static inline int mutex_is_locked(struct mutex *lock)
 {
-	return atomic_read(&lock->count) != 1;
+	/*
+	 * XXX think about spin_is_locked
+	 */
+	return __mutex_owner(lock) != NULL;
 }
 
 /*
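As a usage sketch, again not part of the patch and with made-up example names: the external mutex API is unchanged by this rework, so existing callers keep working, while mutex_is_locked() now derives its answer from the packed owner word instead of the old count.

	#include <linux/bug.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(example_lock);	/* static init via __MUTEX_INITIALIZER() */

	static void example_use(void)
	{
		mutex_lock(&example_lock);
		/* With this patch, mutex_is_locked() checks __mutex_owner() != NULL. */
		WARN_ON(!mutex_is_locked(&example_lock));
		mutex_unlock(&example_lock);
	}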