author     Waiman Long <Waiman.Long@hp.com>    2013-04-17 15:23:11 -0400
committer  Ingo Molnar <mingo@kernel.org>      2013-04-19 03:33:34 -0400
commit     41fcb9f230bf773656d1768b73000ef720bf00c3
tree       dd87697f88e28825668ed4c342824929b4d7070c /kernel/mutex.c
parent     8184004ed7a0bc9538f5e825615c29fc52466bab
mutex: Move mutex spinning code from sched/core.c back to mutex.c
As mentioned by Ingo, the SCHED_FEAT_OWNER_SPIN scheduler feature
bit was really just an early hack to make mutex spinning testable
with and without the feature, so it is no longer necessary.

This patch removes the SCHED_FEAT_OWNER_SPIN feature bit and moves
the mutex spinning code from kernel/sched/core.c back to
kernel/mutex.c, where it belongs.
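[Editor's note: for readers unfamiliar with scheduler feature bits, they
are runtime toggles (exposed via debugfs) defined in
kernel/sched/features.h. A rough reconstruction of how the removed bit
gated the old spinning path in kernel/sched/core.c — an illustration, not
code taken from this patch — looks like this:

	/*
	 * Reconstruction for illustration only. Before this patch, the
	 * copy of mutex_spin_on_owner() in kernel/sched/core.c could bail
	 * out when the OWNER_SPIN scheduler feature was toggled off;
	 * dropping the feature bit makes owner spinning unconditional
	 * whenever CONFIG_MUTEX_SPIN_ON_OWNER=y.
	 */
	int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
	{
		if (!sched_feat(OWNER_SPIN))
			return 0;	/* feature toggled off: never spin */

		/* ... spin while the owner stays on its CPU, as below ... */
		return lock->owner == NULL;
	}
]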
Signed-off-by: Waiman Long <Waiman.Long@hp.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Chandramouleeswaran Aswin <aswin@hp.com>
Cc: Davidlohr Bueso <davidlohr.bueso@hp.com>
Cc: Norton Scott J <scott.norton@hp.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Dave Jones <davej@redhat.com>
Cc: Clark Williams <williams@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1366226594-5506-2-git-send-email-Waiman.Long@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/mutex.c')
-rw-r--r--  kernel/mutex.c | 46
1 file changed, 46 insertions(+), 0 deletions(-)
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 52f23011b6e0..262d7177adad 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -95,6 +95,52 @@ void __sched mutex_lock(struct mutex *lock)
 EXPORT_SYMBOL(mutex_lock);
 #endif
 
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
+/*
+ * Mutex spinning code migrated from kernel/sched/core.c
+ */
+
+static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
+{
+	if (lock->owner != owner)
+		return false;
+
+	/*
+	 * Ensure we emit the owner->on_cpu, dereference _after_ checking
+	 * lock->owner still matches owner, if that fails, owner might
+	 * point to free()d memory, if it still matches, the rcu_read_lock()
+	 * ensures the memory stays valid.
+	 */
+	barrier();
+
+	return owner->on_cpu;
+}
+
+/*
+ * Look out! "owner" is an entirely speculative pointer
+ * access and not reliable.
+ */
+static noinline
+int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
+{
+	rcu_read_lock();
+	while (owner_running(lock, owner)) {
+		if (need_resched())
+			break;
+
+		arch_mutex_cpu_relax();
+	}
+	rcu_read_unlock();
+
+	/*
+	 * We break out the loop above on need_resched() and when the
+	 * owner changed, which is a sign for heavy contention. Return
+	 * success only when lock->owner is NULL.
+	 */
+	return lock->owner == NULL;
+}
+#endif
+
 static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
 
 /**
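
[Editor's note: for context, here is a rough sketch of how a caller in
the mutex lock slowpath consumes mutex_spin_on_owner(). The helper name
try_to_spin() and the exact control flow are illustrative assumptions,
not part of this patch:

	/*
	 * Illustrative sketch only (assumed caller shape). The slowpath
	 * spins while the current owner is running on another CPU; a zero
	 * return from mutex_spin_on_owner() (owner changed or we need to
	 * reschedule) sends the caller to the sleeping path instead.
	 */
	static bool try_to_spin(struct mutex *lock)
	{
		for (;;) {
			struct task_struct *owner;

			/* Speculatively read the owner; may be NULL or stale. */
			owner = ACCESS_ONCE(lock->owner);
			if (owner && !mutex_spin_on_owner(lock, owner))
				return false;	/* heavy contention: go sleep */

			/* Owner appears gone: one attempt to take the lock
			 * (count 1 means unlocked, 0 means locked). */
			if (atomic_cmpxchg(&lock->count, 1, 0) == 1)
				return true;	/* acquired without sleeping */

			if (need_resched())
				return false;

			arch_mutex_cpu_relax();
		}
	}
]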