about summary refs log tree commit diff stats
path: root/kernel/locking
diff options
context:
space:
mode:
authorJason Low <jason.low2@hp.com>2015-02-02 16:59:27 -0500
committerIngo Molnar <mingo@kernel.org>2015-02-18 10:57:08 -0500
commitbe1f7bf217ebb1e42190d7d0b332c89ea7871378 (patch)
tree978a3b490dcb984851fe82e92a97fbadb00aad65 /kernel/locking
parent07d2413a61db6500f58e614e873eed79d7f2ed72 (diff)
locking/mutex: Refactor mutex_spin_on_owner()
As suggested by Davidlohr, we could refactor mutex_spin_on_owner(). Currently, we split up owner_running() with mutex_spin_on_owner(). When the owner changes, we make duplicate owner checks which are not necessary. It also makes the code a bit obscure as we are using a second check to figure out why we broke out of the loop. This patch modifies it such that we remove the owner_running() function and the mutex_spin_on_owner() loop directly checks if the owner changes, if the owner is not running, or if we need to reschedule. If the owner changes, we break out of the loop and return true. If the owner is not running or if we need to reschedule, then break out of the loop and return false. Suggested-by: Davidlohr Bueso <dave@stgolabs.net> Signed-off-by: Jason Low <jason.low2@hp.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Aswin Chandramouleeswaran <aswin@hp.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Cc: Tim Chen <tim.c.chen@linux.intel.com> Cc: chegu_vinod@hp.com Cc: tglx@linutronix.de Link: http://lkml.kernel.org/r/1422914367-5574-3-git-send-email-jason.low2@hp.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/locking')
-rw-r--r-- kernel/locking/mutex.c | 47
1 files changed, 22 insertions, 25 deletions
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 49cce442f3ff..59cd6c30421e 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -217,44 +217,41 @@ ww_mutex_set_context_slowpath(struct ww_mutex *lock,
217} 217}
218 218
219#ifdef CONFIG_MUTEX_SPIN_ON_OWNER 219#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
220static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
221{
222 if (lock->owner != owner)
223 return false;
224
225 /*
226 * Ensure we emit the owner->on_cpu, dereference _after_ checking
227 * lock->owner still matches owner, if that fails, owner might
228 * point to free()d memory, if it still matches, the rcu_read_lock()
229 * ensures the memory stays valid.
230 */
231 barrier();
232
233 return owner->on_cpu;
234}
235
236/* 220/*
237 * Look out! "owner" is an entirely speculative pointer 221 * Look out! "owner" is an entirely speculative pointer
238 * access and not reliable. 222 * access and not reliable.
239 */ 223 */
240static noinline 224static noinline
241int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner) 225bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
242{ 226{
227 bool ret;
228
243 rcu_read_lock(); 229 rcu_read_lock();
244 while (owner_running(lock, owner)) { 230 while (true) {
245 if (need_resched()) 231 /* Return success when the lock owner changed */
232 if (lock->owner != owner) {
233 ret = true;
246 break; 234 break;
235 }
236
237 /*
238 * Ensure we emit the owner->on_cpu, dereference _after_
239 * checking lock->owner still matches owner, if that fails,
240 * owner might point to free()d memory, if it still matches,
241 * the rcu_read_lock() ensures the memory stays valid.
242 */
243 barrier();
244
245 if (!owner->on_cpu || need_resched()) {
246 ret = false;
247 break;
248 }
247 249
248 cpu_relax_lowlatency(); 250 cpu_relax_lowlatency();
249 } 251 }
250 rcu_read_unlock(); 252 rcu_read_unlock();
251 253
252 /* 254 return ret;
253 * We break out of the loop above on either need_resched(), when
254 * the owner is not running, or when the lock owner changed.
255 * Return success only when the lock owner changed.
256 */
257 return lock->owner != owner;
258} 255}
259 256
260/* 257/*