author    Thomas Gleixner <tglx@linutronix.de>    2011-06-10 09:08:55 -0400
committer Ingo Molnar <mingo@elte.hu>    2011-07-01 04:39:07 -0400
commit    307bf9803f25a8a3f53c1012110fb74e2f893eb0 (patch)
tree      72146cbf22ea0e7d36a0734cca9d46291c40ca89 /kernel
parent    2a46dae38087e62dd5fb08a6dadf1407717ed13c (diff)
sched: Simplify mutex_spin_on_owner()
It does not make sense to rcu_read_lock/unlock() in every loop
iteration while spinning on the mutex.

Move the rcu protection outside the loop. Also simplify the
return path to always check for lock->owner == NULL which meets
the requirements of both owner changed and need_resched() caused
loop exits.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/alpine.LFD.2.02.1106101458350.11814@ionos
Signed-off-by: Ingo Molnar <mingo@elte.hu>
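For quick reference, here is a condensed sketch of how the two functions read once this patch is applied, assembled from the hunks in the diff below; it omits the surrounding kernel/sched.c context and the preprocessor guard that the trailing #endif closes.

static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
	/* The owner changed: stop spinning on it. */
	if (lock->owner != owner)
		return false;

	/*
	 * Ensure the owner->on_cpu dereference is emitted _after_ the
	 * lock->owner check above (compiler barrier only).
	 */
	barrier();

	return owner->on_cpu;
}

int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	if (!sched_feat(OWNER_SPIN))
		return 0;

	/* One rcu read-side critical section around the whole spin loop. */
	rcu_read_lock();
	while (owner_running(lock, owner)) {
		if (need_resched())
			break;

		arch_mutex_cpu_relax();
	}
	rcu_read_unlock();

	/*
	 * The loop exits on need_resched() or when the owner changed,
	 * which signals heavy contention; spinning paid off only if
	 * the lock was actually released, i.e. lock->owner is NULL.
	 */
	return lock->owner == NULL;
}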
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c  25
1 file changed, 9 insertions, 16 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 59252754fbe..e355ee72e83 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4306,11 +4306,8 @@ EXPORT_SYMBOL(schedule);
 
 static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
 {
-	bool ret = false;
-
-	rcu_read_lock();
 	if (lock->owner != owner)
-		goto fail;
+		return false;
 
 	/*
 	 * Ensure we emit the owner->on_cpu, dereference _after_ checking
@@ -4320,11 +4317,7 @@ static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
 	 */
 	barrier();
 
-	ret = owner->on_cpu;
-fail:
-	rcu_read_unlock();
-
-	return ret;
+	return owner->on_cpu;
 }
 
 /*
@@ -4336,21 +4329,21 @@ int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
 	if (!sched_feat(OWNER_SPIN))
 		return 0;
 
+	rcu_read_lock();
 	while (owner_running(lock, owner)) {
 		if (need_resched())
-			return 0;
+			break;
 
 		arch_mutex_cpu_relax();
 	}
+	rcu_read_unlock();
 
 	/*
-	 * If the owner changed to another task there is likely
-	 * heavy contention, stop spinning.
+	 * We break out the loop above on need_resched() and when the
+	 * owner changed, which is a sign for heavy contention. Return
+	 * success only when lock->owner is NULL.
 	 */
-	if (lock->owner)
-		return 0;
-
-	return 1;
+	return lock->owner == NULL;
 }
 #endif
 