author		Davidlohr Bueso <dave@stgolabs.net>	2015-01-30 04:14:27 -0500
committer	Ingo Molnar <mingo@kernel.org>		2015-02-18 10:57:18 -0500
commit		1a99367023f6ac664365a37fa508b059e31d0e88 (patch)
tree		9d066f2915a65fa9180fd7c0d69b5c3991eb1d71
parent		b3fd4f03ca0b9952221f39ae6790e698bf4b39e7 (diff)
locking/rwsem: Check for active lock before bailing on spinning
37e9562453b ("locking/rwsem: Allow conservative optimistic spinning when
readers have lock") forced the default for optimistic spinning to be
disabled if the lock owner was nil, which makes much sense for readers.
However, while it is not our priority, we can make some optimizations
for write-mostly workloads. We can bail the spinning step and still be
conservative if there are any active tasks, otherwise there's really no
reason not to spin, as the semaphore is most likely unlocked.

This patch recovers most of a Unixbench 'execl' benchmark throughput by
sleeping less and making better average system usage:

  before:
  CPU     %user     %nice   %system   %iowait    %steal     %idle
  all      0.60      0.00      8.02      0.00      0.00     91.38

  after:
  CPU     %user     %nice   %system   %iowait    %steal     %idle
  all      1.22      0.00     70.18      0.00      0.00     28.60

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Jason Low <jason.low2@hp.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michel Lespinasse <walken@google.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Link: http://lkml.kernel.org/r/1422609267-15102-6-git-send-email-dave@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
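To illustrate the "any active tasks" test described above (added in the hunk
below as "count & RWSEM_ACTIVE_MASK"), here is a minimal stand-alone sketch of
the owner-is-NULL decision. It assumes the 64-bit x86 count encoding from
asm/rwsem.h and repeats those constants purely for illustration; it is a model
of the check, not kernel code, and covers only the branch where sem->owner is
NULL.

/* Model of the sem->owner == NULL spinning decision; constants mirror the
 * 64-bit x86 rwsem count encoding and are duplicated here only for the demo. */
#include <stdio.h>

#define RWSEM_ACTIVE_MASK	0xffffffffL
#define RWSEM_ACTIVE_BIAS	0x00000001L
#define RWSEM_WAITING_BIAS	(-RWSEM_ACTIVE_MASK - 1)

static int keep_spinning_when_unowned(long count)
{
	/* A non-zero active part means readers (or a writer) still hold the
	 * lock even though no owner is recorded, so be conservative and bail. */
	return !(count & RWSEM_ACTIVE_MASK);
}

int main(void)
{
	printf("unlocked:        %d\n", keep_spinning_when_unowned(0L));
	printf("two readers:     %d\n", keep_spinning_when_unowned(2 * RWSEM_ACTIVE_BIAS));
	printf("reader + waiter: %d\n",
	       keep_spinning_when_unowned(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS));
	return 0;
}

In this model the would-be spinner keeps going only when the count is zero
(semaphore most likely unlocked); any active bias in the low word makes it
bail, which is the conservative behaviour the patch keeps for readers.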
-rw-r--r--	kernel/locking/rwsem-xadd.c	27
1 file changed, 17 insertions(+), 10 deletions(-)
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 1c0d11e8ce34..e4ad019e23f5 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -298,23 +298,30 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 {
 	struct task_struct *owner;
-	bool on_cpu = false;
+	bool ret = true;
 
 	if (need_resched())
 		return false;
 
 	rcu_read_lock();
 	owner = ACCESS_ONCE(sem->owner);
-	if (owner)
-		on_cpu = owner->on_cpu;
-	rcu_read_unlock();
+	if (!owner) {
+		long count = ACCESS_ONCE(sem->count);
+		/*
+		 * If sem->owner is not set, yet we have just recently entered the
+		 * slowpath with the lock being active, then there is a possibility
+		 * reader(s) may have the lock. To be safe, bail spinning in these
+		 * situations.
+		 */
+		if (count & RWSEM_ACTIVE_MASK)
+			ret = false;
+		goto done;
+	}
 
-	/*
-	 * If sem->owner is not set, yet we have just recently entered the
-	 * slowpath, then there is a possibility reader(s) may have the lock.
-	 * To be safe, avoid spinning in these situations.
-	 */
-	return on_cpu;
+	ret = owner->on_cpu;
+done:
+	rcu_read_unlock();
+	return ret;
 }
 
 static inline bool owner_running(struct rw_semaphore *sem,
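For readability, this is how rwsem_can_spin_on_owner() reads once the hunk
above is applied; it is simply the post-patch side of the diff stitched back
together (kernel-internal code, so ACCESS_ONCE(), RWSEM_ACTIVE_MASK and
struct rw_semaphore come from the surrounding kernel sources and are not
redefined here):

static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	bool ret = true;

	if (need_resched())
		return false;

	rcu_read_lock();
	owner = ACCESS_ONCE(sem->owner);
	if (!owner) {
		long count = ACCESS_ONCE(sem->count);
		/*
		 * If sem->owner is not set, yet we have just recently entered the
		 * slowpath with the lock being active, then there is a possibility
		 * reader(s) may have the lock. To be safe, bail spinning in these
		 * situations.
		 */
		if (count & RWSEM_ACTIVE_MASK)
			ret = false;
		goto done;
	}

	ret = owner->on_cpu;
done:
	rcu_read_unlock();
	return ret;
}

Defaulting ret to true means a writer keeps spinning whenever the lock looks
genuinely free (no owner and no active count), while the single done: label
keeps rcu_read_unlock() on every return path.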