path: root/kernel/sys.c
author     Ingo Molnar <mingo@elte.hu>  2007-01-23 04:45:50 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-01-23 14:08:03 -0500
commit     1b5180b65122666a36a1a232b7b9b38b21a9dcdd (patch)
tree       a61b7cea4cd410755bd5f87ea886554ad1d05676 /kernel/sys.c
parent     b53d0b919ebe227e2b8e49b6e0ddee506be83aa8 (diff)
[PATCH] notifiers: fix blocking_notifier_call_chain() scalability
While lock-profiling the -rt kernel I noticed weird contention during mmap-intensive workloads, and the tracer showed the following gem in one of our MM hotpaths:

  threaded-2771  1....   65us : sys_munmap (sysenter_do_call)
  threaded-2771  1....   66us : profile_munmap (sys_munmap)
  threaded-2771  1....   66us : blocking_notifier_call_chain (profile_munmap)
  threaded-2771  1....   66us : rt_down_read (blocking_notifier_call_chain)

Ouch! A global rw-semaphore taken in one of the most performance-sensitive codepaths of the kernel. And I don't even have oprofile enabled!

All distro kernels have CONFIG_PROFILING enabled, so this scalability problem affects the majority of Linux users.

The fix is to enhance blocking_notifier_call_chain() to only take the lock if there appears to be work on the call-chain.

With this patch applied I get a nicely saturated system and much higher munmap performance on SMP systems. As a bonus this also fixes a similar scalability bottleneck in the thread-exit codepath: profile_task_exit() ...

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
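The "peek at the list before taking the lock" idea is independent of the notifier machinery itself. Below is a minimal standalone userspace sketch of the same fast path, assuming a GCC/Clang toolchain with pthreads; all names here (struct cb_chain, chain_call, chain_register) are invented for illustration and are not part of the kernel patch.

/*
 * Sketch: skip a reader lock entirely when the callback list looks empty.
 * A racy read of head is harmless: a caller that misses a concurrent
 * registration behaves as if it ran just before that registration, and a
 * caller that does see a non-NULL head walks the real list under the lock.
 */
#include <pthread.h>
#include <stdio.h>

struct callback {
	struct callback *next;
	void (*fn)(void *data);
};

struct cb_chain {
	pthread_rwlock_t rwsem;
	struct callback *head;
};

static void chain_call(struct cb_chain *chain, void *data)
{
	/* Fast path: empty chain, no lock taken. */
	if (!__atomic_load_n(&chain->head, __ATOMIC_RELAXED))
		return;

	pthread_rwlock_rdlock(&chain->rwsem);
	for (struct callback *cb = chain->head; cb; cb = cb->next)
		cb->fn(data);
	pthread_rwlock_unlock(&chain->rwsem);
}

static void chain_register(struct cb_chain *chain, struct callback *cb)
{
	pthread_rwlock_wrlock(&chain->rwsem);
	cb->next = chain->head;
	__atomic_store_n(&chain->head, cb, __ATOMIC_RELEASE);
	pthread_rwlock_unlock(&chain->rwsem);
}

static void hello(void *data) { printf("callback: %s\n", (const char *)data); }

int main(void)
{
	struct cb_chain chain = { PTHREAD_RWLOCK_INITIALIZER, NULL };
	struct callback cb = { NULL, hello };

	chain_call(&chain, "ignored");       /* empty chain: lock-free exit */
	chain_register(&chain, &cb);
	chain_call(&chain, "munmap event");  /* non-empty: rwlock read path */
	return 0;
}

Build with "gcc -pthread" (hypothetical file name). The kernel patch below applies exactly this shape of fast path to blocking_notifier_call_chain().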
Diffstat (limited to 'kernel/sys.c')
-rw-r--r--   kernel/sys.c   15
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/kernel/sys.c b/kernel/sys.c
index c7675c1bfdf2..6e2101dec0fc 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -323,11 +323,18 @@ EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);
 int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
 		unsigned long val, void *v)
 {
-	int ret;
+	int ret = NOTIFY_DONE;
 
-	down_read(&nh->rwsem);
-	ret = notifier_call_chain(&nh->head, val, v);
-	up_read(&nh->rwsem);
+	/*
+	 * We check the head outside the lock, but if this access is
+	 * racy then it does not matter what the result of the test
+	 * is, we re-check the list after having taken the lock anyway:
+	 */
+	if (rcu_dereference(nh->head)) {
+		down_read(&nh->rwsem);
+		ret = notifier_call_chain(&nh->head, val, v);
+		up_read(&nh->rwsem);
+	}
 	return ret;
 }
 
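For context, this is roughly how a blocking notifier chain is declared and exercised by kernel code; the chain name demo_chain and the callback are hypothetical and module boilerplate is omitted. With nothing registered, the patched blocking_notifier_call_chain() now returns NOTIFY_DONE without ever touching nh->rwsem; once a notifier is registered it falls back to the original rwsem-protected walk.

#include <linux/notifier.h>

/* Hypothetical chain and callback, for illustration only. */
static BLOCKING_NOTIFIER_HEAD(demo_chain);

static int demo_event(struct notifier_block *nb, unsigned long val, void *data)
{
	return NOTIFY_OK;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_event,
};

static void demo(void)
{
	/* Empty chain: fast path, no rwsem taken after this patch. */
	blocking_notifier_call_chain(&demo_chain, 0, NULL);

	blocking_notifier_chain_register(&demo_chain, &demo_nb);

	/* Non-empty chain: takes the rwsem and walks the callbacks. */
	blocking_notifier_call_chain(&demo_chain, 1, NULL);

	blocking_notifier_chain_unregister(&demo_chain, &demo_nb);
}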