aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavidlohr Bueso <davidlohr@hp.com>2014-06-04 19:06:47 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-06-04 19:53:57 -0400
commit6b4ebc3a9078c5b7b8c4cf495a0b1d2d0e0bfe7a (patch)
treeb72fad03149fb8e21284558b636bb2a8faa88cb6
parent4f115147ff802267d0aa41e361c5aa5bd933d896 (diff)
mm,vmacache: optimize overflow system-wide flushing
For single threaded workloads, we can avoid flushing and iterating through the entire list of tasks, making the whole function a lot faster, requiring only a single atomic read for the mm_users.

Signed-off-by: Davidlohr Bueso <davidlohr@hp.com>
Suggested-by: Oleg Nesterov <oleg@redhat.com>
Cc: Aswin Chandramouleeswaran <aswin@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  mm/vmacache.c  10
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/mm/vmacache.c b/mm/vmacache.c
index 658ed3b3e38d..9f25af825dec 100644
--- a/mm/vmacache.c
+++ b/mm/vmacache.c
@@ -17,6 +17,16 @@ void vmacache_flush_all(struct mm_struct *mm)
 {
 	struct task_struct *g, *p;
 
+	/*
+	 * Single threaded tasks need not iterate the entire
+	 * list of process. We can avoid the flushing as well
+	 * since the mm's seqnum was increased and don't have
+	 * to worry about other threads' seqnum. Current's
+	 * flush will occur upon the next lookup.
+	 */
+	if (atomic_read(&mm->mm_users) == 1)
+		return;
+
 	rcu_read_lock();
 	for_each_process_thread(g, p) {
 		/*