author     Rik van Riel <riel@redhat.com>    2013-10-07 06:29:34 -0400
committer  Ingo Molnar <mingo@kernel.org>    2013-10-09 08:48:12 -0400
commit     dabe1d992414a6456e60e41f1d1ad8affc6d444d (patch)
tree       213b65fbc7864687d0ea9db96d7e768b6ff587cf /kernel/sched/fair.c
parent     0ec8aa00f2b4dc457836ef4e2662b02483e94fb7 (diff)
sched/numa: Be more careful about joining numa groups
Due to the way the pid is truncated, and tasks are moved between CPUs by
the scheduler, it is possible for the current task_numa_fault() to group
together tasks that do not actually share memory.

This patch adds a few easy sanity checks to task_numa_fault(), joining
tasks together if they share the same tsk->mm, or if the fault was on a
page with an elevated mapcount, in a shared VMA.

Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1381141781-10992-57-git-send-email-mgorman@suse.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
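The join heuristic described above is small enough to model in isolation.
The following is a standalone sketch of the two sanity checks (same
address space, or a fault already flagged as shared); struct task and the
TNF_SHARED value here are illustrative stand-ins, not the kernel's real
definitions:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel's TNF_SHARED fault flag; value is
 * illustrative only. */
#define TNF_SHARED	0x04

/* Minimal model of a task: all we need is the identity of its mm. */
struct task {
	const void *mm;		/* address space this task runs in */
};

/* Model of the two checks this patch adds: join when both tasks share
 * an mm (threads of one process), or when the fault itself was on a
 * genuinely shared page, rather than trusting the truncated pid in the
 * cpupid field alone. */
static bool should_join(const struct task *tsk, const struct task *cur,
			int flags)
{
	/* Always join threads in the same process. */
	if (tsk->mm == cur->mm)
		return true;

	/* Simple filter to avoid false positives due to PID collisions. */
	if (flags & TNF_SHARED)
		return true;

	return false;
}

int main(void)
{
	int mm_a, mm_b;
	struct task t1 = { &mm_a }, t2 = { &mm_a }, t3 = { &mm_b };

	printf("same mm, private fault:      %d\n", should_join(&t1, &t2, 0));
	printf("different mm, private fault: %d\n", should_join(&t1, &t3, 0));
	printf("different mm, shared fault:  %d\n",
	       should_join(&t1, &t3, TNF_SHARED));
	return 0;
}

Two unrelated processes with a private fault (the pid-collision case) no
longer group; they do group once the fault carries TNF_SHARED.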
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--  kernel/sched/fair.c  16
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5166b9b1af70..222c2d0b6ae2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1381,7 +1381,7 @@ static void double_lock(spinlock_t *l1, spinlock_t *l2)
 	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
 }
 
-static void task_numa_group(struct task_struct *p, int cpupid)
+static void task_numa_group(struct task_struct *p, int cpupid, int flags)
 {
 	struct numa_group *grp, *my_grp;
 	struct task_struct *tsk;
@@ -1439,10 +1439,16 @@ static void task_numa_group(struct task_struct *p, int cpupid)
 	if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
 		goto unlock;
 
-	if (!get_numa_group(grp))
-		goto unlock;
+	/* Always join threads in the same process. */
+	if (tsk->mm == current->mm)
+		join = true;
+
+	/* Simple filter to avoid false positives due to PID collisions */
+	if (flags & TNF_SHARED)
+		join = true;
 
-	join = true;
+	if (join && !get_numa_group(grp))
+		join = false;
 
 unlock:
 	rcu_read_unlock();
@@ -1539,7 +1545,7 @@ void task_numa_fault(int last_cpupid, int node, int pages, int flags)
 	} else {
 		priv = cpupid_match_pid(p, last_cpupid);
 		if (!priv && !(flags & TNF_NO_GROUP))
-			task_numa_group(p, last_cpupid);
+			task_numa_group(p, last_cpupid, flags);
 	}
 
 	/*
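One subtlety in the second hunk: get_numa_group() is now called only
after the heuristics have voted to join, and a failed reference grab
(the group being torn down concurrently) downgrades join to false
instead of jumping to unlock. This is the usual try-get pattern on a
refcount (in the kernel, typically atomic_inc_not_zero()). A standalone
model using C11 atomics; the names and types here are stand-ins, not
kernel API:

#include <stdatomic.h>
#include <stdbool.h>

/* Model of a refcounted group; zero means teardown has begun. */
struct group_model {
	atomic_int refcount;
};

/* Try-get in the style of get_numa_group(): take a reference only if
 * the object is still live. Returns false when the count has already
 * reached zero, in which case the caller abandons the join, exactly as
 * the patched code sets join = false. */
static bool try_get(struct group_model *grp)
{
	int old = atomic_load(&grp->refcount);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&grp->refcount, &old,
						 old + 1))
			return true;	/* reference taken, safe to join */
		/* CAS failed: old was refreshed with the current count;
		 * retry unless it dropped to zero. */
	}
	return false;	/* group is dying, do not join */
}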