author	Rik van Riel <riel@redhat.com>	2014-01-27 17:03:40 -0500
committer	Ingo Molnar <mingo@kernel.org>	2014-01-28 07:17:04 -0500
commit	52bf84aa206cd2c2516dfa3e03b578edf8a3242f (patch)
tree	e8acbb2c3ce90b7aed27046c7efc5a082f6ef684 /kernel/sched/fair.c
parent	a57beec5d427086cdc8d75fd51164577193fa7f4 (diff)
sched/numa, mm: Remove p->numa_migrate_deferred
Excessive migration of pages can hurt the performance of workloads that span multiple NUMA nodes. However, it turns out that the p->numa_migrate_deferred knob is a really big hammer, which does reduce migration rates, but does not actually help performance.

Now that the second stage of the automatic numa balancing code has stabilized, it is time to replace the simplistic migration deferral code with something smarter.

Signed-off-by: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Chegu Vinod <chegu_vinod@hp.com>
Link: http://lkml.kernel.org/r/1390860228-21539-2-git-send-email-riel@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--	kernel/sched/fair.c	8
1 file changed, 0 insertions(+), 8 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index efe6457ac5c8..7cdde913b4dc 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -819,14 +819,6 @@ unsigned int sysctl_numa_balancing_scan_size = 256;
 /* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
 unsigned int sysctl_numa_balancing_scan_delay = 1000;
 
-/*
- * After skipping a page migration on a shared page, skip N more numa page
- * migrations unconditionally. This reduces the number of NUMA migrations
- * in shared memory workloads, and has the effect of pulling tasks towards
- * where their memory lives, over pulling the memory towards the task.
- */
-unsigned int sysctl_numa_balancing_migrate_deferred = 16;
-
 static unsigned int task_nr_scan_windows(struct task_struct *p)
 {
 	unsigned long rss = 0;
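For reference, the deferral mechanism removed here follows a simple skip-counter pattern: after one shared-page migration is skipped, the next N candidate migrations are skipped unconditionally. The sketch below is a minimal, self-contained illustration of that pattern, not the kernel's actual implementation; the struct, the function names, and the standalone form are assumptions made for clarity.

#include <stdbool.h>

/* Mirrors the removed sysctl's default of 16; the name is illustrative. */
#define NUMA_MIGRATE_DEFERRED_DEFAULT	16

/* Hypothetical per-task state standing in for p->numa_migrate_deferred. */
struct task_numa_state {
	int numa_migrate_deferred;	/* migrations left to skip outright */
};

/* Arm the counter once a shared-page migration has been skipped. */
static void defer_numa_migrate(struct task_numa_state *t)
{
	t->numa_migrate_deferred = NUMA_MIGRATE_DEFERRED_DEFAULT;
}

/*
 * Check each subsequent migration candidate: while the counter is
 * nonzero, skip the migration unconditionally and count down.
 */
static bool numa_migrate_should_defer(struct task_numa_state *t)
{
	if (t->numa_migrate_deferred > 0) {
		t->numa_migrate_deferred--;
		return true;	/* skip this migration too */
	}
	return false;		/* migrate normally */
}

Under this scheme, one skipped shared-page migration suppresses the next 16 candidates regardless of whether migrating them would have improved locality, which is why the commit message calls the knob "a really big hammer": it lowers migration rates indiscriminately rather than deciding per page.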