author	Mel Gorman <mgorman@suse.de>	2013-10-07 06:29:37 -0400
committer	Ingo Molnar <mingo@kernel.org>	2013-10-09 08:48:18 -0400
commit	930aa174fcc8b0efaad102fd80f677b92f35eaa2 (patch)
tree	7746bbcf350f3ce305d9e55435f7a5e3c41b9c8e /kernel/sched
parent	04bb2f9475054298f0c67a89ca92cade42d3fe5e (diff)
sched/numa: Remove the numa_balancing_scan_period_reset sysctl
With scan rate adaptations based on whether the workload has properly
converged or not, there should be no need for the scan period reset
hammer. Get rid of it.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1381141781-10992-60-git-send-email-mgorman@suse.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
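For illustration, below is a minimal userspace C sketch (not kernel code) of the reset "hammer" this commit removes: on a fixed 60-second timer the per-task scan period was forced back to its minimum, whether or not the workload's pages had already converged. The struct names, the maybe_reset_scan_period() helper, and the millisecond clock parameter are hypothetical stand-ins for the real mm_struct/task_struct fields and the jiffies arithmetic visible in the diff.

/*
 * Userspace sketch of the removed reset logic. Simplified types;
 * real code uses mm->numa_next_reset, p->numa_scan_period, jiffies,
 * time_after() and xchg().
 */
#include <stdio.h>

struct mm_sketch { unsigned long numa_next_reset; };  /* next forced reset, ms */
struct task_sketch { unsigned int numa_scan_period; }; /* scan interval, ms */

#define SCAN_PERIOD_MIN_MS   1000   /* sysctl_numa_balancing_scan_period_min */
#define SCAN_PERIOD_RESET_MS 60000  /* old ..._scan_period_reset default */

static void maybe_reset_scan_period(struct mm_sketch *mm,
				    struct task_sketch *p,
				    unsigned long now_ms)
{
	if (now_ms > mm->numa_next_reset) {
		/* Blunt hammer: force fastest rescanning on a fixed timer... */
		p->numa_scan_period = SCAN_PERIOD_MIN_MS;
		mm->numa_next_reset = now_ms + SCAN_PERIOD_RESET_MS;
		/* ...even if the task's pages were already well placed. */
	}
}

int main(void)
{
	struct mm_sketch mm = { .numa_next_reset = 60000 };
	/* Period has backed off because placement converged. */
	struct task_sketch p = { .numa_scan_period = 45000 };

	/* At t = 61s the old code discarded that back-off state. */
	maybe_reset_scan_period(&mm, &p, 61000);
	printf("scan period after reset: %u ms\n", p.numa_scan_period); /* 1000 */
	return 0;
}

With the scan rate adaptation introduced earlier in this series, the period already grows and shrinks with observed NUMA fault behaviour, so this unconditional reset only threw away useful back-off state.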
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c	1
-rw-r--r--	kernel/sched/fair.c	18
2 files changed, 1 insertion(+), 18 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 8cfd51f62241..89c5ae836f66 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1721,7 +1721,6 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 #ifdef CONFIG_NUMA_BALANCING
 	if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
 		p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
-		p->mm->numa_next_reset = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_period_reset);
 		p->mm->numa_scan_seq = 0;
 	}
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 66237ff8b01e..da6fa22be000 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -826,7 +826,6 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
  */
 unsigned int sysctl_numa_balancing_scan_period_min = 1000;
 unsigned int sysctl_numa_balancing_scan_period_max = 60000;
-unsigned int sysctl_numa_balancing_scan_period_reset = 60000;
 
 /* Portion of address space to scan in MB */
 unsigned int sysctl_numa_balancing_scan_size = 256;
@@ -1685,24 +1684,9 @@ void task_numa_work(struct callback_head *work)
 	if (p->flags & PF_EXITING)
 		return;
 
-	if (!mm->numa_next_reset || !mm->numa_next_scan) {
+	if (!mm->numa_next_scan) {
 		mm->numa_next_scan = now +
 			msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
-		mm->numa_next_reset = now +
-			msecs_to_jiffies(sysctl_numa_balancing_scan_period_reset);
-	}
-
-	/*
-	 * Reset the scan period if enough time has gone by. Objective is that
-	 * scanning will be reduced if pages are properly placed. As tasks
-	 * can enter different phases this needs to be re-examined. Lacking
-	 * proper tracking of reference behaviour, this blunt hammer is used.
-	 */
-	migrate = mm->numa_next_reset;
-	if (time_after(now, migrate)) {
-		p->numa_scan_period = task_scan_min(p);
-		next_scan = now + msecs_to_jiffies(sysctl_numa_balancing_scan_period_reset);
-		xchg(&mm->numa_next_reset, next_scan);
 	}
 
 	/*