author     Rik van Riel <riel@redhat.com>   2013-10-07 06:29:38 -0400
committer  Ingo Molnar <mingo@kernel.org>   2013-10-09 08:48:20 -0400
commit     1e3646ffc64b232cb14a5ef01d7b98997c1b73f9
tree       6bb7610078609d8d84e1b7d535d958de41c8d588 /kernel/sched
parent     930aa174fcc8b0efaad102fd80f677b92f35eaa2
mm: numa: Revert temporarily disabling of NUMA migration
With the scan rate code working (at least for multi-instance specjbb),
the large hammer that is "sched: Do not migrate memory immediately after
switching node" can be replaced with something smarter. Revert the
temporary disabling of migration and remove all traces of
numa_migrate_seq.
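For reference, here is a minimal sketch of the mechanism being reverted,
pieced together from the hunks below. This is illustrative userspace-style
C, not the kernel code; the struct and function names are invented for the
example, and the fault-path consumer lives in mm code that is not part of
this diff.

#include <stdbool.h>

/*
 * Sketch of the reverted "large hammer": a per-task counter suppresses
 * memory migration for a while after the load balancer moves the task
 * away from its preferred node. All names here are illustrative.
 */
struct task_numa_state {
	int preferred_nid;	/* node the task should run on */
	int migrate_seq;	/* 0 means memory migration is suppressed */
};

/* move_task(): the load balancer moved the task off its preferred node. */
static void lb_moved_task(struct task_numa_state *t, int src_nid, int dst_nid)
{
	if (src_nid != dst_nid && dst_nid != t->preferred_nid)
		t->migrate_seq = 0;
}

/* task_numa_placement(): each completed scan pass re-arms migration. */
static void scan_pass_done(struct task_numa_state *t)
{
	t->migrate_seq++;
}

/* Consumer side (in mm code, not shown in this diff): migrate the
 * faulting page toward the task only when the gate is open. */
static bool may_migrate_memory(const struct task_numa_state *t)
{
	return t->migrate_seq != 0;
}

With per-task fault statistics now steering placement, this coarse gate is
redundant, so the counter and all of its call sites are removed.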
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1381141781-10992-61-git-send-email-mgorman@suse.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
 kernel/sched/core.c |  2 --
 kernel/sched/fair.c | 25 +------------------------
 2 files changed, 1 insertion(+), 26 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 89c5ae836f66..0c3feebcf112 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1731,7 +1731,6 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 
 	p->node_stamp = 0ULL;
 	p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
-	p->numa_migrate_seq = 1;
 	p->numa_scan_period = sysctl_numa_balancing_scan_delay;
 	p->numa_work.next = &p->numa_work;
 	p->numa_faults = NULL;
@@ -4488,7 +4487,6 @@ void sched_setnuma(struct task_struct *p, int nid)
 	p->sched_class->put_prev_task(rq, p);
 
 	p->numa_preferred_nid = nid;
-	p->numa_migrate_seq = 1;
 
 	if (running)
 		p->sched_class->set_curr_task(rq);
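After the patch, the NUMA-balancing setup in __sched_fork() reduces to the
context lines of the hunk above:

	p->node_stamp = 0ULL;
	p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
	p->numa_scan_period = sysctl_numa_balancing_scan_delay;
	p->numa_work.next = &p->numa_work;
	p->numa_faults = NULL;

and sched_setnuma() now only records p->numa_preferred_nid.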
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index da6fa22be000..8454c38b1b12 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1261,16 +1261,8 @@ static void numa_migrate_preferred(struct task_struct *p)
 {
 	/* Success if task is already running on preferred CPU */
 	p->numa_migrate_retry = 0;
-	if (cpu_to_node(task_cpu(p)) == p->numa_preferred_nid) {
-		/*
-		 * If migration is temporarily disabled due to a task migration
-		 * then re-enable it now as the task is running on its
-		 * preferred node and memory should migrate locally
-		 */
-		if (!p->numa_migrate_seq)
-			p->numa_migrate_seq++;
+	if (cpu_to_node(task_cpu(p)) == p->numa_preferred_nid)
 		return;
-	}
 
 	/* This task has no NUMA fault statistics yet */
 	if (unlikely(p->numa_preferred_nid == -1))
@@ -1367,7 +1359,6 @@ static void task_numa_placement(struct task_struct *p)
 	if (p->numa_scan_seq == seq)
 		return;
 	p->numa_scan_seq = seq;
-	p->numa_migrate_seq++;
 	p->numa_scan_period_max = task_scan_max(p);
 
 	/* If the task is part of a group prevent parallel updates to group stats */
@@ -4730,20 +4721,6 @@ static void move_task(struct task_struct *p, struct lb_env *env)
 	set_task_cpu(p, env->dst_cpu);
 	activate_task(env->dst_rq, p, 0);
 	check_preempt_curr(env->dst_rq, p, 0);
-#ifdef CONFIG_NUMA_BALANCING
-	if (p->numa_preferred_nid != -1) {
-		int src_nid = cpu_to_node(env->src_cpu);
-		int dst_nid = cpu_to_node(env->dst_cpu);
-
-		/*
-		 * If the load balancer has moved the task then limit
-		 * migrations from taking place in the short term in
-		 * case this is a short-lived migration.
-		 */
-		if (src_nid != dst_nid && dst_nid != p->numa_preferred_nid)
-			p->numa_migrate_seq = 0;
-	}
-#endif
 }
 
 /*
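For clarity, this is move_task() as it reads after the revert. The
deactivate_task() line lies outside the hunk and is reconstructed from the
surrounding source of that era, so treat it as an assumption:

/* kernel/sched/fair.c, post-revert */
static void move_task(struct task_struct *p, struct lb_env *env)
{
	deactivate_task(env->src_rq, p, 0);	/* assumed context, outside the hunk */
	set_task_cpu(p, env->dst_cpu);
	activate_task(env->dst_rq, p, 0);
	check_preempt_curr(env->dst_rq, p, 0);
}

The load-balancer path is NUMA-agnostic again; placement decisions are left
entirely to the fault-statistics code.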