diff options
author | Rik van Riel <riel@redhat.com> | 2013-10-07 06:29:38 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2013-10-09 08:48:20 -0400 |
commit | 1e3646ffc64b232cb14a5ef01d7b98997c1b73f9 (patch) | |
tree | 6bb7610078609d8d84e1b7d535d958de41c8d588 | |
parent | 930aa174fcc8b0efaad102fd80f677b92f35eaa2 (diff) |
mm: numa: Revert temporary disabling of NUMA migration
With the scan rate code working (at least for multi-instance specjbb),
the large hammer that is "sched: Do not migrate memory immediately after
switching node" can be replaced with something smarter. Revert the
temporary migration disabling and remove all traces of numa_migrate_seq.
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1381141781-10992-61-git-send-email-mgorman@suse.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r-- | include/linux/sched.h | 1 | ||||
-rw-r--r-- | kernel/sched/core.c | 2 | ||||
-rw-r--r-- | kernel/sched/fair.c | 25 | ||||
-rw-r--r-- | mm/mempolicy.c | 12 |
4 files changed, 1 insertion, 39 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h index 2292f6c1596f..d24f70ffddee 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -1340,7 +1340,6 @@ struct task_struct { | |||
1340 | #endif | 1340 | #endif |
1341 | #ifdef CONFIG_NUMA_BALANCING | 1341 | #ifdef CONFIG_NUMA_BALANCING |
1342 | int numa_scan_seq; | 1342 | int numa_scan_seq; |
1343 | int numa_migrate_seq; | ||
1344 | unsigned int numa_scan_period; | 1343 | unsigned int numa_scan_period; |
1345 | unsigned int numa_scan_period_max; | 1344 | unsigned int numa_scan_period_max; |
1346 | unsigned long numa_migrate_retry; | 1345 | unsigned long numa_migrate_retry; |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 89c5ae836f66..0c3feebcf112 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -1731,7 +1731,6 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) | |||
1731 | 1731 | ||
1732 | p->node_stamp = 0ULL; | 1732 | p->node_stamp = 0ULL; |
1733 | p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0; | 1733 | p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0; |
1734 | p->numa_migrate_seq = 1; | ||
1735 | p->numa_scan_period = sysctl_numa_balancing_scan_delay; | 1734 | p->numa_scan_period = sysctl_numa_balancing_scan_delay; |
1736 | p->numa_work.next = &p->numa_work; | 1735 | p->numa_work.next = &p->numa_work; |
1737 | p->numa_faults = NULL; | 1736 | p->numa_faults = NULL; |
@@ -4488,7 +4487,6 @@ void sched_setnuma(struct task_struct *p, int nid) | |||
4488 | p->sched_class->put_prev_task(rq, p); | 4487 | p->sched_class->put_prev_task(rq, p); |
4489 | 4488 | ||
4490 | p->numa_preferred_nid = nid; | 4489 | p->numa_preferred_nid = nid; |
4491 | p->numa_migrate_seq = 1; | ||
4492 | 4490 | ||
4493 | if (running) | 4491 | if (running) |
4494 | p->sched_class->set_curr_task(rq); | 4492 | p->sched_class->set_curr_task(rq); |
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index da6fa22be000..8454c38b1b12 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -1261,16 +1261,8 @@ static void numa_migrate_preferred(struct task_struct *p) | |||
1261 | { | 1261 | { |
1262 | /* Success if task is already running on preferred CPU */ | 1262 | /* Success if task is already running on preferred CPU */ |
1263 | p->numa_migrate_retry = 0; | 1263 | p->numa_migrate_retry = 0; |
1264 | if (cpu_to_node(task_cpu(p)) == p->numa_preferred_nid) { | 1264 | if (cpu_to_node(task_cpu(p)) == p->numa_preferred_nid) |
1265 | /* | ||
1266 | * If migration is temporarily disabled due to a task migration | ||
1267 | * then re-enable it now as the task is running on its | ||
1268 | * preferred node and memory should migrate locally | ||
1269 | */ | ||
1270 | if (!p->numa_migrate_seq) | ||
1271 | p->numa_migrate_seq++; | ||
1272 | return; | 1265 | return; |
1273 | } | ||
1274 | 1266 | ||
1275 | /* This task has no NUMA fault statistics yet */ | 1267 | /* This task has no NUMA fault statistics yet */ |
1276 | if (unlikely(p->numa_preferred_nid == -1)) | 1268 | if (unlikely(p->numa_preferred_nid == -1)) |
@@ -1367,7 +1359,6 @@ static void task_numa_placement(struct task_struct *p) | |||
1367 | if (p->numa_scan_seq == seq) | 1359 | if (p->numa_scan_seq == seq) |
1368 | return; | 1360 | return; |
1369 | p->numa_scan_seq = seq; | 1361 | p->numa_scan_seq = seq; |
1370 | p->numa_migrate_seq++; | ||
1371 | p->numa_scan_period_max = task_scan_max(p); | 1362 | p->numa_scan_period_max = task_scan_max(p); |
1372 | 1363 | ||
1373 | /* If the task is part of a group prevent parallel updates to group stats */ | 1364 | /* If the task is part of a group prevent parallel updates to group stats */ |
@@ -4730,20 +4721,6 @@ static void move_task(struct task_struct *p, struct lb_env *env) | |||
4730 | set_task_cpu(p, env->dst_cpu); | 4721 | set_task_cpu(p, env->dst_cpu); |
4731 | activate_task(env->dst_rq, p, 0); | 4722 | activate_task(env->dst_rq, p, 0); |
4732 | check_preempt_curr(env->dst_rq, p, 0); | 4723 | check_preempt_curr(env->dst_rq, p, 0); |
4733 | #ifdef CONFIG_NUMA_BALANCING | ||
4734 | if (p->numa_preferred_nid != -1) { | ||
4735 | int src_nid = cpu_to_node(env->src_cpu); | ||
4736 | int dst_nid = cpu_to_node(env->dst_cpu); | ||
4737 | |||
4738 | /* | ||
4739 | * If the load balancer has moved the task then limit | ||
4740 | * migrations from taking place in the short term in | ||
4741 | * case this is a short-lived migration. | ||
4742 | */ | ||
4743 | if (src_nid != dst_nid && dst_nid != p->numa_preferred_nid) | ||
4744 | p->numa_migrate_seq = 0; | ||
4745 | } | ||
4746 | #endif | ||
4747 | } | 4724 | } |
4748 | 4725 | ||
4749 | /* | 4726 | /* |
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index a5867ef24bda..2929c24c22b7 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c | |||
@@ -2404,18 +2404,6 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long | |||
2404 | last_cpupid = page_cpupid_xchg_last(page, this_cpupid); | 2404 | last_cpupid = page_cpupid_xchg_last(page, this_cpupid); |
2405 | if (!cpupid_pid_unset(last_cpupid) && cpupid_to_nid(last_cpupid) != thisnid) | 2405 | if (!cpupid_pid_unset(last_cpupid) && cpupid_to_nid(last_cpupid) != thisnid) |
2406 | goto out; | 2406 | goto out; |
2407 | |||
2408 | #ifdef CONFIG_NUMA_BALANCING | ||
2409 | /* | ||
2410 | * If the scheduler has just moved us away from our | ||
2411 | * preferred node, do not bother migrating pages yet. | ||
2412 | * This way a short and temporary process migration will | ||
2413 | * not cause excessive memory migration. | ||
2414 | */ | ||
2415 | if (thisnid != current->numa_preferred_nid && | ||
2416 | !current->numa_migrate_seq) | ||
2417 | goto out; | ||
2418 | #endif | ||
2419 | } | 2407 | } |
2420 | 2408 | ||
2421 | if (curnid != polnid) | 2409 | if (curnid != polnid) |