author	Rik van Riel <riel@redhat.com>	2014-01-27 17:03:40 -0500
committer	Ingo Molnar <mingo@kernel.org>	2014-01-28 07:17:04 -0500
commit	52bf84aa206cd2c2516dfa3e03b578edf8a3242f (patch)
tree	e8acbb2c3ce90b7aed27046c7efc5a082f6ef684 /mm/mempolicy.c
parent	a57beec5d427086cdc8d75fd51164577193fa7f4 (diff)
sched/numa, mm: Remove p->numa_migrate_deferred
Excessive migration of pages can hurt the performance of workloads
that span multiple NUMA nodes. However, it turns out that the
p->numa_migrate_deferred knob is a really big hammer, which does
reduce migration rates, but does not actually help performance.

Now that the second stage of the automatic numa balancing code has
stabilized, it is time to replace the simplistic migration deferral
code with something smarter.

Signed-off-by: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Chegu Vinod <chegu_vinod@hp.com>
Link: http://lkml.kernel.org/r/1390860228-21539-2-git-send-email-riel@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'mm/mempolicy.c')
-rw-r--r--	mm/mempolicy.c	45
1 files changed, 0 insertions, 45 deletions
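For context, the mechanism this patch removes worked roughly as follows: each task carried a numa_migrate_deferred counter; when a shared (non-private) NUMA hinting fault failed the node filter, the counter was armed from sysctl_numa_balancing_migrate_deferred, and subsequent shared-fault migrations were skipped until it counted down to zero. The standalone sketch below is a simplified illustration of that behavior, not the kernel code itself; struct task, sysctl_migrate_deferred, and the value 16 are stand-ins chosen for this example.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the deferral state that lived in task_struct. */
struct task {
	int numa_migrate_deferred;	/* shared-fault migrations left to skip */
};

/* Stand-in for sysctl_numa_balancing_migrate_deferred (illustrative value). */
static int sysctl_migrate_deferred = 16;

/* Arm the deferral window after a shared fault fails the node filter. */
static void defer_numa_migrate(struct task *p)
{
	p->numa_migrate_deferred = sysctl_migrate_deferred;
}

/* True while the window is open; private faults are never deferred. */
static bool numa_migrate_deferred(struct task *p, bool private_fault)
{
	if (private_fault)
		return false;
	if (p->numa_migrate_deferred) {
		p->numa_migrate_deferred--;
		return true;
	}
	return false;
}

int main(void)
{
	struct task t = { 0 };
	int i, skipped = 0;

	defer_numa_migrate(&t);

	/*
	 * Every shared-fault migration is now blocked until the counter
	 * runs out, regardless of whether moving the page would help --
	 * the "really big hammer" the changelog describes.
	 */
	for (i = 0; i < 20; i++)
		if (numa_migrate_deferred(&t, false))
			skipped++;

	printf("skipped %d of 20 candidate migrations\n", skipped);
	return 0;
}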
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 0cd2c4d4e270..68d5c7f7164e 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2304,35 +2304,6 @@ static void sp_free(struct sp_node *n)
 	kmem_cache_free(sn_cache, n);
 }
 
-#ifdef CONFIG_NUMA_BALANCING
-static bool numa_migrate_deferred(struct task_struct *p, int last_cpupid)
-{
-	/* Never defer a private fault */
-	if (cpupid_match_pid(p, last_cpupid))
-		return false;
-
-	if (p->numa_migrate_deferred) {
-		p->numa_migrate_deferred--;
-		return true;
-	}
-	return false;
-}
-
-static inline void defer_numa_migrate(struct task_struct *p)
-{
-	p->numa_migrate_deferred = sysctl_numa_balancing_migrate_deferred;
-}
-#else
-static inline bool numa_migrate_deferred(struct task_struct *p, int last_cpupid)
-{
-	return false;
-}
-
-static inline void defer_numa_migrate(struct task_struct *p)
-{
-}
-#endif /* CONFIG_NUMA_BALANCING */
-
 /**
  * mpol_misplaced - check whether current page node is valid in policy
  *
@@ -2435,24 +2406,8 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long
 		 */
 		last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
 		if (!cpupid_pid_unset(last_cpupid) && cpupid_to_nid(last_cpupid) != thisnid) {
-
-			/* See sysctl_numa_balancing_migrate_deferred comment */
-			if (!cpupid_match_pid(current, last_cpupid))
-				defer_numa_migrate(current);
-
 			goto out;
 		}
-
-		/*
-		 * The quadratic filter above reduces extraneous migration
-		 * of shared pages somewhat. This code reduces it even more,
-		 * reducing the overhead of page migrations of shared pages.
-		 * This makes workloads with shared pages rely more on
-		 * "move task near its memory", and less on "move memory
-		 * towards its task", which is exactly what we want.
-		 */
-		if (numa_migrate_deferred(current, last_cpupid))
-			goto out;
 	}
 
 	if (curnid != polnid)
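What survives the patch is the lighter-weight filter visible in the unchanged lines above: the previous hinting fault for a shared page is recorded, and the page is only considered for migration once two consecutive faults come from the same node. The sketch below illustrates just that surviving check, under the assumption that node ids are tracked directly; struct page_stub and xchg_last_nid() are simplified stand-ins for the kernel's page_cpupid_xchg_last()/cpupid_to_nid() machinery, not its actual API.

#include <stdbool.h>

#define NID_UNSET (-1)

/* Stand-in for the last-cpupid information stored in struct page. */
struct page_stub {
	int last_nid;	/* node of the previous hinting fault, or NID_UNSET */
};

/* Record this fault's node and return the previously recorded one. */
static int xchg_last_nid(struct page_stub *page, int this_nid)
{
	int last = page->last_nid;

	page->last_nid = this_nid;
	return last;
}

/* Shared pages migrate only when two consecutive faults agree on the node. */
static bool should_migrate(struct page_stub *page, int this_nid)
{
	int last_nid = xchg_last_nid(page, this_nid);

	if (last_nid != NID_UNSET && last_nid != this_nid)
		return false;	/* previous fault was elsewhere: skip for now */
	return true;
}

int main(void)
{
	struct page_stub page = { NID_UNSET };

	should_migrate(&page, 1);	/* first recorded fault: allowed */
	should_migrate(&page, 0);	/* node changed: deferred */
	return should_migrate(&page, 0) ? 0 : 1;	/* same node twice: allowed */
}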