author     Mel Gorman <mgorman@suse.de>   2012-11-19 05:59:15 -0500
committer  Mel Gorman <mgorman@suse.de>   2012-12-11 09:42:51 -0500
commit     e14808b49f55e0e1135da5e4a154a540dd9f3662 (patch)
tree       d66708455dcc1b6e2e15937d732ab12c121e623a /kernel
parent     a8f6077213d285ca08dbf6d4a67470787388138b (diff)
mm: numa: Rate limit setting of pte_numa if node is saturated
If there are a large number of NUMA hinting faults and all of them are
resulting in migrations, it may indicate that memory is just bouncing
uselessly around. The cost of NUMA balancing is likely exceeding any
benefit from locality. Rate-limit the PTE updates if the node is
migration rate-limited. As noted in the comments, this distorts the
NUMA faulting statistics.

Signed-off-by: Mel Gorman <mgorman@suse.de>
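For reference, the migrate_ratelimited() check used in the hunk below is
added to mm/migrate.c by the same patch, outside the 'kernel' diffstat
shown here. A minimal sketch of the idea, assuming the per-node
numabalancing_migrate_nr_pages / numabalancing_migrate_next_window
counters and the ratelimit_pages quota introduced by the parent
rate-limiting patch, might look roughly like this (illustrative only,
not the exact upstream code):

#include <linux/jiffies.h>
#include <linux/mmzone.h>

/*
 * Sketch: report whether @node has already migrated its quota of pages
 * for the current rate-limit window. Callers such as task_numa_work()
 * can then skip marking PTEs pte_numa, since migrations toward this
 * node would be refused anyway. ratelimit_pages is assumed to be the
 * per-window quota defined by the parent patch.
 */
static bool migrate_ratelimited_sketch(int node)
{
	pg_data_t *pgdat = NODE_DATA(node);

	/* The window has expired, so the node is no longer rate-limited. */
	if (time_after(jiffies, pgdat->numabalancing_migrate_next_window))
		return false;

	/* Still below the per-window page quota. */
	if (pgdat->numabalancing_migrate_nr_pages <= ratelimit_pages)
		return false;

	return true;
}

With a check along these lines, task_numa_work() can bail out early on a
saturated node, trading some lost NUMA-hinting fault statistics for not
queueing migrations the destination node would refuse.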
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/fair.c  9
1 file changed, 9 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7727b0161579..37e895a941ab 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -27,6 +27,7 @@
 #include <linux/profile.h>
 #include <linux/interrupt.h>
 #include <linux/mempolicy.h>
+#include <linux/migrate.h>
 #include <linux/task_work.h>
 
 #include <trace/events/sched.h>
@@ -861,6 +862,14 @@ void task_numa_work(struct callback_head *work)
 	if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
 		return;
 
+	/*
+	 * Do not set pte_numa if the current running node is rate-limited.
+	 * This loses statistics on the fault but if we are unwilling to
+	 * migrate to this node, it is less likely we can do useful work
+	 */
+	if (migrate_ratelimited(numa_node_id()))
+		return;
+
 	start = mm->numa_scan_offset;
 	pages = sysctl_numa_balancing_scan_size;
 	pages <<= 20 - PAGE_SHIFT; /* MB in pages */