path: root/mm/huge_memory.c
author	David Rientjes <rientjes@google.com>	2014-08-06 19:07:29 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-06 21:01:20 -0400
commit	14a4e2141e24304fff2c697be6382ffb83888185 (patch)
tree	a99c001aa3fbb184a0457a7673782d8b55bd659c /mm/huge_memory.c
parent	fed400a181447ba975d40e1df5e0d555eae51795 (diff)
mm, thp: only collapse hugepages to nodes with affinity for zone_reclaim_mode
Commit 9f1b868a13ac ("mm: thp: khugepaged: add policy for finding target node") improved the previous khugepaged logic, which allocated a transparent hugepage from the node of the first page being collapsed.

However, it is still possible to collapse pages to remote memory, which may suffer from additional access latency. With the current policy, it is possible that 255 pages (with PAGE_SHIFT == 12) will be collapsed remotely if the majority are allocated from that node.

When zone_reclaim_mode is enabled, the VM should make every attempt to allocate locally to prevent NUMA performance degradation. In this case, we do not want to collapse hugepages to remote nodes that would suffer from increased access latency. Thus, when zone_reclaim_mode is enabled, only allow collapsing to nodes at RECLAIM_DISTANCE or less.

There is no functional change for systems that disable zone_reclaim_mode.

Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Bob Liu <bob.liu@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
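For illustration only, a minimal userspace sketch of the abort policy this patch adds is shown here. It mirrors the decision logic of khugepaged_scan_abort() from the hunk below, but everything around it is assumed for the example: the four-node distance table, the RECLAIM_DISTANCE value of 30, the node_load[] contents, and the zone_reclaim_mode flag are stand-ins, not kernel state; in the kernel the distances come from node_distance() and RECLAIM_DISTANCE is architecture-defined.

/*
 * Illustrative sketch (userspace, hypothetical values): abort a khugepaged
 * scan when the candidate node is farther than RECLAIM_DISTANCE from any
 * node already counted, and zone_reclaim_mode is enabled.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_NUMNODES		4
#define RECLAIM_DISTANCE	30	/* assumed value; arch-specific in the kernel */

static int zone_reclaim_mode = 1;	/* stand-in for the sysctl, pretend enabled */
static int node_load[MAX_NUMNODES];	/* stands in for khugepaged_node_load[] */

/* Hypothetical SLIT: nodes 0/1 are close (20), nodes 2/3 are far (40). */
static const int distance[MAX_NUMNODES][MAX_NUMNODES] = {
	{ 10, 20, 40, 40 },
	{ 20, 10, 40, 40 },
	{ 40, 40, 10, 20 },
	{ 40, 40, 20, 10 },
};

/* Same decision logic as khugepaged_scan_abort() in the hunk below. */
static bool scan_abort(int nid)
{
	int i;

	if (!zone_reclaim_mode)
		return false;
	if (node_load[nid])
		return false;
	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!node_load[i])
			continue;
		if (distance[nid][i] > RECLAIM_DISTANCE)
			return true;
	}
	return false;
}

int main(void)
{
	node_load[0] = 200;	/* most pages seen so far sit on node 0 */

	/* Node 1 is within RECLAIM_DISTANCE of node 0: keep scanning. */
	printf("abort for node 1: %s\n", scan_abort(1) ? "yes" : "no");
	/* Node 2 is farther than RECLAIM_DISTANCE: abort the collapse. */
	printf("abort for node 2: %s\n", scan_abort(2) ? "yes" : "no");
	return 0;
}

With these assumed distances the sketch prints "no" for node 1 and "yes" for node 2, which is the behaviour the commit message describes: remote collapse is refused only when zone_reclaim_mode asks for local allocation.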
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--	mm/huge_memory.c	26
1 file changed, 26 insertions, 0 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 24e354c2b59e..3630d577e987 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2233,6 +2233,30 @@ static void khugepaged_alloc_sleep(void)
 
 static int khugepaged_node_load[MAX_NUMNODES];
 
+static bool khugepaged_scan_abort(int nid)
+{
+	int i;
+
+	/*
+	 * If zone_reclaim_mode is disabled, then no extra effort is made to
+	 * allocate memory locally.
+	 */
+	if (!zone_reclaim_mode)
+		return false;
+
+	/* If there is a count for this node already, it must be acceptable */
+	if (khugepaged_node_load[nid])
+		return false;
+
+	for (i = 0; i < MAX_NUMNODES; i++) {
+		if (!khugepaged_node_load[i])
+			continue;
+		if (node_distance(nid, i) > RECLAIM_DISTANCE)
+			return true;
+	}
+	return false;
+}
+
 #ifdef CONFIG_NUMA
 static int khugepaged_find_target_node(void)
 {
@@ -2545,6 +2569,8 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
 		 * hit record.
 		 */
 		node = page_to_nid(page);
+		if (khugepaged_scan_abort(node))
+			goto out_unmap;
 		khugepaged_node_load[node]++;
 		VM_BUG_ON_PAGE(PageCompound(page), page);
 		if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))