aboutsummaryrefslogtreecommitdiffstats
path: root/mm/memory.c
diff options
context:
space:
mode:
authorMel Gorman <mgorman@suse.de>2013-10-07 06:29:05 -0400
committerIngo Molnar <mingo@kernel.org>2013-10-09 06:40:32 -0400
commit1bc115d87dffd1c43bdc3c9c9d1e3a51c195d18e (patch)
tree56a26b4f4fe089e3dd1df5a26877d1e4c0114d35 /mm/memory.c
parent9ff1d9ff3c2c8ab3feaeb2e8056a07ca293f7bde (diff)
mm: numa: Scan pages with elevated page_mapcount
Currently automatic NUMA balancing is unable to distinguish between falsely shared versus private pages except by ignoring pages with an elevated page_mapcount entirely. This avoids shared pages bouncing between the nodes whose tasks are using them, but it ignores quite a lot of data. This patch kicks away the training wheels now that support for identifying shared/private pages is in place. The ordering is so that the impact of the shared/private detection can be easily measured. Note that the patch does not migrate shared, file-backed pages within vmas marked VM_EXEC as these are generally shared library pages. Migrating such pages is not beneficial as there is an expectation they are read-shared between caches and iTLB and iCache pressure is generally low. Signed-off-by: Mel Gorman <mgorman@suse.de> Reviewed-by: Rik van Riel <riel@redhat.com> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com> Signed-off-by: Peter Zijlstra <peterz@infradead.org> Link: http://lkml.kernel.org/r/1381141781-10992-28-git-send-email-mgorman@suse.de Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'mm/memory.c')
-rw-r--r--mm/memory.c7
1 file changed, 2 insertions, 5 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 24bc9b848af6..3e3b4b8b6c41 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3577,7 +3577,7 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3577 } 3577 }
3578 3578
3579 /* Migrate to the requested node */ 3579 /* Migrate to the requested node */
3580 migrated = migrate_misplaced_page(page, target_nid); 3580 migrated = migrate_misplaced_page(page, vma, target_nid);
3581 if (migrated) 3581 if (migrated)
3582 page_nid = target_nid; 3582 page_nid = target_nid;
3583 3583
@@ -3642,16 +3642,13 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3642 page = vm_normal_page(vma, addr, pteval); 3642 page = vm_normal_page(vma, addr, pteval);
3643 if (unlikely(!page)) 3643 if (unlikely(!page))
3644 continue; 3644 continue;
3645 /* only check non-shared pages */
3646 if (unlikely(page_mapcount(page) != 1))
3647 continue;
3648 3645
3649 last_nid = page_nid_last(page); 3646 last_nid = page_nid_last(page);
3650 page_nid = page_to_nid(page); 3647 page_nid = page_to_nid(page);
3651 target_nid = numa_migrate_prep(page, vma, addr, page_nid); 3648 target_nid = numa_migrate_prep(page, vma, addr, page_nid);
3652 pte_unmap_unlock(pte, ptl); 3649 pte_unmap_unlock(pte, ptl);
3653 if (target_nid != -1) { 3650 if (target_nid != -1) {
3654 migrated = migrate_misplaced_page(page, target_nid); 3651 migrated = migrate_misplaced_page(page, vma, target_nid);
3655 if (migrated) 3652 if (migrated)
3656 page_nid = target_nid; 3653 page_nid = target_nid;
3657 } else { 3654 } else {