about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
authorRik van Riel <riel@redhat.com>2013-10-07 06:29:36 -0400
committerIngo Molnar <mingo@kernel.org>2013-10-09 08:48:16 -0400
commit04bb2f9475054298f0c67a89ca92cade42d3fe5e (patch)
treeab48887e23b7f820380a3f415cbe0a6f64f7fecc /mm
parent3e6a9418cf05638b103e34f5d13be0321872e623 (diff)
sched/numa: Adjust scan rate in task_numa_placement
Adjust numa_scan_period in task_numa_placement, depending on how much useful work the numa code can do. The more local faults there are in a given scan window the longer the period (and hence the slower the scan rate) during the next window. If there are excessive shared faults then the scan period will decrease, with the amount of scaling depending on the ratio of shared/private faults. If the preferred node changes then the scan rate is reset to recheck if the task is properly placed. Signed-off-by: Rik van Riel <riel@redhat.com> Signed-off-by: Mel Gorman <mgorman@suse.de> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com> Signed-off-by: Peter Zijlstra <peterz@infradead.org> Link: http://lkml.kernel.org/r/1381141781-10992-59-git-send-email-mgorman@suse.de Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/huge_memory.c4
-rw-r--r--mm/memory.c9
2 files changed, 9 insertions, 4 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 7ab4e32afe12..1be2a1f95b61 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1296,8 +1296,10 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	page_nid = page_to_nid(page);
 	last_cpupid = page_cpupid_last(page);
 	count_vm_numa_event(NUMA_HINT_FAULTS);
-	if (page_nid == this_nid)
+	if (page_nid == this_nid) {
 		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
+		flags |= TNF_FAULT_LOCAL;
+	}
 
 	/*
 	 * Avoid grouping on DSO/COW pages in specific and RO pages
diff --git a/mm/memory.c b/mm/memory.c
index 823720c43ea9..1c7501f7fb1a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3527,13 +3527,16 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 
 int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
-				unsigned long addr, int page_nid)
+				unsigned long addr, int page_nid,
+				int *flags)
 {
 	get_page(page);
 
 	count_vm_numa_event(NUMA_HINT_FAULTS);
-	if (page_nid == numa_node_id())
+	if (page_nid == numa_node_id()) {
 		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
+		*flags |= TNF_FAULT_LOCAL;
+	}
 
 	return mpol_misplaced(page, vma, addr);
 }
@@ -3593,7 +3596,7 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	last_cpupid = page_cpupid_last(page);
 	page_nid = page_to_nid(page);
-	target_nid = numa_migrate_prep(page, vma, addr, page_nid);
+	target_nid = numa_migrate_prep(page, vma, addr, page_nid, &flags);
 	pte_unmap_unlock(ptep, ptl);
 	if (target_nid == -1) {
 		put_page(page);