aboutsummaryrefslogtreecommitdiffstats
path: root/mm/huge_memory.c
diff options
context:
space:
mode:
author: Mel Gorman <mgorman@suse.de> 2013-10-07 06:29:03 -0400
committer: Ingo Molnar <mingo@kernel.org> 2013-10-09 06:40:30 -0400
commit: ac8e895bd260cb8bb19ade6a3abd44e7abe9a01d (patch)
tree: fe0d50baf0dad412fd7d5ba0286ce95e08a363ac /mm/huge_memory.c
parent: e6628d5b0a2979f3e0ee6f7783ede5df50cb9ede (diff)
sched/numa: Add infrastructure for split shared/private accounting of NUMA hinting faults
Ideally it would be possible to distinguish between NUMA hinting faults that are private to a task and those that are shared. This patch prepares infrastructure for separately accounting shared and private faults by allocating the necessary buffers and passing in relevant information. For now, all faults are treated as private and detection will be introduced later.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1381141781-10992-26-git-send-email-mgorman@suse.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--  mm/huge_memory.c  5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 8677dbf31c2e..914216733e0a 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1282,7 +1282,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct page *page;
 	unsigned long haddr = addr & HPAGE_PMD_MASK;
 	int page_nid = -1, this_nid = numa_node_id();
-	int target_nid;
+	int target_nid, last_nid = -1;
 	bool page_locked;
 	bool migrated = false;
 
@@ -1293,6 +1293,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	page = pmd_page(pmd);
 	BUG_ON(is_huge_zero_page(page));
 	page_nid = page_to_nid(page);
+	last_nid = page_nid_last(page);
 	count_vm_numa_event(NUMA_HINT_FAULTS);
 	if (page_nid == this_nid)
 		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
@@ -1361,7 +1362,7 @@ out:
 	page_unlock_anon_vma_read(anon_vma);
 
 	if (page_nid != -1)
-		task_numa_fault(page_nid, HPAGE_PMD_NR, migrated);
+		task_numa_fault(last_nid, page_nid, HPAGE_PMD_NR, migrated);
 
 	return 0;
 }