author     Peter Zijlstra <peterz@infradead.org>    2013-10-07 06:29:20 -0400
committer  Ingo Molnar <mingo@kernel.org>           2013-10-09 08:47:45 -0400
commit     90572890d202527c366aa9489b32404e88a7c020
tree       0577f3b043e312f6d53e50105b236514f7df2455 /mm/huge_memory.c
parent     e1dda8a797b59d7ec4b17e393152ec3273a552d5
mm: numa: Change page last {nid,pid} into {cpu,pid}
Change the per-page last fault tracking to use cpu,pid instead of
nid,pid. This will allow us to look up the alternate task more
easily. Note that even though it is the cpu that is stored in the page
flags, the mpol_misplaced decision is still based on the node.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/1381141781-10992-43-git-send-email-mgorman@suse.de
[ Fixed build failure on 32-bit systems. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
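To make the cpu,pid encoding described above concrete, here is a minimal
user-space C sketch of packing a (cpu, pid) pair into a single integer, in
the spirit of the value this patch stores in the page flags. The bit widths
and helper names (CPUPID_*, make_cpupid(), cpupid_to_cpu(), cpupid_to_pid())
are illustrative assumptions for this sketch, not the kernel's actual
page_cpupid_* implementation; in the kernel, the node used by mpol_misplaced
would still be derived from the stored cpu (e.g. via cpu_to_node()).

/*
 * Illustrative sketch only: pack a (cpu, pid) pair into one int.
 * Field widths and helper names are assumptions for demonstration.
 */
#include <assert.h>
#include <stdio.h>

#define CPUPID_CPU_BITS  8                            /* assumed cpu field width */
#define CPUPID_PID_BITS  8                            /* assumed pid field width */
#define CPUPID_CPU_MASK  ((1 << CPUPID_CPU_BITS) - 1)
#define CPUPID_PID_MASK  ((1 << CPUPID_PID_BITS) - 1)

/* Pack the low bits of cpu and pid into one value. */
static int make_cpupid(int cpu, int pid)
{
	return ((cpu & CPUPID_CPU_MASK) << CPUPID_PID_BITS) |
	       (pid & CPUPID_PID_MASK);
}

static int cpupid_to_cpu(int cpupid)
{
	return (cpupid >> CPUPID_PID_BITS) & CPUPID_CPU_MASK;
}

static int cpupid_to_pid(int cpupid)
{
	return cpupid & CPUPID_PID_MASK;
}

int main(void)
{
	int cpupid = make_cpupid(3, 4242 & CPUPID_PID_MASK);

	/*
	 * Unlike the old nid,pid scheme, recording the cpu makes it easier
	 * for a later fault to find the task that last touched the page.
	 */
	printf("cpu=%d pid-bits=%d\n",
	       cpupid_to_cpu(cpupid), cpupid_to_pid(cpupid));
	assert(cpupid_to_cpu(cpupid) == 3);
	return 0;
}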
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--  mm/huge_memory.c  |  8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 0baf0e4d5203..becf92ca54f3 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1282,7 +1282,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct page *page;
 	unsigned long haddr = addr & HPAGE_PMD_MASK;
 	int page_nid = -1, this_nid = numa_node_id();
-	int target_nid, last_nidpid = -1;
+	int target_nid, last_cpupid = -1;
 	bool page_locked;
 	bool migrated = false;
 
@@ -1293,7 +1293,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	page = pmd_page(pmd);
 	BUG_ON(is_huge_zero_page(page));
 	page_nid = page_to_nid(page);
-	last_nidpid = page_nidpid_last(page);
+	last_cpupid = page_cpupid_last(page);
 	count_vm_numa_event(NUMA_HINT_FAULTS);
 	if (page_nid == this_nid)
 		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
@@ -1362,7 +1362,7 @@ out:
 	page_unlock_anon_vma_read(anon_vma);
 
 	if (page_nid != -1)
-		task_numa_fault(last_nidpid, page_nid, HPAGE_PMD_NR, migrated);
+		task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, migrated);
 
 	return 0;
 }
@@ -1682,7 +1682,7 @@ static void __split_huge_page_refcount(struct page *page,
 	page_tail->mapping = page->mapping;
 
 	page_tail->index = page->index + i;
-	page_nidpid_xchg_last(page_tail, page_nidpid_last(page));
+	page_cpupid_xchg_last(page_tail, page_cpupid_last(page));
 
 	BUG_ON(!PageAnon(page_tail));
 	BUG_ON(!PageUptodate(page_tail));