author     Peter Zijlstra <peterz@infradead.org>    2013-10-07 06:29:20 -0400
committer  Ingo Molnar <mingo@kernel.org>           2013-10-09 08:47:45 -0400
commit     90572890d202527c366aa9489b32404e88a7c020 (patch)
tree       0577f3b043e312f6d53e50105b236514f7df2455 /mm/memory.c
parent     e1dda8a797b59d7ec4b17e393152ec3273a552d5 (diff)
mm: numa: Change page last {nid,pid} into {cpu,pid}
Change the per page last fault tracking to use cpu,pid instead of
nid,pid. This will allow us to try and look up the alternate task more
easily. Note that even though it is the cpu that is stored in the page
flags, the mpol_misplaced decision is still based on the node.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/1381141781-10992-43-git-send-email-mgorman@suse.de
[ Fixed build failure on 32-bit systems. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
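For context: the patch packs the last faulting cpu and the low bits of that
task's pid into a single integer kept in the page flags (or in an extra
page-frame field when LAST_CPUPID_NOT_IN_PAGE_FLAGS is set, as the #warning
below notes). The standalone C sketch that follows only illustrates that
packing idea; the example_* names and the 8-bit pid field are assumptions
made for illustration, not the kernel's actual helpers, whose field widths
depend on the configuration.

/*
 * Minimal illustration of packing a cpu number and the low bits of a
 * pid into one int, in the spirit of the cpupid change. Hypothetical
 * names and field widths; not the kernel's real helpers.
 */
#include <stdio.h>

#define EXAMPLE_PID_BITS	8
#define EXAMPLE_PID_MASK	((1 << EXAMPLE_PID_BITS) - 1)

/* Pack cpu and the low EXAMPLE_PID_BITS bits of pid into one value. */
static int example_cpu_pid_to_cpupid(int cpu, int pid)
{
	return (cpu << EXAMPLE_PID_BITS) | (pid & EXAMPLE_PID_MASK);
}

static int example_cpupid_to_cpu(int cpupid)
{
	return cpupid >> EXAMPLE_PID_BITS;
}

static int example_cpupid_to_pid(int cpupid)
{
	return cpupid & EXAMPLE_PID_MASK;
}

int main(void)
{
	int cpupid = example_cpu_pid_to_cpupid(3, 1234);

	/* Prints cpu=3 pid=210, since only the low 8 pid bits are kept. */
	printf("cpu=%d pid=%d\n",
	       example_cpupid_to_cpu(cpupid),
	       example_cpupid_to_pid(cpupid));
	return 0;
}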
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c  16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index cc7f20691c82..5162e6d0d652 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -69,8 +69,8 @@
 
 #include "internal.h"
 
-#ifdef LAST_NIDPID_NOT_IN_PAGE_FLAGS
-#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_nidpid.
+#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
+#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
 #endif
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
@@ -3536,7 +3536,7 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct page *page = NULL;
 	spinlock_t *ptl;
 	int page_nid = -1;
-	int last_nidpid;
+	int last_cpupid;
 	int target_nid;
 	bool migrated = false;
 
@@ -3567,7 +3567,7 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 	BUG_ON(is_zero_pfn(page_to_pfn(page)));
 
-	last_nidpid = page_nidpid_last(page);
+	last_cpupid = page_cpupid_last(page);
 	page_nid = page_to_nid(page);
 	target_nid = numa_migrate_prep(page, vma, addr, page_nid);
 	pte_unmap_unlock(ptep, ptl);
@@ -3583,7 +3583,7 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 out:
 	if (page_nid != -1)
-		task_numa_fault(last_nidpid, page_nid, 1, migrated);
+		task_numa_fault(last_cpupid, page_nid, 1, migrated);
 	return 0;
 }
 
@@ -3598,7 +3598,7 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	unsigned long offset;
 	spinlock_t *ptl;
 	bool numa = false;
-	int last_nidpid;
+	int last_cpupid;
 
 	spin_lock(&mm->page_table_lock);
 	pmd = *pmdp;
@@ -3643,7 +3643,7 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		if (unlikely(!page))
 			continue;
 
-		last_nidpid = page_nidpid_last(page);
+		last_cpupid = page_cpupid_last(page);
 		page_nid = page_to_nid(page);
 		target_nid = numa_migrate_prep(page, vma, addr, page_nid);
 		pte_unmap_unlock(pte, ptl);
@@ -3656,7 +3656,7 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		}
 
 		if (page_nid != -1)
-			task_numa_fault(last_nidpid, page_nid, 1, migrated);
+			task_numa_fault(last_cpupid, page_nid, 1, migrated);
 
 		pte = pte_offset_map_lock(mm, pmdp, addr, &ptl);
 	}