about summary refs log tree commit diff stats
path: root/mm/memory.c
diff options
context:
space:
mode:
author: Mel Gorman <mgorman@suse.de> 2013-10-07 06:29:03 -0400
committer: Ingo Molnar <mingo@kernel.org> 2013-10-09 06:40:30 -0400
commit: ac8e895bd260cb8bb19ade6a3abd44e7abe9a01d (patch)
tree: fe0d50baf0dad412fd7d5ba0286ce95e08a363ac /mm/memory.c
parent: e6628d5b0a2979f3e0ee6f7783ede5df50cb9ede (diff)
sched/numa: Add infrastructure for split shared/private accounting of NUMA hinting faults
Ideally it would be possible to distinguish between NUMA hinting faults that are private to a task and those that are shared. This patch prepares infrastructure for separately accounting shared and private faults by allocating the necessary buffers and passing in relevant information. For now, all faults are treated as private and detection will be introduced later. Signed-off-by: Mel Gorman <mgorman@suse.de> Reviewed-by: Rik van Riel <riel@redhat.com> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com> Signed-off-by: Peter Zijlstra <peterz@infradead.org> Link: http://lkml.kernel.org/r/1381141781-10992-26-git-send-email-mgorman@suse.de Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'mm/memory.c')
-rw-r--r-- mm/memory.c | 8
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index ed51f15136ee..24bc9b848af6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3536,6 +3536,7 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3536 struct page *page = NULL; 3536 struct page *page = NULL;
3537 spinlock_t *ptl; 3537 spinlock_t *ptl;
3538 int page_nid = -1; 3538 int page_nid = -1;
3539 int last_nid;
3539 int target_nid; 3540 int target_nid;
3540 bool migrated = false; 3541 bool migrated = false;
3541 3542
@@ -3566,6 +3567,7 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3566 } 3567 }
3567 BUG_ON(is_zero_pfn(page_to_pfn(page))); 3568 BUG_ON(is_zero_pfn(page_to_pfn(page)));
3568 3569
3570 last_nid = page_nid_last(page);
3569 page_nid = page_to_nid(page); 3571 page_nid = page_to_nid(page);
3570 target_nid = numa_migrate_prep(page, vma, addr, page_nid); 3572 target_nid = numa_migrate_prep(page, vma, addr, page_nid);
3571 pte_unmap_unlock(ptep, ptl); 3573 pte_unmap_unlock(ptep, ptl);
@@ -3581,7 +3583,7 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3581 3583
3582out: 3584out:
3583 if (page_nid != -1) 3585 if (page_nid != -1)
3584 task_numa_fault(page_nid, 1, migrated); 3586 task_numa_fault(last_nid, page_nid, 1, migrated);
3585 return 0; 3587 return 0;
3586} 3588}
3587 3589
@@ -3596,6 +3598,7 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3596 unsigned long offset; 3598 unsigned long offset;
3597 spinlock_t *ptl; 3599 spinlock_t *ptl;
3598 bool numa = false; 3600 bool numa = false;
3601 int last_nid;
3599 3602
3600 spin_lock(&mm->page_table_lock); 3603 spin_lock(&mm->page_table_lock);
3601 pmd = *pmdp; 3604 pmd = *pmdp;
@@ -3643,6 +3646,7 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3643 if (unlikely(page_mapcount(page) != 1)) 3646 if (unlikely(page_mapcount(page) != 1))
3644 continue; 3647 continue;
3645 3648
3649 last_nid = page_nid_last(page);
3646 page_nid = page_to_nid(page); 3650 page_nid = page_to_nid(page);
3647 target_nid = numa_migrate_prep(page, vma, addr, page_nid); 3651 target_nid = numa_migrate_prep(page, vma, addr, page_nid);
3648 pte_unmap_unlock(pte, ptl); 3652 pte_unmap_unlock(pte, ptl);
@@ -3655,7 +3659,7 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3655 } 3659 }
3656 3660
3657 if (page_nid != -1) 3661 if (page_nid != -1)
3658 task_numa_fault(page_nid, 1, migrated); 3662 task_numa_fault(last_nid, page_nid, 1, migrated);
3659 3663
3660 pte = pte_offset_map_lock(mm, pmdp, addr, &ptl); 3664 pte = pte_offset_map_lock(mm, pmdp, addr, &ptl);
3661 } 3665 }