author     Dave Hansen <dave@linux.vnet.ibm.com>          2011-09-20 18:19:39 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org> 2011-09-21 16:15:44 -0400
commit     3200a8aaab0c9ccdc0f59b0dac2d4a47029137fa
tree       33ba986eab9d2663b44082c73114e2599cd06537 /fs/proc
parent     eb4866d0066ffd5446751c102d64feb3318d8bd1
break out numa_maps gather_pte_stats() checks
gather_pte_stats() does a number of checks on a target page
to see whether it should even be considered for statistics.
This breaks that code out into a separate function so that
we can use it in the transparent hugepage case in the next
patch.
Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
Acked-by: Hugh Dickins <hughd@google.com>
Reviewed-by: Christoph Lameter <cl@gentwo.org>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/proc')
-rw-r--r--   fs/proc/task_mmu.c | 39
1 file changed, 24 insertions(+), 15 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 61342a454bd9..9dca07e0758d 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -904,6 +904,29 @@ static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
         md->node[page_to_nid(page)] += nr_pages;
 }
 
+static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
+                unsigned long addr)
+{
+        struct page *page;
+        int nid;
+
+        if (!pte_present(pte))
+                return NULL;
+
+        page = vm_normal_page(vma, addr, pte);
+        if (!page)
+                return NULL;
+
+        if (PageReserved(page))
+                return NULL;
+
+        nid = page_to_nid(page);
+        if (!node_isset(nid, node_states[N_HIGH_MEMORY]))
+                return NULL;
+
+        return page;
+}
+
 static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
                 unsigned long end, struct mm_walk *walk)
 {
@@ -915,23 +938,9 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
         md = walk->private;
         orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
         do {
-                struct page *page;
-                int nid;
-
-                if (!pte_present(*pte))
-                        continue;
-
-                page = vm_normal_page(md->vma, addr, *pte);
+                struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
                 if (!page)
                         continue;
-
-                if (PageReserved(page))
-                        continue;
-
-                nid = page_to_nid(page);
-                if (!node_isset(nid, node_states[N_HIGH_MEMORY]))
-                        continue;
-
                 gather_stats(page, md, pte_dirty(*pte), 1);
 
         } while (pte++, addr += PAGE_SIZE, addr != end);
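
As the commit message notes, the helper is being split out so the transparent hugepage path can share the same page checks in the next patch. The following is only a rough, hypothetical sketch of that reuse; the function name gather_huge_pmd_stats and the assumption that the caller handles huge-PMD locking and splitting are illustrative, not taken from this commit or from the actual follow-up patch.

/*
 * Hypothetical sketch only, not code from this commit: reuse
 * can_gather_numa_stats() for a PMD that maps a transparent huge
 * page by treating the huge pmd as one large pte and accounting
 * HPAGE_PMD_SIZE/PAGE_SIZE base pages in a single gather_stats()
 * call. Huge-PMD locking and split handling are assumed to be
 * done by the caller.
 */
static void gather_huge_pmd_stats(pmd_t *pmd, struct vm_area_struct *vma,
                                  unsigned long addr, struct numa_maps *md)
{
        pte_t huge_pte = *(pte_t *)pmd;
        struct page *page;

        page = can_gather_numa_stats(huge_pte, vma, addr);
        if (page)
                gather_stats(page, md, pte_dirty(huge_pte),
                             HPAGE_PMD_SIZE / PAGE_SIZE);
}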