diff options
author | Dave Hansen <dave@linux.vnet.ibm.com> | 2011-03-22 19:32:58 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-03-22 20:44:04 -0400 |
commit | ae11c4d9f646064cf086e2f8cd4b3c475df7739c (patch) | |
tree | 5c565c6250749daa757852a629b25815a6919476 /fs/proc | |
parent | 033193275b3ffcfe7f3fde7b569f3d207f6cd6a0 (diff) |
smaps: break out smaps_pte_entry() from smaps_pte_range()
We will use smaps_pte_entry() in a moment to handle both small and
transparent large pages. But we must break it out of smaps_pte_range()
first.
Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: David Rientjes <rientjes@google.com>
Reviewed-by: Eric B Munson <emunson@mgebm.net>
Tested-by: Eric B Munson <emunson@mgebm.net>
Cc: Michael J Wolf <mjwolf@us.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Matt Mackall <mpm@selenic.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/proc')
-rw-r--r-- | fs/proc/task_mmu.c | 87 |
1 file changed, 47 insertions, 40 deletions
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 78fd3621f565..5cd06fa3106b 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
@@ -333,56 +333,63 @@ struct mem_size_stats { | |||
333 | u64 pss; | 333 | u64 pss; |
334 | }; | 334 | }; |
335 | 335 | ||
336 | static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, | 336 | |
337 | struct mm_walk *walk) | 337 | static void smaps_pte_entry(pte_t ptent, unsigned long addr, |
338 | struct mm_walk *walk) | ||
338 | { | 339 | { |
339 | struct mem_size_stats *mss = walk->private; | 340 | struct mem_size_stats *mss = walk->private; |
340 | struct vm_area_struct *vma = mss->vma; | 341 | struct vm_area_struct *vma = mss->vma; |
341 | pte_t *pte, ptent; | ||
342 | spinlock_t *ptl; | ||
343 | struct page *page; | 342 | struct page *page; |
344 | int mapcount; | 343 | int mapcount; |
345 | 344 | ||
346 | split_huge_page_pmd(walk->mm, pmd); | 345 | if (is_swap_pte(ptent)) { |
347 | 346 | mss->swap += PAGE_SIZE; | |
348 | pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); | 347 | return; |
349 | for (; addr != end; pte++, addr += PAGE_SIZE) { | 348 | } |
350 | ptent = *pte; | ||
351 | 349 | ||
352 | if (is_swap_pte(ptent)) { | 350 | if (!pte_present(ptent)) |
353 | mss->swap += PAGE_SIZE; | 351 | return; |
354 | continue; | 352 | |
355 | } | 353 | page = vm_normal_page(vma, addr, ptent); |
354 | if (!page) | ||
355 | return; | ||
356 | |||
357 | if (PageAnon(page)) | ||
358 | mss->anonymous += PAGE_SIZE; | ||
359 | |||
360 | mss->resident += PAGE_SIZE; | ||
361 | /* Accumulate the size in pages that have been accessed. */ | ||
362 | if (pte_young(ptent) || PageReferenced(page)) | ||
363 | mss->referenced += PAGE_SIZE; | ||
364 | mapcount = page_mapcount(page); | ||
365 | if (mapcount >= 2) { | ||
366 | if (pte_dirty(ptent) || PageDirty(page)) | ||
367 | mss->shared_dirty += PAGE_SIZE; | ||
368 | else | ||
369 | mss->shared_clean += PAGE_SIZE; | ||
370 | mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount; | ||
371 | } else { | ||
372 | if (pte_dirty(ptent) || PageDirty(page)) | ||
373 | mss->private_dirty += PAGE_SIZE; | ||
374 | else | ||
375 | mss->private_clean += PAGE_SIZE; | ||
376 | mss->pss += (PAGE_SIZE << PSS_SHIFT); | ||
377 | } | ||
378 | } | ||
356 | 379 | ||
357 | if (!pte_present(ptent)) | 380 | static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, |
358 | continue; | 381 | struct mm_walk *walk) |
382 | { | ||
383 | struct mem_size_stats *mss = walk->private; | ||
384 | struct vm_area_struct *vma = mss->vma; | ||
385 | pte_t *pte; | ||
386 | spinlock_t *ptl; | ||
359 | 387 | ||
360 | page = vm_normal_page(vma, addr, ptent); | 388 | split_huge_page_pmd(walk->mm, pmd); |
361 | if (!page) | ||
362 | continue; | ||
363 | 389 | ||
364 | if (PageAnon(page)) | 390 | pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); |
365 | mss->anonymous += PAGE_SIZE; | 391 | for (; addr != end; pte++, addr += PAGE_SIZE) |
366 | 392 | smaps_pte_entry(*pte, addr, walk); | |
367 | mss->resident += PAGE_SIZE; | ||
368 | /* Accumulate the size in pages that have been accessed. */ | ||
369 | if (pte_young(ptent) || PageReferenced(page)) | ||
370 | mss->referenced += PAGE_SIZE; | ||
371 | mapcount = page_mapcount(page); | ||
372 | if (mapcount >= 2) { | ||
373 | if (pte_dirty(ptent) || PageDirty(page)) | ||
374 | mss->shared_dirty += PAGE_SIZE; | ||
375 | else | ||
376 | mss->shared_clean += PAGE_SIZE; | ||
377 | mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount; | ||
378 | } else { | ||
379 | if (pte_dirty(ptent) || PageDirty(page)) | ||
380 | mss->private_dirty += PAGE_SIZE; | ||
381 | else | ||
382 | mss->private_clean += PAGE_SIZE; | ||
383 | mss->pss += (PAGE_SIZE << PSS_SHIFT); | ||
384 | } | ||
385 | } | ||
386 | pte_unmap_unlock(pte - 1, ptl); | 393 | pte_unmap_unlock(pte - 1, ptl); |
387 | cond_resched(); | 394 | cond_resched(); |
388 | return 0; | 395 | return 0; |