author     KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>   2010-03-05 16:41:42 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>       2010-03-06 14:26:24 -0500
commit     b084d4353ff99d824d3bc5a5c2c22c70b1fba722 (patch)
tree       8178db2b337fc8a36e6ca2e1fc2e7d7473957e27 /mm
parent     34e55232e59f7b19050267a05ff1226e5cd122a5 (diff)
mm: count swap usage
A frequent question from users about memory management is how many swap entries are in use by each process; this information also gives useful hints to the oom-killer.  The per-process count can be obtained by scanning /proc/<pid>/smaps, but that is very slow and unsuitable for routine process-information tools such as 'ps' or 'top' (which are already slow enough).

This patch adds a swap-entry counter to mm_counter and updates it at each swap event.  The information is exported via the /proc/<pid>/status file:

[kamezawa@bluextal memory]$ cat /proc/self/status
Name:	cat
State:	R (running)
Tgid:	2910
Pid:	2910
PPid:	2823
TracerPid:	0
Uid:	500	500	500	500
Gid:	500	500	500	500
FDSize:	256
Groups:	500
VmPeak:	   82696 kB
VmSize:	   82696 kB
VmLck:	       0 kB
VmHWM:	     432 kB
VmRSS:	     432 kB
VmData:	     172 kB
VmStk:	      84 kB
VmExe:	      48 kB
VmLib:	    1568 kB
VmPTE:	      40 kB
VmSwap:	       0 kB   <=============== this

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
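For illustration only (not part of this patch): a minimal userspace sketch of how a tool like 'ps' or 'top' could pick up the new field cheaply.  It reads /proc/<pid>/status and prints the VmSwap: line, assuming a kernel that carries this change; the program name used below ("vmswap") is hypothetical.

/*
 * Hypothetical example, not from the patch: report VmSwap for a given
 * pid (or "self") by scanning /proc/<pid>/status instead of the much
 * slower /proc/<pid>/smaps walk mentioned in the changelog.
 */
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
	const char *pid = (argc > 1) ? argv[1] : "self";
	char path[64], line[256];
	FILE *fp;

	snprintf(path, sizeof(path), "/proc/%s/status", pid);
	fp = fopen(path, "r");
	if (!fp) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), fp)) {
		/* Field is absent on kernels without this patch. */
		if (strncmp(line, "VmSwap:", 7) == 0) {
			fputs(line, stdout);
			break;
		}
	}
	fclose(fp);
	return 0;
}

Usage: ./vmswap 2910 (or plain ./vmswap for the current process).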
Diffstat (limited to 'mm')
-rw-r--r--   mm/memory.c    16
-rw-r--r--   mm/rmap.c       1
-rw-r--r--   mm/swapfile.c   1
3 files changed, 14 insertions(+), 4 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index a4597614f18d..77d9f840936b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -679,7 +679,9 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 						 &src_mm->mmlist);
 				spin_unlock(&mmlist_lock);
 			}
-			if (is_write_migration_entry(entry) &&
+			if (likely(!non_swap_entry(entry)))
+				rss[MM_SWAPENTS]++;
+			else if (is_write_migration_entry(entry) &&
 					is_cow_mapping(vm_flags)) {
 				/*
 				 * COW mappings require pages in both parent
@@ -974,9 +976,14 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 		if (pte_file(ptent)) {
 			if (unlikely(!(vma->vm_flags & VM_NONLINEAR)))
 				print_bad_pte(vma, addr, ptent, NULL);
-		} else if
-		  (unlikely(!free_swap_and_cache(pte_to_swp_entry(ptent))))
-			print_bad_pte(vma, addr, ptent, NULL);
+		} else {
+			swp_entry_t entry = pte_to_swp_entry(ptent);
+
+			if (!non_swap_entry(entry))
+				rss[MM_SWAPENTS]--;
+			if (unlikely(!free_swap_and_cache(entry)))
+				print_bad_pte(vma, addr, ptent, NULL);
+		}
 		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
 	} while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));
 
@@ -2692,6 +2699,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	 */
 
 	inc_mm_counter_fast(mm, MM_ANONPAGES);
+	dec_mm_counter_fast(mm, MM_SWAPENTS);
 	pte = mk_pte(page, vma->vm_page_prot);
 	if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
diff --git a/mm/rmap.c b/mm/rmap.c
index 73d0472884c2..5cb47111f79e 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -840,6 +840,7 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			spin_unlock(&mmlist_lock);
 		}
 		dec_mm_counter(mm, MM_ANONPAGES);
+		inc_mm_counter(mm, MM_SWAPENTS);
 	} else if (PAGE_MIGRATION) {
 		/*
 		 * Store the pfn of the page in a special migration
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 893984946a2c..187a21f8b7bd 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -840,6 +840,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 		goto out;
 	}
 
+	dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
 	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
 	get_page(page);
 	set_pte_at(vma->vm_mm, addr, pte,