diff options
| author | Fengguang Wu <wfg@mail.ustc.edu.cn> | 2008-02-05 01:28:56 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2008-02-05 12:44:16 -0500 |
| commit | ec4dd3eb35759f9fbeb5c1abb01403b2fde64cc9 (patch) | |
| tree | 0eaf4d91180556df61da6300463d946390ce55fb | |
| parent | 61d5048f149572434daee0cce5e1374a8a7cf3e8 (diff) | |
maps4: add proportional set size accounting in smaps
The "proportional set size" (PSS) of a process is the count of pages it has
in memory, where each page is divided by the number of processes sharing
it. So if a process has 1000 pages all to itself, and 1000 shared with one
other process, its PSS will be 1500.
- lwn.net: "ELC: How much memory are applications really using?"
The PSS proposed by Matt Mackall is a very nice metric for measuring a
process's memory footprint. So collect and export it via
/proc/<pid>/smaps.
Matt Mackall's pagemap/kpagemap and John Berthels's exmap can also do the
job. They are comprehensive tools. But for PSS, let's do it in the simple
way.
Cc: John Berthels <jjberthels@gmail.com>
Cc: Bernardo Innocenti <bernie@codewiz.org>
Cc: Padraig Brady <P@draigBrady.com>
Cc: Denys Vlasenko <vda.linux@googlemail.com>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Signed-off-by: Matt Mackall <mpm@selenic.com>
Signed-off-by: Fengguang Wu <wfg@mail.ustc.edu.cn>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Dave Hansen <haveblue@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| -rw-r--r-- | fs/proc/task_mmu.c | 28 |
1 files changed, 27 insertions, 1 deletions
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 8043a3eab52c..8952ce70315e 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
| @@ -114,6 +114,25 @@ static void pad_len_spaces(struct seq_file *m, int len) | |||
| 114 | seq_printf(m, "%*c", len, ' '); | 114 | seq_printf(m, "%*c", len, ' '); |
| 115 | } | 115 | } |
| 116 | 116 | ||
| 117 | /* | ||
| 118 | * Proportional Set Size(PSS): my share of RSS. | ||
| 119 | * | ||
| 120 | * PSS of a process is the count of pages it has in memory, where each | ||
| 121 | * page is divided by the number of processes sharing it. So if a | ||
| 122 | * process has 1000 pages all to itself, and 1000 shared with one other | ||
| 123 | * process, its PSS will be 1500. | ||
| 124 | * | ||
| 125 | * To keep (accumulated) division errors low, we adopt a 64bit | ||
| 126 | * fixed-point pss counter to minimize division errors. So (pss >> | ||
| 127 | * PSS_SHIFT) would be the real byte count. | ||
| 128 | * | ||
| 129 | * A shift of 12 before division means (assuming 4K page size): | ||
| 130 | * - 1M 3-user-pages add up to 8KB errors; | ||
| 131 | * - supports mapcount up to 2^24, or 16M; | ||
| 132 | * - supports PSS up to 2^52 bytes, or 4PB. | ||
| 133 | */ | ||
| 134 | #define PSS_SHIFT 12 | ||
| 135 | |||
| 117 | struct mem_size_stats | 136 | struct mem_size_stats |
| 118 | { | 137 | { |
| 119 | unsigned long resident; | 138 | unsigned long resident; |
| @@ -122,6 +141,7 @@ struct mem_size_stats | |||
| 122 | unsigned long private_clean; | 141 | unsigned long private_clean; |
| 123 | unsigned long private_dirty; | 142 | unsigned long private_dirty; |
| 124 | unsigned long referenced; | 143 | unsigned long referenced; |
| 144 | u64 pss; | ||
| 125 | }; | 145 | }; |
| 126 | 146 | ||
| 127 | struct pmd_walker { | 147 | struct pmd_walker { |
| @@ -195,6 +215,7 @@ static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats | |||
| 195 | seq_printf(m, | 215 | seq_printf(m, |
| 196 | "Size: %8lu kB\n" | 216 | "Size: %8lu kB\n" |
| 197 | "Rss: %8lu kB\n" | 217 | "Rss: %8lu kB\n" |
| 218 | "Pss: %8lu kB\n" | ||
| 198 | "Shared_Clean: %8lu kB\n" | 219 | "Shared_Clean: %8lu kB\n" |
| 199 | "Shared_Dirty: %8lu kB\n" | 220 | "Shared_Dirty: %8lu kB\n" |
| 200 | "Private_Clean: %8lu kB\n" | 221 | "Private_Clean: %8lu kB\n" |
| @@ -202,6 +223,7 @@ static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats | |||
| 202 | "Referenced: %8lu kB\n", | 223 | "Referenced: %8lu kB\n", |
| 203 | (vma->vm_end - vma->vm_start) >> 10, | 224 | (vma->vm_end - vma->vm_start) >> 10, |
| 204 | mss->resident >> 10, | 225 | mss->resident >> 10, |
| 226 | (unsigned long)(mss->pss >> (10 + PSS_SHIFT)), | ||
| 205 | mss->shared_clean >> 10, | 227 | mss->shared_clean >> 10, |
| 206 | mss->shared_dirty >> 10, | 228 | mss->shared_dirty >> 10, |
| 207 | mss->private_clean >> 10, | 229 | mss->private_clean >> 10, |
| @@ -226,6 +248,7 @@ static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd, | |||
| 226 | pte_t *pte, ptent; | 248 | pte_t *pte, ptent; |
| 227 | spinlock_t *ptl; | 249 | spinlock_t *ptl; |
| 228 | struct page *page; | 250 | struct page *page; |
| 251 | int mapcount; | ||
| 229 | 252 | ||
| 230 | pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); | 253 | pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); |
| 231 | for (; addr != end; pte++, addr += PAGE_SIZE) { | 254 | for (; addr != end; pte++, addr += PAGE_SIZE) { |
| @@ -242,16 +265,19 @@ static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd, | |||
| 242 | /* Accumulate the size in pages that have been accessed. */ | 265 | /* Accumulate the size in pages that have been accessed. */ |
| 243 | if (pte_young(ptent) || PageReferenced(page)) | 266 | if (pte_young(ptent) || PageReferenced(page)) |
| 244 | mss->referenced += PAGE_SIZE; | 267 | mss->referenced += PAGE_SIZE; |
| 245 | if (page_mapcount(page) >= 2) { | 268 | mapcount = page_mapcount(page); |
| 269 | if (mapcount >= 2) { | ||
| 246 | if (pte_dirty(ptent)) | 270 | if (pte_dirty(ptent)) |
| 247 | mss->shared_dirty += PAGE_SIZE; | 271 | mss->shared_dirty += PAGE_SIZE; |
| 248 | else | 272 | else |
| 249 | mss->shared_clean += PAGE_SIZE; | 273 | mss->shared_clean += PAGE_SIZE; |
| 274 | mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount; | ||
| 250 | } else { | 275 | } else { |
| 251 | if (pte_dirty(ptent)) | 276 | if (pte_dirty(ptent)) |
| 252 | mss->private_dirty += PAGE_SIZE; | 277 | mss->private_dirty += PAGE_SIZE; |
| 253 | else | 278 | else |
| 254 | mss->private_clean += PAGE_SIZE; | 279 | mss->private_clean += PAGE_SIZE; |
| 280 | mss->pss += (PAGE_SIZE << PSS_SHIFT); | ||
| 255 | } | 281 | } |
| 256 | } | 282 | } |
| 257 | pte_unmap_unlock(pte - 1, ptl); | 283 | pte_unmap_unlock(pte - 1, ptl); |
