author		David Rientjes <rientjes@google.com>	2007-05-06 17:49:24 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-07 15:12:52 -0400
commit		b813e931b4c8235bb42e301096ea97dbdee3e8fe (patch)
tree		f8182687bffe8e3b95bac69b2cc7fdfe674ddc53 /fs/proc/task_mmu.c
parent		f79f177c25016647cc92ffac8afa7cb96ce47011 (diff)
smaps: add clear_refs file to clear reference
Adds /proc/pid/clear_refs. When any non-zero number is written to this
file, pte_mkold() and ClearPageReferenced() are called for each pte and
its corresponding page, respectively, in that task's VMAs. This file is
only writable by the user who owns the task.

It is now possible to measure _approximately_ how much memory a task is
using by clearing the reference bits with

	echo 1 > /proc/pid/clear_refs

and checking the Referenced value for each VMA in the /proc/pid/smaps
output at a measured time interval. For example, to observe the
approximate change in memory footprint for a task, write a script that
clears the references (echo 1 > /proc/pid/clear_refs), sleeps, and then
greps for Referenced and extracts the size in kB. Add the sizes for each
VMA together for the total referenced footprint. Moments later, repeat
the process and observe the difference.

For example, using an efficient Mozilla:

	accumulated time	referenced memory
	----------------	-----------------
	      0 s		  408 kB
	      1 s		  408 kB
	      2 s		  556 kB
	      3 s		 1028 kB
	      4 s		  872 kB
	      5 s		 1956 kB
	      6 s		  416 kB
	      7 s		 1560 kB
	      8 s		 2336 kB
	      9 s		 1044 kB
	     10 s		  416 kB

This is a valuable tool to get an approximate measurement of the memory
footprint for a task.

Cc: Hugh Dickins <hugh@veritas.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Christoph Lameter <clameter@sgi.com>
Signed-off-by: David Rientjes <rientjes@google.com>
[akpm@linux-foundation.org: build fixes]
[mpm@selenic.com: rename for_each_pmd]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
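As a concrete illustration of that measurement loop, here is a minimal
shell sketch (not part of this patch; it assumes a kernel with this
patch applied, takes the target pid as its first argument, and must run
as the user who owns the task, since clear_refs is only writable by the
owner):

	#!/bin/sh
	# Sketch: sample the referenced footprint of task $1 once per second.
	pid=$1
	while true; do
		# Clear the pte young bits and PG_referenced on every page.
		echo 1 > /proc/$pid/clear_refs
		sleep 1
		# Sum the per-VMA "Referenced:" sizes from smaps (column 2, in kB).
		grep Referenced /proc/$pid/smaps | awk '{ t += $2 } END { print t " kB" }'
	done

Successive samples produce a table of referenced memory over time like
the Mozilla example above.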
Diffstat (limited to 'fs/proc/task_mmu.c')
-rw-r--r--	fs/proc/task_mmu.c | 79
1 file changed, 63 insertions(+), 16 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 199088ee969b..4008c060f7ef 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -195,7 +195,7 @@ static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats
195 "Shared_Dirty: %8lu kB\n" 195 "Shared_Dirty: %8lu kB\n"
196 "Private_Clean: %8lu kB\n" 196 "Private_Clean: %8lu kB\n"
197 "Private_Dirty: %8lu kB\n" 197 "Private_Dirty: %8lu kB\n"
198 "Pgs_Referenced: %8lu kB\n", 198 "Referenced: %8lu kB\n",
199 (vma->vm_end - vma->vm_start) >> 10, 199 (vma->vm_end - vma->vm_start) >> 10,
200 mss->resident >> 10, 200 mss->resident >> 10,
201 mss->shared_clean >> 10, 201 mss->shared_clean >> 10,
@@ -214,9 +214,9 @@ static int show_map(struct seq_file *m, void *v)
 	return show_map_internal(m, v, NULL);
 }
 
-static void smaps_one_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-			  unsigned long addr, unsigned long end,
-			  void *private)
+static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+			    unsigned long addr, unsigned long end,
+			    void *private)
 {
 	struct mem_size_stats *mss = private;
 	pte_t *pte, ptent;
@@ -254,8 +254,34 @@ static void smaps_one_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	cond_resched();
 }
 
-static inline void for_each_pmd_in_pud(struct pmd_walker *walker, pud_t *pud,
-				       unsigned long addr, unsigned long end)
+static void clear_refs_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+				 unsigned long addr, unsigned long end,
+				 void *private)
+{
+	pte_t *pte, ptent;
+	spinlock_t *ptl;
+	struct page *page;
+
+	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+	for (; addr != end; pte++, addr += PAGE_SIZE) {
+		ptent = *pte;
+		if (!pte_present(ptent))
+			continue;
+
+		page = vm_normal_page(vma, addr, ptent);
+		if (!page)
+			continue;
+
+		/* Clear accessed and referenced bits. */
+		ptep_test_and_clear_young(vma, addr, pte);
+		ClearPageReferenced(page);
+	}
+	pte_unmap_unlock(pte - 1, ptl);
+	cond_resched();
+}
+
+static inline void walk_pmd_range(struct pmd_walker *walker, pud_t *pud,
+				  unsigned long addr, unsigned long end)
 {
 	pmd_t *pmd;
 	unsigned long next;
@@ -269,8 +295,8 @@ static inline void for_each_pmd_in_pud(struct pmd_walker *walker, pud_t *pud,
 	}
 }
 
-static inline void for_each_pud_in_pgd(struct pmd_walker *walker, pgd_t *pgd,
-				       unsigned long addr, unsigned long end)
+static inline void walk_pud_range(struct pmd_walker *walker, pgd_t *pgd,
+				  unsigned long addr, unsigned long end)
 {
 	pud_t *pud;
 	unsigned long next;
@@ -280,15 +306,24 @@ static inline void for_each_pud_in_pgd(struct pmd_walker *walker, pgd_t *pgd,
 		next = pud_addr_end(addr, end);
 		if (pud_none_or_clear_bad(pud))
 			continue;
-		for_each_pmd_in_pud(walker, pud, addr, next);
+		walk_pmd_range(walker, pud, addr, next);
 	}
 }
 
-static inline void for_each_pmd(struct vm_area_struct *vma,
-				void (*action)(struct vm_area_struct *, pmd_t *,
-					       unsigned long, unsigned long,
-					       void *),
-				void *private)
+/*
+ * walk_page_range - walk the page tables of a VMA with a callback
+ * @vma - VMA to walk
+ * @action - callback invoked for every bottom-level (PTE) page table
+ * @private - private data passed to the callback function
+ *
+ * Recursively walk the page table for the memory area in a VMA, calling
+ * a callback for every bottom-level (PTE) page table.
+ */
+static inline void walk_page_range(struct vm_area_struct *vma,
+				   void (*action)(struct vm_area_struct *,
+						  pmd_t *, unsigned long,
+						  unsigned long, void *),
+				   void *private)
 {
 	unsigned long addr = vma->vm_start;
 	unsigned long end = vma->vm_end;
@@ -305,7 +340,7 @@ static inline void for_each_pmd(struct vm_area_struct *vma,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
-		for_each_pud_in_pgd(&walker, pgd, addr, next);
+		walk_pud_range(&walker, pgd, addr, next);
 	}
 }
 
@@ -316,10 +351,22 @@ static int show_smap(struct seq_file *m, void *v)
 
 	memset(&mss, 0, sizeof mss);
 	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
-		for_each_pmd(vma, smaps_one_pmd, &mss);
+		walk_page_range(vma, smaps_pte_range, &mss);
 	return show_map_internal(m, v, &mss);
 }
 
+void clear_refs_smap(struct mm_struct *mm)
+{
+	struct vm_area_struct *vma;
+
+	down_read(&mm->mmap_sem);
+	for (vma = mm->mmap; vma; vma = vma->vm_next)
+		if (vma->vm_mm && !is_vm_hugetlb_page(vma))
+			walk_page_range(vma, clear_refs_pte_range, NULL);
+	flush_tlb_mm(mm);
+	up_read(&mm->mmap_sem);
+}
+
 static void *m_start(struct seq_file *m, loff_t *pos)
 {
 	struct proc_maps_private *priv = m->private;