author     Paul Mackerras <paulus@samba.org>   2005-10-30 21:37:12 -0500
committer  Paul Mackerras <paulus@samba.org>   2005-10-30 21:37:12 -0500
commit     23fd07750a789a66fe88cf173d52a18f1a387da4 (patch)
tree       06fdd6df35fdb835abdaa9b754d62f6b84b97250 /fs/proc/task_mmu.c
parent     bd787d438a59266af3c9f6351644c85ef1dd21fe (diff)
parent     ed28f96ac1960f30f818374d65be71d2fdf811b0 (diff)
Merge ../linux-2.6 by hand
Diffstat (limited to 'fs/proc/task_mmu.c')
-rw-r--r--   fs/proc/task_mmu.c   51
1 file changed, 31 insertions(+), 20 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index c7ef3e48e35b..d2fa42006d8f 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -14,22 +14,41 @@
 char *task_mem(struct mm_struct *mm, char *buffer)
 {
        unsigned long data, text, lib;
+       unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
+
+       /*
+        * Note: to minimize their overhead, mm maintains hiwater_vm and
+        * hiwater_rss only when about to *lower* total_vm or rss.  Any
+        * collector of these hiwater stats must therefore get total_vm
+        * and rss too, which will usually be the higher.  Barriers? not
+        * worth the effort, such snapshots can always be inconsistent.
+        */
+       hiwater_vm = total_vm = mm->total_vm;
+       if (hiwater_vm < mm->hiwater_vm)
+               hiwater_vm = mm->hiwater_vm;
+       hiwater_rss = total_rss = get_mm_rss(mm);
+       if (hiwater_rss < mm->hiwater_rss)
+               hiwater_rss = mm->hiwater_rss;
 
        data = mm->total_vm - mm->shared_vm - mm->stack_vm;
        text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
        lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
        buffer += sprintf(buffer,
+               "VmPeak:\t%8lu kB\n"
                "VmSize:\t%8lu kB\n"
                "VmLck:\t%8lu kB\n"
+               "VmHWM:\t%8lu kB\n"
                "VmRSS:\t%8lu kB\n"
                "VmData:\t%8lu kB\n"
                "VmStk:\t%8lu kB\n"
                "VmExe:\t%8lu kB\n"
                "VmLib:\t%8lu kB\n"
                "VmPTE:\t%8lu kB\n",
-               (mm->total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
+               hiwater_vm << (PAGE_SHIFT-10),
+               (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
                mm->locked_vm << (PAGE_SHIFT-10),
-               get_mm_counter(mm, rss) << (PAGE_SHIFT-10),
+               hiwater_rss << (PAGE_SHIFT-10),
+               total_rss << (PAGE_SHIFT-10),
                data << (PAGE_SHIFT-10),
                mm->stack_vm << (PAGE_SHIFT-10), text, lib,
                (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
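The hunk above relies on a lazy high-watermark: mm folds total_vm/rss into hiwater_vm/hiwater_rss only just before lowering them, so a reader must take the maximum of the live counter and the stored peak. A minimal userspace sketch of that snapshot pattern, using a hypothetical counters struct rather than the kernel's mm_struct:

        #include <stdio.h>

        /* Hypothetical stand-in for mm_struct's counters. */
        struct counters {
                unsigned long total;    /* live value, updated on every change */
                unsigned long hiwater;  /* only refreshed just before total shrinks */
        };

        /* Snapshot both values; the peak is whichever is higher right now. */
        static void snapshot(const struct counters *c,
                             unsigned long *cur, unsigned long *peak)
        {
                *cur = c->total;
                *peak = c->hiwater;
                if (*peak < *cur)       /* lazy hiwater may lag the live counter */
                        *peak = *cur;
        }

        int main(void)
        {
                struct counters c = { .total = 120, .hiwater = 100 };
                unsigned long cur, peak;

                snapshot(&c, &cur, &peak);
                printf("cur=%lu peak=%lu\n", cur, peak);  /* cur=120 peak=120 */
                return 0;
        }

As the comment in the hunk notes, the two reads are unsynchronized, so the snapshot can be momentarily inconsistent; the maximum just guarantees the reported peak never trails the reported current value.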
@@ -44,13 +63,11 @@ unsigned long task_vsize(struct mm_struct *mm)
 int task_statm(struct mm_struct *mm, int *shared, int *text,
               int *data, int *resident)
 {
-       int rss = get_mm_counter(mm, rss);
-
-       *shared = rss - get_mm_counter(mm, anon_rss);
+       *shared = get_mm_counter(mm, file_rss);
        *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
                                                                >> PAGE_SHIFT;
        *data = mm->total_vm - mm->shared_vm;
-       *resident = rss;
+       *resident = *shared + get_mm_counter(mm, anon_rss);
        return mm->total_vm;
 }
 
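This hunk tracks the split of the old single rss counter into file_rss and anon_rss: shared is now read directly and resident is their sum. The identities it relies on, sketched with hypothetical plain integers standing in for the per-mm counter reads:

        #include <assert.h>

        int main(void)
        {
                /* Hypothetical page counts standing in for get_mm_counter() reads. */
                unsigned long file_rss = 300;   /* file-backed resident pages */
                unsigned long anon_rss = 200;   /* anonymous resident pages */

                unsigned long shared = file_rss;            /* new: read directly */
                unsigned long resident = shared + anon_rss; /* new: sum of the two */

                /* Old scheme derived the same numbers the other way around:
                 * shared = rss - anon_rss, resident = rss. */
                unsigned long rss = file_rss + anon_rss;
                assert(shared == rss - anon_rss);
                assert(resident == rss);
                return 0;
        }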
@@ -186,13 +203,14 @@ static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                 struct mem_size_stats *mss)
 {
        pte_t *pte, ptent;
+       spinlock_t *ptl;
        unsigned long pfn;
        struct page *page;
 
-       pte = pte_offset_map(pmd, addr);
+       pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        do {
                ptent = *pte;
-               if (pte_none(ptent) || !pte_present(ptent))
+               if (!pte_present(ptent))
                        continue;
 
                mss->resident += PAGE_SIZE;
@@ -213,8 +231,8 @@ static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                        mss->private_clean += PAGE_SIZE;
                }
        } while (pte++, addr += PAGE_SIZE, addr != end);
-       pte_unmap(pte - 1);
-       cond_resched_lock(&vma->vm_mm->page_table_lock);
+       pte_unmap_unlock(pte - 1, ptl);
+       cond_resched();
 }
 
 static inline void smaps_pmd_range(struct vm_area_struct *vma, pud_t *pud,
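The two hunks above convert smaps_pte_range() from holding the mm-wide page_table_lock across the whole walk to the pte_offset_map_lock()/pte_unmap_unlock() idiom, which maps a PTE page and takes the lock covering it in one step; with no lock held between iterations, a plain cond_resched() replaces cond_resched_lock(). A runnable userspace analogue of the same locking shape, with hypothetical chunk locks standing in for the per-PTE-page lock:

        #include <pthread.h>
        #include <sched.h>
        #include <stdio.h>

        #define NCHUNKS 8
        #define CHUNK   512

        /* Hypothetical analogue: one lock per chunk of a table (the "PTE
         * page") instead of a single global lock held across the walk. */
        static pthread_mutex_t chunk_lock[NCHUNKS];
        static unsigned long table[NCHUNKS][CHUNK];

        static unsigned long walk_table(void)
        {
                unsigned long resident = 0;

                for (int i = 0; i < NCHUNKS; i++) {
                        /* Analogue of pte_offset_map_lock(): take only the
                         * lock covering the entries being examined. */
                        pthread_mutex_lock(&chunk_lock[i]);
                        for (int j = 0; j < CHUNK; j++)
                                if (table[i][j])        /* "pte_present()" */
                                        resident++;
                        /* Analogue of pte_unmap_unlock(). */
                        pthread_mutex_unlock(&chunk_lock[i]);

                        sched_yield();  /* cond_resched(): safe, no lock held */
                }
                return resident;
        }

        int main(void)
        {
                for (int i = 0; i < NCHUNKS; i++)
                        pthread_mutex_init(&chunk_lock[i], NULL);
                table[3][7] = 1;
                printf("resident entries: %lu\n", walk_table());
                return 0;
        }

The payoff is twofold: other CPUs can touch unrelated parts of the address space while the walk runs, and the walker can reschedule between chunks without lock juggling.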
@@ -268,17 +286,11 @@ static inline void smaps_pgd_range(struct vm_area_struct *vma,
 static int show_smap(struct seq_file *m, void *v)
 {
        struct vm_area_struct *vma = v;
-       struct mm_struct *mm = vma->vm_mm;
        struct mem_size_stats mss;
 
        memset(&mss, 0, sizeof mss);
-
-       if (mm) {
-               spin_lock(&mm->page_table_lock);
+       if (vma->vm_mm)
                smaps_pgd_range(vma, vma->vm_start, vma->vm_end, &mss);
-               spin_unlock(&mm->page_table_lock);
-       }
-
        return show_map_internal(m, v, &mss);
 }
 
@@ -407,7 +419,6 @@ static struct numa_maps *get_numa_maps(const struct vm_area_struct *vma)
        for_each_node(i)
                md->node[i] =0;
 
-       spin_lock(&mm->page_table_lock);
        for (vaddr = vma->vm_start; vaddr < vma->vm_end; vaddr += PAGE_SIZE) {
                page = follow_page(mm, vaddr, 0);
                if (page) {
@@ -422,8 +433,8 @@ static struct numa_maps *get_numa_maps(const struct vm_area_struct *vma)
                                md->anon++;
                        md->node[page_to_nid(page)]++;
                }
+               cond_resched();
        }
-       spin_unlock(&mm->page_table_lock);
        return md;
 }
 
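With the coarse lock gone from get_numa_maps(), the per-page cond_resched() bounds scheduling latency over large VMAs while the loop buckets each mapped page by its NUMA node. A toy version of that bucketing, with a stubbed page_to_nid() (the real one reads the node id out of struct page):

        #include <stdio.h>

        #define MAX_NUMNODES 4

        /* Hypothetical analogue of get_numa_maps(): count pages per node by
         * walking a range and bucketing on each page's node id. */
        struct numa_md {
                unsigned long node[MAX_NUMNODES];
        };

        /* Stand-in for page_to_nid(): derive a node id from a page index. */
        static int page_to_nid_stub(unsigned long pfn)
        {
                return (int)(pfn % MAX_NUMNODES);
        }

        int main(void)
        {
                struct numa_md md = { { 0 } };

                for (unsigned long pfn = 0; pfn < 1024; pfn++)
                        md.node[page_to_nid_stub(pfn)]++;

                for (int n = 0; n < MAX_NUMNODES; n++)
                        printf("N%d=%lu\n", n, md.node[n]);
                return 0;
        }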
@@ -469,7 +480,7 @@ static int show_numa_map(struct seq_file *m, void *v)
                seq_printf(m, " interleave={");
                first = 1;
                for_each_node(n) {
-                       if (test_bit(n, pol->v.nodes)) {
+                       if (node_isset(n, pol->v.nodes)) {
                                if (!first)
                                        seq_putc(m,',');
                                else
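The last hunk swaps raw test_bit() on the policy's node bitmap for the typed node_isset() accessor that goes with nodemask_t, keeping callers independent of the mask's internal representation. A toy version of the idea, with a hypothetical miniature nodemask type rather than the kernel's:

        #include <stdio.h>
        #include <string.h>

        #define MAX_NUMNODES 64
        #define BITS_PER_LONG (8 * sizeof(unsigned long))

        /* Hypothetical miniature of nodemask_t: a typed wrapper around a
         * bitmap, queried through accessors instead of raw bit operations. */
        typedef struct {
                unsigned long bits[MAX_NUMNODES / BITS_PER_LONG];
        } nodemask;

        static int nodemask_isset(int node, const nodemask *mask)
        {
                return (mask->bits[node / BITS_PER_LONG]
                        >> (node % BITS_PER_LONG)) & 1;
        }

        static void nodemask_set(int node, nodemask *mask)
        {
                mask->bits[node / BITS_PER_LONG] |=
                        1UL << (node % BITS_PER_LONG);
        }

        int main(void)
        {
                nodemask interleave;

                memset(&interleave, 0, sizeof interleave);
                nodemask_set(0, &interleave);
                nodemask_set(2, &interleave);

                for (int n = 0; n < 4; n++)
                        printf("node %d: %s\n", n,
                               nodemask_isset(n, &interleave) ? "set" : "clear");
                return 0;
        }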