author    David Howells <dhowells@redhat.com>    2009-01-08 07:04:47 -0500
committer David Howells <dhowells@redhat.com>    2009-01-08 07:04:47 -0500
commit    38f714795b7cf4103c54152200ca66b524f8ed6e (patch)
tree      9378b4a9f8e862e3faa63b3874fc8917d4aad2ea /fs
parent    dd8632a12e500a684478fea0951f380478d56fed (diff)
NOMMU: Improve procfs output using per-MM VMAs
Improve procfs output using per-MM VMAs for process memory accounting.

Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Mike Frysinger <vapier.adi@gmail.com>
Acked-by: Paul Mundt <lethal@linux-sh.org>
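
The accounting rule this patch adopts can be sketched in userspace C as follows. This is a minimal illustration, not the kernel code: the two structs are simplified stand-ins for the kernel's vm_area_struct and vm_region, kobjsize() is stubbed with a fixed value, account_vma() is a hypothetical helper, and the mm_shared flag stands in for the kernel's "mm_count > 1 || vma->vm_flags & VM_MAYSHARE" test.

#include <stdio.h>

struct vm_region { unsigned long vm_start, vm_end; };

struct vm_area_struct {
        unsigned long vm_start, vm_end;
        struct vm_region *vm_region;    /* backing region; NULL if none */
};

/* Stand-in for the kernel's kobjsize(): the size of the allocation
 * backing an object.  A fixed value keeps the sketch self-contained. */
static unsigned long kobjsize(const void *objp)
{
        (void)objp;
        return 64;
}

/* Charge one VMA the way the patched task_mem() does: shared mappings
 * go to sbytes, private ones to bytes; slack is the unused tail of the
 * backing region beyond the VMA's end. */
static void account_vma(const struct vm_area_struct *vma, int mm_shared,
                        unsigned long *bytes, unsigned long *sbytes,
                        unsigned long *slack)
{
        const struct vm_region *region = vma->vm_region;
        unsigned long size;

        *bytes += kobjsize(vma);        /* the VMA object itself */

        if (region)
                size = kobjsize(region) + (region->vm_end - region->vm_start);
        else
                size = vma->vm_end - vma->vm_start;

        if (mm_shared)                  /* mm_count > 1 or VM_MAYSHARE */
                *sbytes += size;
        else {
                *bytes += size;
                if (region)             /* tail of region past the VMA */
                        *slack = region->vm_end - vma->vm_end;
        }
}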
Diffstat (limited to 'fs')
-rw-r--r--  fs/proc/task_nommu.c  32
1 file changed, 22 insertions, 10 deletions
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index ca4a48d0d311..343ea1216bc8 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -16,24 +16,31 @@
 void task_mem(struct seq_file *m, struct mm_struct *mm)
 {
 	struct vm_area_struct *vma;
+	struct vm_region *region;
 	struct rb_node *p;
-	unsigned long bytes = 0, sbytes = 0, slack = 0;
+	unsigned long bytes = 0, sbytes = 0, slack = 0, size;
 
 	down_read(&mm->mmap_sem);
 	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
 		vma = rb_entry(p, struct vm_area_struct, vm_rb);
 
 		bytes += kobjsize(vma);
+
+		region = vma->vm_region;
+		if (region) {
+			size = kobjsize(region);
+			size += region->vm_end - region->vm_start;
+		} else {
+			size = vma->vm_end - vma->vm_start;
+		}
+
 		if (atomic_read(&mm->mm_count) > 1 ||
-		    vma->vm_region ||
 		    vma->vm_flags & VM_MAYSHARE) {
-			sbytes += kobjsize((void *) vma->vm_start);
-			if (vma->vm_region)
-				sbytes += kobjsize(vma->vm_region);
+			sbytes += size;
 		} else {
-			bytes += kobjsize((void *) vma->vm_start);
-			slack += kobjsize((void *) vma->vm_start) -
-				(vma->vm_end - vma->vm_start);
+			bytes += size;
+			if (region)
+				slack = region->vm_end - vma->vm_end;
 		}
 	}
 
@@ -77,7 +84,7 @@ unsigned long task_vsize(struct mm_struct *mm)
 	down_read(&mm->mmap_sem);
 	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
 		vma = rb_entry(p, struct vm_area_struct, vm_rb);
-		vsize += vma->vm_region->vm_end - vma->vm_region->vm_start;
+		vsize += vma->vm_end - vma->vm_start;
 	}
 	up_read(&mm->mmap_sem);
 	return vsize;
@@ -87,6 +94,7 @@ int task_statm(struct mm_struct *mm, int *shared, int *text,
 	       int *data, int *resident)
 {
 	struct vm_area_struct *vma;
+	struct vm_region *region;
 	struct rb_node *p;
 	int size = kobjsize(mm);
 
@@ -94,7 +102,11 @@ int task_statm(struct mm_struct *mm, int *shared, int *text,
 	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
 		vma = rb_entry(p, struct vm_area_struct, vm_rb);
 		size += kobjsize(vma);
-		size += kobjsize((void *) vma->vm_start);
+		region = vma->vm_region;
+		if (region) {
+			size += kobjsize(region);
+			size += region->vm_end - region->vm_start;
+		}
 	}
 
 	size += (*text = mm->end_code - mm->start_code);
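
To make the new slack rule concrete, here is a hypothetical worked example driving the account_vma() sketch above (all addresses are invented): a private VMA maps the first 8 KiB of a 16 KiB backing region, so slack is the region's unused 8 KiB tail rather than a value derived from kobjsize() of the mapped address, as the old code computed.

int main(void)
{
        /* Hypothetical layout: 16 KiB region, 8 KiB private VMA over it */
        struct vm_region r = { .vm_start = 0x1000, .vm_end = 0x5000 };
        struct vm_area_struct v = { .vm_start = 0x1000, .vm_end = 0x3000,
                                    .vm_region = &r };
        unsigned long bytes = 0, sbytes = 0, slack = 0;

        account_vma(&v, 0, &bytes, &sbytes, &slack);
        printf("bytes=%lu sbytes=%lu slack=%lu\n", bytes, sbytes, slack);
        /* slack == 0x5000 - 0x3000 == 0x2000 (8 KiB region tail) */
        return 0;
}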