Diffstat (limited to 'fs/proc/task_nommu.c')
-rw-r--r--	fs/proc/task_nommu.c	108
1 file changed, 75 insertions(+), 33 deletions(-)
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index d4a8be32b90..ca4a48d0d31 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -15,25 +15,25 @@
  */
 void task_mem(struct seq_file *m, struct mm_struct *mm)
 {
-	struct vm_list_struct *vml;
+	struct vm_area_struct *vma;
+	struct rb_node *p;
 	unsigned long bytes = 0, sbytes = 0, slack = 0;
 
 	down_read(&mm->mmap_sem);
-	for (vml = mm->context.vmlist; vml; vml = vml->next) {
-		if (!vml->vma)
-			continue;
+	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
+		vma = rb_entry(p, struct vm_area_struct, vm_rb);
 
-		bytes += kobjsize(vml);
+		bytes += kobjsize(vma);
 		if (atomic_read(&mm->mm_count) > 1 ||
-		    atomic_read(&vml->vma->vm_usage) > 1
-		    ) {
-			sbytes += kobjsize((void *) vml->vma->vm_start);
-			sbytes += kobjsize(vml->vma);
+		    vma->vm_region ||
+		    vma->vm_flags & VM_MAYSHARE) {
+			sbytes += kobjsize((void *) vma->vm_start);
+			if (vma->vm_region)
+				sbytes += kobjsize(vma->vm_region);
 		} else {
-			bytes += kobjsize((void *) vml->vma->vm_start);
-			bytes += kobjsize(vml->vma);
-			slack += kobjsize((void *) vml->vma->vm_start) -
-				(vml->vma->vm_end - vml->vma->vm_start);
+			bytes += kobjsize((void *) vma->vm_start);
+			slack += kobjsize((void *) vma->vm_start) -
+				(vma->vm_end - vma->vm_start);
 		}
 	}
 
@@ -70,13 +70,14 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
 
 unsigned long task_vsize(struct mm_struct *mm)
 {
-	struct vm_list_struct *tbp;
+	struct vm_area_struct *vma;
+	struct rb_node *p;
 	unsigned long vsize = 0;
 
 	down_read(&mm->mmap_sem);
-	for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
-		if (tbp->vma)
-			vsize += kobjsize((void *) tbp->vma->vm_start);
+	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
+		vma = rb_entry(p, struct vm_area_struct, vm_rb);
+		vsize += vma->vm_region->vm_end - vma->vm_region->vm_start;
 	}
 	up_read(&mm->mmap_sem);
 	return vsize;
@@ -85,16 +86,15 @@ unsigned long task_vsize(struct mm_struct *mm)
 int task_statm(struct mm_struct *mm, int *shared, int *text,
 	       int *data, int *resident)
 {
-	struct vm_list_struct *tbp;
+	struct vm_area_struct *vma;
+	struct rb_node *p;
 	int size = kobjsize(mm);
 
 	down_read(&mm->mmap_sem);
-	for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
-		size += kobjsize(tbp);
-		if (tbp->vma) {
-			size += kobjsize(tbp->vma);
-			size += kobjsize((void *) tbp->vma->vm_start);
-		}
+	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
+		vma = rb_entry(p, struct vm_area_struct, vm_rb);
+		size += kobjsize(vma);
+		size += kobjsize((void *) vma->vm_start);
 	}
 
 	size += (*text = mm->end_code - mm->start_code);
@@ -105,20 +105,62 @@ int task_statm(struct mm_struct *mm, int *shared, int *text,
 }
 
 /*
+ * display a single VMA to a sequenced file
+ */
+static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
+{
+	unsigned long ino = 0;
+	struct file *file;
+	dev_t dev = 0;
+	int flags, len;
+
+	flags = vma->vm_flags;
+	file = vma->vm_file;
+
+	if (file) {
+		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+		dev = inode->i_sb->s_dev;
+		ino = inode->i_ino;
+	}
+
+	seq_printf(m,
+		   "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
+		   vma->vm_start,
+		   vma->vm_end,
+		   flags & VM_READ ? 'r' : '-',
+		   flags & VM_WRITE ? 'w' : '-',
+		   flags & VM_EXEC ? 'x' : '-',
+		   flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
+		   vma->vm_pgoff << PAGE_SHIFT,
+		   MAJOR(dev), MINOR(dev), ino, &len);
+
+	if (file) {
+		len = 25 + sizeof(void *) * 6 - len;
+		if (len < 1)
+			len = 1;
+		seq_printf(m, "%*c", len, ' ');
+		seq_path(m, &file->f_path, "");
+	}
+
+	seq_putc(m, '\n');
+	return 0;
+}
+
+/*
  * display mapping lines for a particular process's /proc/pid/maps
  */
-static int show_map(struct seq_file *m, void *_vml)
+static int show_map(struct seq_file *m, void *_p)
 {
-	struct vm_list_struct *vml = _vml;
+	struct rb_node *p = _p;
 
-	return nommu_vma_show(m, vml->vma);
+	return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb));
 }
 
 static void *m_start(struct seq_file *m, loff_t *pos)
 {
 	struct proc_maps_private *priv = m->private;
-	struct vm_list_struct *vml;
 	struct mm_struct *mm;
+	struct rb_node *p;
 	loff_t n = *pos;
 
 	/* pin the task and mm whilst we play with them */
@@ -134,9 +176,9 @@ static void *m_start(struct seq_file *m, loff_t *pos)
 	}
 
 	/* start from the Nth VMA */
-	for (vml = mm->context.vmlist; vml; vml = vml->next)
+	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
 		if (n-- == 0)
-			return vml;
+			return p;
 	return NULL;
 }
 
@@ -152,12 +194,12 @@ static void m_stop(struct seq_file *m, void *_vml)
 	}
 }
 
-static void *m_next(struct seq_file *m, void *_vml, loff_t *pos)
+static void *m_next(struct seq_file *m, void *_p, loff_t *pos)
 {
-	struct vm_list_struct *vml = _vml;
+	struct rb_node *p = _p;
 
 	(*pos)++;
-	return vml ? vml->next : NULL;
+	return p ? rb_next(p) : NULL;
 }
 
 static const struct seq_operations proc_pid_maps_ops = {
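
For readers unfamiliar with the idiom this patch adopts, the sketch below pulls the VMA walk out into a stand-alone helper. It is illustrative only: count_mm_vmas() is a hypothetical name, and it assumes the same 2.6.2x-era NOMMU context as the patch (VMAs linked into mm->mm_rb via vm_rb, and mmap_sem still taken with down_read()).

/*
 * Illustrative sketch only -- not part of the patch.  count_mm_vmas() is a
 * hypothetical helper showing the rb-tree walk that replaces the old
 * mm->context.vmlist loop throughout this file.
 */
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>

static unsigned long count_mm_vmas(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct rb_node *p;
	unsigned long n = 0;

	down_read(&mm->mmap_sem);
	/* rb_first()/rb_next() visit every VMA in address order */
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);
		(void)vma;		/* per-VMA work goes here */
		n++;
	}
	up_read(&mm->mmap_sem);
	return n;
}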