path: root/fs/proc
author    Steven Whitehouse <swhiteho@redhat.com>  2006-09-28 08:29:59 -0400
committer Steven Whitehouse <swhiteho@redhat.com>  2006-09-28 08:29:59 -0400
commit    185a257f2f73bcd89050ad02da5bedbc28fc43fa (patch)
tree      5e32586114534ed3f2165614cba3d578f5d87307 /fs/proc
parent    3f1a9aaeffd8d1cbc5ab9776c45cbd66af1c9699 (diff)
parent    a77c64c1a641950626181b4857abb701d8f38ccc (diff)
Merge branch 'master' into gfs2
Diffstat (limited to 'fs/proc')
-rw-r--r--  fs/proc/internal.h    |  1
-rw-r--r--  fs/proc/kcore.c       |  6
-rw-r--r--  fs/proc/nommu.c       | 20
-rw-r--r--  fs/proc/proc_misc.c   | 11
-rw-r--r--  fs/proc/task_mmu.c    |  5
-rw-r--r--  fs/proc/task_nommu.c  | 74
6 files changed, 87 insertions, 30 deletions
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 146a434ba944..987c773dbb20 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -28,6 +28,7 @@ do { \
 	(vmi)->largest_chunk = 0; \
 } while(0)
 
+extern int nommu_vma_show(struct seq_file *, struct vm_area_struct *);
 #endif
 
 extern void create_seq_entry(char *name, mode_t mode, const struct file_operations *f);
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 6a984f64edd7..3ceff3857272 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -279,12 +279,11 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 		tsz = elf_buflen - *fpos;
 		if (buflen < tsz)
 			tsz = buflen;
-		elf_buf = kmalloc(elf_buflen, GFP_ATOMIC);
+		elf_buf = kzalloc(elf_buflen, GFP_ATOMIC);
 		if (!elf_buf) {
 			read_unlock(&kclist_lock);
 			return -ENOMEM;
 		}
-		memset(elf_buf, 0, elf_buflen);
 		elf_kcore_store_hdr(elf_buf, nphdr, elf_buflen);
 		read_unlock(&kclist_lock);
 		if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
@@ -330,10 +329,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 			unsigned long curstart = start;
 			unsigned long cursize = tsz;
 
-			elf_buf = kmalloc(tsz, GFP_KERNEL);
+			elf_buf = kzalloc(tsz, GFP_KERNEL);
 			if (!elf_buf)
 				return -ENOMEM;
-			memset(elf_buf, 0, tsz);
 
 			read_lock(&vmlist_lock);
 			for (m=vmlist; m && cursize; m=m->next) {
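The two kcore.c hunks above replace an open-coded kmalloc()+memset() pair with kzalloc(), which allocates and zeroes the buffer in one call. A minimal before/after sketch of the pattern, reusing the first hunk's identifiers (illustrative only, not part of the patch):

	/* before: allocate, then clear the buffer by hand */
	elf_buf = kmalloc(elf_buflen, GFP_ATOMIC);
	if (!elf_buf) {
		read_unlock(&kclist_lock);
		return -ENOMEM;
	}
	memset(elf_buf, 0, elf_buflen);

	/* after: kzalloc() returns already-zeroed memory (or NULL on failure) */
	elf_buf = kzalloc(elf_buflen, GFP_ATOMIC);
	if (!elf_buf) {
		read_unlock(&kclist_lock);
		return -ENOMEM;
	}
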
diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
index cff10ab1af63..d7dbdf9e0f49 100644
--- a/fs/proc/nommu.c
+++ b/fs/proc/nommu.c
@@ -33,19 +33,15 @@
33#include "internal.h" 33#include "internal.h"
34 34
35/* 35/*
36 * display a list of all the VMAs the kernel knows about 36 * display a single VMA to a sequenced file
37 * - nommu kernals have a single flat list
38 */ 37 */
39static int nommu_vma_list_show(struct seq_file *m, void *v) 38int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
40{ 39{
41 struct vm_area_struct *vma;
42 unsigned long ino = 0; 40 unsigned long ino = 0;
43 struct file *file; 41 struct file *file;
44 dev_t dev = 0; 42 dev_t dev = 0;
45 int flags, len; 43 int flags, len;
46 44
47 vma = rb_entry((struct rb_node *) v, struct vm_area_struct, vm_rb);
48
49 flags = vma->vm_flags; 45 flags = vma->vm_flags;
50 file = vma->vm_file; 46 file = vma->vm_file;
51 47
@@ -78,6 +74,18 @@ static int nommu_vma_list_show(struct seq_file *m, void *v)
 	return 0;
 }
 
+/*
+ * display a list of all the VMAs the kernel knows about
+ * - nommu kernals have a single flat list
+ */
+static int nommu_vma_list_show(struct seq_file *m, void *v)
+{
+	struct vm_area_struct *vma;
+
+	vma = rb_entry((struct rb_node *) v, struct vm_area_struct, vm_rb);
+	return nommu_vma_show(m, vma);
+}
+
 static void *nommu_vma_list_start(struct seq_file *m, loff_t *_pos)
 {
 	struct rb_node *_rb;
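The nommu.c change splits the per-VMA formatting out of the /proc/maps iterator so it can be shared: nommu_vma_show() (declared in fs/proc/internal.h above) does the formatting, nommu_vma_list_show() becomes a thin wrapper around it, and the per-process code in task_nommu.c further down calls the same helper. A condensed sketch of the resulting call structure, drawn from the hunks in this patch with bodies elided:

	/* shared formatter, declared in fs/proc/internal.h */
	int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma);

	/* /proc/maps: iterate the global nommu VMA tree */
	static int nommu_vma_list_show(struct seq_file *m, void *v)
	{
		struct vm_area_struct *vma;

		vma = rb_entry((struct rb_node *) v, struct vm_area_struct, vm_rb);
		return nommu_vma_show(m, vma);
	}

	/* /proc/pid/maps: iterate one process's vm_list_struct chain */
	static int show_map(struct seq_file *m, void *_vml)
	{
		struct vm_list_struct *vml = _vml;

		return nommu_vma_show(m, vml->vma);
	}
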
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 942156225447..5bbd60896050 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -157,10 +157,12 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
157 "SwapCached: %8lu kB\n" 157 "SwapCached: %8lu kB\n"
158 "Active: %8lu kB\n" 158 "Active: %8lu kB\n"
159 "Inactive: %8lu kB\n" 159 "Inactive: %8lu kB\n"
160#ifdef CONFIG_HIGHMEM
160 "HighTotal: %8lu kB\n" 161 "HighTotal: %8lu kB\n"
161 "HighFree: %8lu kB\n" 162 "HighFree: %8lu kB\n"
162 "LowTotal: %8lu kB\n" 163 "LowTotal: %8lu kB\n"
163 "LowFree: %8lu kB\n" 164 "LowFree: %8lu kB\n"
165#endif
164 "SwapTotal: %8lu kB\n" 166 "SwapTotal: %8lu kB\n"
165 "SwapFree: %8lu kB\n" 167 "SwapFree: %8lu kB\n"
166 "Dirty: %8lu kB\n" 168 "Dirty: %8lu kB\n"
@@ -168,6 +170,8 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
168 "AnonPages: %8lu kB\n" 170 "AnonPages: %8lu kB\n"
169 "Mapped: %8lu kB\n" 171 "Mapped: %8lu kB\n"
170 "Slab: %8lu kB\n" 172 "Slab: %8lu kB\n"
173 "SReclaimable: %8lu kB\n"
174 "SUnreclaim: %8lu kB\n"
171 "PageTables: %8lu kB\n" 175 "PageTables: %8lu kB\n"
172 "NFS_Unstable: %8lu kB\n" 176 "NFS_Unstable: %8lu kB\n"
173 "Bounce: %8lu kB\n" 177 "Bounce: %8lu kB\n"
@@ -183,17 +187,22 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
 		K(total_swapcache_pages),
 		K(active),
 		K(inactive),
+#ifdef CONFIG_HIGHMEM
 		K(i.totalhigh),
 		K(i.freehigh),
 		K(i.totalram-i.totalhigh),
 		K(i.freeram-i.freehigh),
+#endif
 		K(i.totalswap),
 		K(i.freeswap),
 		K(global_page_state(NR_FILE_DIRTY)),
 		K(global_page_state(NR_WRITEBACK)),
 		K(global_page_state(NR_ANON_PAGES)),
 		K(global_page_state(NR_FILE_MAPPED)),
-		K(global_page_state(NR_SLAB)),
+		K(global_page_state(NR_SLAB_RECLAIMABLE) +
+		  global_page_state(NR_SLAB_UNRECLAIMABLE)),
+		K(global_page_state(NR_SLAB_RECLAIMABLE)),
+		K(global_page_state(NR_SLAB_UNRECLAIMABLE)),
 		K(global_page_state(NR_PAGETABLE)),
 		K(global_page_state(NR_UNSTABLE_NFS)),
 		K(global_page_state(NR_BOUNCE)),
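With NR_SLAB split into reclaimable and unreclaimable counters, the existing Slab: line in /proc/meminfo is now printed as the sum of the two new lines, i.e. Slab = SReclaimable + SUnreclaim; for example (hypothetical values), SReclaimable: 38400 kB and SUnreclaim: 12800 kB would appear alongside Slab: 51200 kB.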
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 0a163a4f7764..6b769afac55a 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -122,11 +122,6 @@ struct mem_size_stats
 	unsigned long private_dirty;
 };
 
-__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
-{
-	return NULL;
-}
-
 static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
 {
 	struct proc_maps_private *priv = m->private;
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 4616ed50ffcd..091aa8e48e02 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -138,25 +138,63 @@ out:
 }
 
 /*
- * Albert D. Cahalan suggested to fake entries for the traditional
- * sections here. This might be worth investigating.
+ * display mapping lines for a particular process's /proc/pid/maps
  */
-static int show_map(struct seq_file *m, void *v)
+static int show_map(struct seq_file *m, void *_vml)
 {
-	return 0;
+	struct vm_list_struct *vml = _vml;
+	return nommu_vma_show(m, vml->vma);
 }
+
 static void *m_start(struct seq_file *m, loff_t *pos)
 {
+	struct proc_maps_private *priv = m->private;
+	struct vm_list_struct *vml;
+	struct mm_struct *mm;
+	loff_t n = *pos;
+
+	/* pin the task and mm whilst we play with them */
+	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
+	if (!priv->task)
+		return NULL;
+
+	mm = get_task_mm(priv->task);
+	if (!mm) {
+		put_task_struct(priv->task);
+		priv->task = NULL;
+		return NULL;
+	}
+
+	down_read(&mm->mmap_sem);
+
+	/* start from the Nth VMA */
+	for (vml = mm->context.vmlist; vml; vml = vml->next)
+		if (n-- == 0)
+			return vml;
 	return NULL;
 }
-static void m_stop(struct seq_file *m, void *v)
+
+static void m_stop(struct seq_file *m, void *_vml)
 {
+	struct proc_maps_private *priv = m->private;
+
+	if (priv->task) {
+		struct mm_struct *mm = priv->task->mm;
+		up_read(&mm->mmap_sem);
+		mmput(mm);
+		put_task_struct(priv->task);
+	}
 }
-static void *m_next(struct seq_file *m, void *v, loff_t *pos)
+
+static void *m_next(struct seq_file *m, void *_vml, loff_t *pos)
 {
-	return NULL;
+	struct vm_list_struct *vml = _vml;
+
+	(*pos)++;
+	return vml ? vml->next : NULL;
 }
-static struct seq_operations proc_pid_maps_op = {
+
+static struct seq_operations proc_pid_maps_ops = {
 	.start = m_start,
 	.next = m_next,
 	.stop = m_stop,
@@ -165,11 +203,19 @@ static struct seq_operations proc_pid_maps_op = {
 
 static int maps_open(struct inode *inode, struct file *file)
 {
-	int ret;
-	ret = seq_open(file, &proc_pid_maps_op);
-	if (!ret) {
-		struct seq_file *m = file->private_data;
-		m->private = NULL;
+	struct proc_maps_private *priv;
+	int ret = -ENOMEM;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (priv) {
+		priv->pid = proc_pid(inode);
+		ret = seq_open(file, &proc_pid_maps_ops);
+		if (!ret) {
+			struct seq_file *m = file->private_data;
+			m->private = priv;
+		} else {
+			kfree(priv);
+		}
 	}
 	return ret;
 }
@@ -178,6 +224,6 @@ struct file_operations proc_maps_operations = {
 	.open = maps_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
-	.release = seq_release,
+	.release = seq_release_private,
 };
 
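The task_nommu.c hunks wire /proc/pid/maps on nommu kernels into the standard seq_file iterator. For orientation, the order in which the seq_file core drives the proc_pid_maps_ops callbacks during a read is roughly as follows; this is a simplified sketch, not the actual fs/seq_file.c source, and buffering and error handling are omitted:

	/* simplified: how seq_read() exercises proc_pid_maps_ops */
	void *p = ops->start(m, &pos);      /* m_start: pin task and mm, take mmap_sem, walk to the pos'th vm_list_struct */
	while (p) {
		if (ops->show(m, p))        /* show_map: emit one line via nommu_vma_show(m, vml->vma) */
			break;
		p = ops->next(m, p, &pos);  /* m_next: step to vml->next and advance *pos */
	}
	ops->stop(m, p);                    /* m_stop: up_read(&mm->mmap_sem), mmput(), put_task_struct() */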