Diffstat (limited to 'fs/proc')
 -rw-r--r--  fs/proc/base.c      61
 -rw-r--r--  fs/proc/task_mmu.c  225
2 files changed, 244 insertions, 42 deletions
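
For orientation before the patch body: combining the existing show_map() header line with the new counters printed by show_map_internal(), each entry in the new /proc/<pid>/smaps file should look roughly like the sketch below. The mapping and all numbers are invented for illustration; only the field names, ordering and kB units come from the format strings added in this diff.

    08048000-080bc000 r-xp 00000000 03:02 13130      /bin/bash
    Size:               464 kB
    Rss:                424 kB
    Shared_Clean:       424 kB
    Shared_Dirty:         0 kB
    Private_Clean:        0 kB
    Private_Dirty:        0 kB
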
diff --git a/fs/proc/base.c b/fs/proc/base.c
index b796bf90a0b1..520978e49e92 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -11,6 +11,40 @@
  * go into icache. We cache the reference to task_struct upon lookup too.
  * Eventually it should become a filesystem in its own. We don't use the
  * rest of procfs anymore.
+ *
+ *
+ * Changelog:
+ * 17-Jan-2005
+ * Allan Bezerra
+ * Bruna Moreira <bruna.moreira@indt.org.br>
+ * Edjard Mota <edjard.mota@indt.org.br>
+ * Ilias Biris <ilias.biris@indt.org.br>
+ * Mauricio Lin <mauricio.lin@indt.org.br>
+ *
+ * Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT
+ *
+ * A new process specific entry (smaps) included in /proc. It shows the
+ * size of rss for each memory area. The maps entry lacks information
+ * about physical memory size (rss) for each mapped file, i.e.,
+ * rss information for executables and library files.
+ * This additional information is useful for any tools that need to know
+ * about physical memory consumption for a process specific library.
+ *
+ * Changelog:
+ * 21-Feb-2005
+ * Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT
+ * Pud inclusion in the page table walking.
+ *
+ * ChangeLog:
+ * 10-Mar-2005
+ * 10LE Instituto Nokia de Tecnologia - INdT:
+ * A better way to walk through the page table as suggested by Hugh Dickins.
+ *
+ * Simo Piiroinen <simo.piiroinen@nokia.com>:
+ * Smaps information related to shared, private, clean and dirty pages.
+ *
+ * Paul Mundt <paul.mundt@nokia.com>:
+ * Overall revision about smaps.
  */
 
 #include <asm/uaccess.h>
@@ -68,6 +102,7 @@ enum pid_directory_inos {
 	PROC_TGID_NUMA_MAPS,
 	PROC_TGID_MOUNTS,
 	PROC_TGID_WCHAN,
+	PROC_TGID_SMAPS,
 #ifdef CONFIG_SCHEDSTATS
 	PROC_TGID_SCHEDSTAT,
 #endif
@@ -106,6 +141,7 @@ enum pid_directory_inos {
 	PROC_TID_NUMA_MAPS,
 	PROC_TID_MOUNTS,
 	PROC_TID_WCHAN,
+	PROC_TID_SMAPS,
 #ifdef CONFIG_SCHEDSTATS
 	PROC_TID_SCHEDSTAT,
 #endif
@@ -157,6 +193,7 @@ static struct pid_entry tgid_base_stuff[] = {
 	E(PROC_TGID_ROOT, "root", S_IFLNK|S_IRWXUGO),
 	E(PROC_TGID_EXE, "exe", S_IFLNK|S_IRWXUGO),
 	E(PROC_TGID_MOUNTS, "mounts", S_IFREG|S_IRUGO),
+	E(PROC_TGID_SMAPS, "smaps", S_IFREG|S_IRUGO),
 #ifdef CONFIG_SECURITY
 	E(PROC_TGID_ATTR, "attr", S_IFDIR|S_IRUGO|S_IXUGO),
 #endif
@@ -196,6 +233,7 @@ static struct pid_entry tid_base_stuff[] = {
 	E(PROC_TID_ROOT, "root", S_IFLNK|S_IRWXUGO),
 	E(PROC_TID_EXE, "exe", S_IFLNK|S_IRWXUGO),
 	E(PROC_TID_MOUNTS, "mounts", S_IFREG|S_IRUGO),
+	E(PROC_TID_SMAPS, "smaps", S_IFREG|S_IRUGO),
 #ifdef CONFIG_SECURITY
 	E(PROC_TID_ATTR, "attr", S_IFDIR|S_IRUGO|S_IXUGO),
 #endif
@@ -544,6 +582,25 @@ static struct file_operations proc_numa_maps_operations = {
 };
 #endif
 
+extern struct seq_operations proc_pid_smaps_op;
+static int smaps_open(struct inode *inode, struct file *file)
+{
+	struct task_struct *task = proc_task(inode);
+	int ret = seq_open(file, &proc_pid_smaps_op);
+	if (!ret) {
+		struct seq_file *m = file->private_data;
+		m->private = task;
+	}
+	return ret;
+}
+
+static struct file_operations proc_smaps_operations = {
+	.open		= smaps_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
 extern struct seq_operations mounts_op;
 static int mounts_open(struct inode *inode, struct file *file)
 {
@@ -1574,6 +1631,10 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
 		case PROC_TGID_MOUNTS:
 			inode->i_fop = &proc_mounts_operations;
 			break;
+		case PROC_TID_SMAPS:
+		case PROC_TGID_SMAPS:
+			inode->i_fop = &proc_smaps_operations;
+			break;
 #ifdef CONFIG_SECURITY
 		case PROC_TID_ATTR:
 			inode->i_nlink = 2;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 64e84cadfa3c..c7ef3e48e35b 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -2,10 +2,13 @@
 #include <linux/hugetlb.h>
 #include <linux/mount.h>
 #include <linux/seq_file.h>
+#include <linux/highmem.h>
 #include <linux/pagemap.h>
 #include <linux/mempolicy.h>
+
 #include <asm/elf.h>
 #include <asm/uaccess.h>
+#include <asm/tlbflush.h>
 #include "internal.h"
 
 char *task_mem(struct mm_struct *mm, char *buffer)
@@ -89,49 +92,58 @@ static void pad_len_spaces(struct seq_file *m, int len)
 	seq_printf(m, "%*c", len, ' ');
 }
 
-static int show_map(struct seq_file *m, void *v)
+struct mem_size_stats
+{
+	unsigned long resident;
+	unsigned long shared_clean;
+	unsigned long shared_dirty;
+	unsigned long private_clean;
+	unsigned long private_dirty;
+};
+
+static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
 {
 	struct task_struct *task = m->private;
-	struct vm_area_struct *map = v;
-	struct mm_struct *mm = map->vm_mm;
-	struct file *file = map->vm_file;
-	int flags = map->vm_flags;
+	struct vm_area_struct *vma = v;
+	struct mm_struct *mm = vma->vm_mm;
+	struct file *file = vma->vm_file;
+	int flags = vma->vm_flags;
 	unsigned long ino = 0;
 	dev_t dev = 0;
 	int len;
 
 	if (file) {
-		struct inode *inode = map->vm_file->f_dentry->d_inode;
+		struct inode *inode = vma->vm_file->f_dentry->d_inode;
 		dev = inode->i_sb->s_dev;
 		ino = inode->i_ino;
 	}
 
 	seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
-			map->vm_start,
-			map->vm_end,
+			vma->vm_start,
+			vma->vm_end,
 			flags & VM_READ ? 'r' : '-',
 			flags & VM_WRITE ? 'w' : '-',
 			flags & VM_EXEC ? 'x' : '-',
 			flags & VM_MAYSHARE ? 's' : 'p',
-			map->vm_pgoff << PAGE_SHIFT,
+			vma->vm_pgoff << PAGE_SHIFT,
 			MAJOR(dev), MINOR(dev), ino, &len);
 
 	/*
 	 * Print the dentry name for named mappings, and a
 	 * special [heap] marker for the heap:
 	 */
-	if (map->vm_file) {
+	if (file) {
 		pad_len_spaces(m, len);
-		seq_path(m, file->f_vfsmnt, file->f_dentry, "");
+		seq_path(m, file->f_vfsmnt, file->f_dentry, "\n");
 	} else {
 		if (mm) {
-			if (map->vm_start <= mm->start_brk &&
-			    map->vm_end >= mm->brk) {
+			if (vma->vm_start <= mm->start_brk &&
+			    vma->vm_end >= mm->brk) {
 				pad_len_spaces(m, len);
 				seq_puts(m, "[heap]");
 			} else {
-				if (map->vm_start <= mm->start_stack &&
-				    map->vm_end >= mm->start_stack) {
+				if (vma->vm_start <= mm->start_stack &&
+				    vma->vm_end >= mm->start_stack) {
 
 					pad_len_spaces(m, len);
 					seq_puts(m, "[stack]");
@@ -143,24 +155,146 @@ static int show_map(struct seq_file *m, void *v)
 		}
 	}
 	seq_putc(m, '\n');
-	if (m->count < m->size)  /* map is copied successfully */
-		m->version = (map != get_gate_vma(task))? map->vm_start: 0;
+
+	if (mss)
+		seq_printf(m,
+			   "Size:          %8lu kB\n"
+			   "Rss:           %8lu kB\n"
+			   "Shared_Clean:  %8lu kB\n"
+			   "Shared_Dirty:  %8lu kB\n"
+			   "Private_Clean: %8lu kB\n"
+			   "Private_Dirty: %8lu kB\n",
+			   (vma->vm_end - vma->vm_start) >> 10,
+			   mss->resident >> 10,
+			   mss->shared_clean >> 10,
+			   mss->shared_dirty >> 10,
+			   mss->private_clean >> 10,
+			   mss->private_dirty >> 10);
+
+	if (m->count < m->size)  /* vma is copied successfully */
+		m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
 	return 0;
 }
 
+static int show_map(struct seq_file *m, void *v)
+{
+	return show_map_internal(m, v, 0);
+}
+
+static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+				unsigned long addr, unsigned long end,
+				struct mem_size_stats *mss)
+{
+	pte_t *pte, ptent;
+	unsigned long pfn;
+	struct page *page;
+
+	pte = pte_offset_map(pmd, addr);
+	do {
+		ptent = *pte;
+		if (pte_none(ptent) || !pte_present(ptent))
+			continue;
+
+		mss->resident += PAGE_SIZE;
+		pfn = pte_pfn(ptent);
+		if (!pfn_valid(pfn))
+			continue;
+
+		page = pfn_to_page(pfn);
+		if (page_count(page) >= 2) {
+			if (pte_dirty(ptent))
+				mss->shared_dirty += PAGE_SIZE;
+			else
+				mss->shared_clean += PAGE_SIZE;
+		} else {
+			if (pte_dirty(ptent))
+				mss->private_dirty += PAGE_SIZE;
+			else
+				mss->private_clean += PAGE_SIZE;
+		}
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+	pte_unmap(pte - 1);
+	cond_resched_lock(&vma->vm_mm->page_table_lock);
+}
+
+static inline void smaps_pmd_range(struct vm_area_struct *vma, pud_t *pud,
+				unsigned long addr, unsigned long end,
+				struct mem_size_stats *mss)
+{
+	pmd_t *pmd;
+	unsigned long next;
+
+	pmd = pmd_offset(pud, addr);
+	do {
+		next = pmd_addr_end(addr, end);
+		if (pmd_none_or_clear_bad(pmd))
+			continue;
+		smaps_pte_range(vma, pmd, addr, next, mss);
+	} while (pmd++, addr = next, addr != end);
+}
+
+static inline void smaps_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
+				unsigned long addr, unsigned long end,
+				struct mem_size_stats *mss)
+{
+	pud_t *pud;
+	unsigned long next;
+
+	pud = pud_offset(pgd, addr);
+	do {
+		next = pud_addr_end(addr, end);
+		if (pud_none_or_clear_bad(pud))
+			continue;
+		smaps_pmd_range(vma, pud, addr, next, mss);
+	} while (pud++, addr = next, addr != end);
+}
+
+static inline void smaps_pgd_range(struct vm_area_struct *vma,
+				unsigned long addr, unsigned long end,
+				struct mem_size_stats *mss)
+{
+	pgd_t *pgd;
+	unsigned long next;
+
+	pgd = pgd_offset(vma->vm_mm, addr);
+	do {
+		next = pgd_addr_end(addr, end);
+		if (pgd_none_or_clear_bad(pgd))
+			continue;
+		smaps_pud_range(vma, pgd, addr, next, mss);
+	} while (pgd++, addr = next, addr != end);
+}
+
+static int show_smap(struct seq_file *m, void *v)
+{
+	struct vm_area_struct *vma = v;
+	struct mm_struct *mm = vma->vm_mm;
+	struct mem_size_stats mss;
+
+	memset(&mss, 0, sizeof mss);
+
+	if (mm) {
+		spin_lock(&mm->page_table_lock);
+		smaps_pgd_range(vma, vma->vm_start, vma->vm_end, &mss);
+		spin_unlock(&mm->page_table_lock);
+	}
+
+	return show_map_internal(m, v, &mss);
+}
+
 static void *m_start(struct seq_file *m, loff_t *pos)
 {
 	struct task_struct *task = m->private;
 	unsigned long last_addr = m->version;
 	struct mm_struct *mm;
-	struct vm_area_struct *map, *tail_map;
+	struct vm_area_struct *vma, *tail_vma;
 	loff_t l = *pos;
 
 	/*
 	 * We remember last_addr rather than next_addr to hit with
 	 * mmap_cache most of the time. We have zero last_addr at
-	 * the begining and also after lseek. We will have -1 last_addr
-	 * after the end of the maps.
+	 * the beginning and also after lseek. We will have -1 last_addr
+	 * after the end of the vmas.
 	 */
 
 	if (last_addr == -1UL)
@@ -170,47 +304,47 @@ static void *m_start(struct seq_file *m, loff_t *pos)
 	if (!mm)
 		return NULL;
 
-	tail_map = get_gate_vma(task);
+	tail_vma = get_gate_vma(task);
 	down_read(&mm->mmap_sem);
 
 	/* Start with last addr hint */
-	if (last_addr && (map = find_vma(mm, last_addr))) {
-		map = map->vm_next;
+	if (last_addr && (vma = find_vma(mm, last_addr))) {
+		vma = vma->vm_next;
 		goto out;
 	}
 
 	/*
-	 * Check the map index is within the range and do
+	 * Check the vma index is within the range and do
 	 * sequential scan until m_index.
 	 */
-	map = NULL;
+	vma = NULL;
 	if ((unsigned long)l < mm->map_count) {
-		map = mm->mmap;
-		while (l-- && map)
-			map = map->vm_next;
+		vma = mm->mmap;
+		while (l-- && vma)
+			vma = vma->vm_next;
 		goto out;
 	}
 
 	if (l != mm->map_count)
-		tail_map = NULL; /* After gate map */
+		tail_vma = NULL; /* After gate vma */
 
 out:
-	if (map)
-		return map;
+	if (vma)
+		return vma;
 
-	/* End of maps has reached */
-	m->version = (tail_map != NULL)? 0: -1UL;
+	/* End of vmas has been reached */
+	m->version = (tail_vma != NULL)? 0: -1UL;
 	up_read(&mm->mmap_sem);
 	mmput(mm);
-	return tail_map;
+	return tail_vma;
 }
 
 static void m_stop(struct seq_file *m, void *v)
 {
 	struct task_struct *task = m->private;
-	struct vm_area_struct *map = v;
-	if (map && map != get_gate_vma(task)) {
-		struct mm_struct *mm = map->vm_mm;
+	struct vm_area_struct *vma = v;
+	if (vma && vma != get_gate_vma(task)) {
+		struct mm_struct *mm = vma->vm_mm;
 		up_read(&mm->mmap_sem);
 		mmput(mm);
 	}
@@ -219,14 +353,14 @@ static void m_stop(struct seq_file *m, void *v)
 static void *m_next(struct seq_file *m, void *v, loff_t *pos)
 {
 	struct task_struct *task = m->private;
-	struct vm_area_struct *map = v;
-	struct vm_area_struct *tail_map = get_gate_vma(task);
+	struct vm_area_struct *vma = v;
+	struct vm_area_struct *tail_vma = get_gate_vma(task);
 
 	(*pos)++;
-	if (map && (map != tail_map) && map->vm_next)
-		return map->vm_next;
+	if (vma && (vma != tail_vma) && vma->vm_next)
+		return vma->vm_next;
 	m_stop(m, v);
-	return (map != tail_map)? tail_map: NULL;
+	return (vma != tail_vma)? tail_vma: NULL;
 }
 
 struct seq_operations proc_pid_maps_op = {
@@ -236,6 +370,13 @@ struct seq_operations proc_pid_maps_op = {
 	.show	= show_map
 };
 
+struct seq_operations proc_pid_smaps_op = {
+	.start	= m_start,
+	.next	= m_next,
+	.stop	= m_stop,
+	.show	= show_smap
+};
+
 #ifdef CONFIG_NUMA
 
 struct numa_maps {