path: root/fs/proc/task_mmu.c
author    Mauricio Lin <mauriciolin@gmail.com>      2005-09-03 18:55:10 -0400
committer Linus Torvalds <torvalds@evo.osdl.org>   2005-09-05 03:05:49 -0400
commit    e070ad49f31155d872d8e96cab2142840993e3c0 (patch)
tree      16d5bfd3d7627d6616c6b1008fac80e4cf77379e /fs/proc/task_mmu.c
parent    00e145b6d59a16dd7740197a18f7abdb3af004a9 (diff)
[PATCH] add /proc/pid/smaps
Add a "smaps" entry to /proc/pid: show howmuch memory is resident in each mapping. People that want to perform a memory consumption analysing can use it mainly if someone needs to figure out which libraries can be reduced for embedded systems. So the new features are the physical size of shared and clean [or dirty]; private and clean [or dirty]. Take a look the example below: # cat /proc/4576/smaps 08048000-080dc000 r-xp /bin/bash Size: 592 KB Rss: 500 KB Shared_Clean: 500 KB Shared_Dirty: 0 KB Private_Clean: 0 KB Private_Dirty: 0 KB 080dc000-080e2000 rw-p /bin/bash Size: 24 KB Rss: 24 KB Shared_Clean: 0 KB Shared_Dirty: 0 KB Private_Clean: 0 KB Private_Dirty: 24 KB 080e2000-08116000 rw-p Size: 208 KB Rss: 208 KB Shared_Clean: 0 KB Shared_Dirty: 0 KB Private_Clean: 0 KB Private_Dirty: 208 KB b7e2b000-b7e34000 r-xp /lib/tls/libnss_files-2.3.2.so Size: 36 KB Rss: 12 KB Shared_Clean: 12 KB Shared_Dirty: 0 KB Private_Clean: 0 KB Private_Dirty: 0 KB ... (Includes a cleanup from "Richard Purdie" <rpurdie@rpsys.net>) From: Torsten Foertsch <torsten.foertsch@gmx.net> show_smap calls first show_map and then prints its additional information to the seq_file. show_map checks if all it has to print fits into the buffer and if yes marks the current vma as written. While that is correct for show_map it is not for show_smap. Here the vma should be marked as written only after the additional information is also written. The attached patch cures the problem. It moves the functionality of the show_map function to a new function show_map_internal that is called with an additional struct mem_size_stats* argument. Then show_map calls show_map_internal with NULL as struct mem_size_stats* whereas show_smap calls it with a real pointer. Now the final if (m->count < m->size) /* vma is copied successfully */ m->version = (vma != get_gate_vma(task))? vma->vm_start: 0; is done only if the whole entry fits into the buffer. Signed-off-by: Hugh Dickins <hugh@veritas.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'fs/proc/task_mmu.c')
-rw-r--r--	fs/proc/task_mmu.c	225
1 file changed, 183 insertions, 42 deletions
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 64e84cadfa3c..c7ef3e48e35b 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -2,10 +2,13 @@
 #include <linux/hugetlb.h>
 #include <linux/mount.h>
 #include <linux/seq_file.h>
+#include <linux/highmem.h>
 #include <linux/pagemap.h>
 #include <linux/mempolicy.h>
+
 #include <asm/elf.h>
 #include <asm/uaccess.h>
+#include <asm/tlbflush.h>
 #include "internal.h"
 
 char *task_mem(struct mm_struct *mm, char *buffer)
@@ -89,49 +92,58 @@ static void pad_len_spaces(struct seq_file *m, int len)
 	seq_printf(m, "%*c", len, ' ');
 }
 
-static int show_map(struct seq_file *m, void *v)
+struct mem_size_stats
+{
+	unsigned long resident;
+	unsigned long shared_clean;
+	unsigned long shared_dirty;
+	unsigned long private_clean;
+	unsigned long private_dirty;
+};
+
+static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
 {
 	struct task_struct *task = m->private;
-	struct vm_area_struct *map = v;
-	struct mm_struct *mm = map->vm_mm;
-	struct file *file = map->vm_file;
-	int flags = map->vm_flags;
+	struct vm_area_struct *vma = v;
+	struct mm_struct *mm = vma->vm_mm;
+	struct file *file = vma->vm_file;
+	int flags = vma->vm_flags;
 	unsigned long ino = 0;
 	dev_t dev = 0;
 	int len;
 
 	if (file) {
-		struct inode *inode = map->vm_file->f_dentry->d_inode;
+		struct inode *inode = vma->vm_file->f_dentry->d_inode;
 		dev = inode->i_sb->s_dev;
 		ino = inode->i_ino;
 	}
 
 	seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
-			map->vm_start,
-			map->vm_end,
+			vma->vm_start,
+			vma->vm_end,
 			flags & VM_READ ? 'r' : '-',
 			flags & VM_WRITE ? 'w' : '-',
 			flags & VM_EXEC ? 'x' : '-',
 			flags & VM_MAYSHARE ? 's' : 'p',
-			map->vm_pgoff << PAGE_SHIFT,
+			vma->vm_pgoff << PAGE_SHIFT,
 			MAJOR(dev), MINOR(dev), ino, &len);
 
 	/*
 	 * Print the dentry name for named mappings, and a
 	 * special [heap] marker for the heap:
 	 */
-	if (map->vm_file) {
+	if (file) {
 		pad_len_spaces(m, len);
-		seq_path(m, file->f_vfsmnt, file->f_dentry, "");
+		seq_path(m, file->f_vfsmnt, file->f_dentry, "\n");
 	} else {
 		if (mm) {
-			if (map->vm_start <= mm->start_brk &&
-						map->vm_end >= mm->brk) {
+			if (vma->vm_start <= mm->start_brk &&
+						vma->vm_end >= mm->brk) {
 				pad_len_spaces(m, len);
 				seq_puts(m, "[heap]");
 			} else {
-				if (map->vm_start <= mm->start_stack &&
-					map->vm_end >= mm->start_stack) {
+				if (vma->vm_start <= mm->start_stack &&
+					vma->vm_end >= mm->start_stack) {
 
 					pad_len_spaces(m, len);
 					seq_puts(m, "[stack]");
@@ -143,24 +155,146 @@ static int show_map(struct seq_file *m, void *v)
 		}
 	}
 	seq_putc(m, '\n');
-	if (m->count < m->size)  /* map is copied successfully */
-		m->version = (map != get_gate_vma(task))? map->vm_start: 0;
+
+	if (mss)
+		seq_printf(m,
+			   "Size:          %8lu kB\n"
+			   "Rss:           %8lu kB\n"
+			   "Shared_Clean:  %8lu kB\n"
+			   "Shared_Dirty:  %8lu kB\n"
+			   "Private_Clean: %8lu kB\n"
+			   "Private_Dirty: %8lu kB\n",
+			   (vma->vm_end - vma->vm_start) >> 10,
+			   mss->resident >> 10,
+			   mss->shared_clean >> 10,
+			   mss->shared_dirty >> 10,
+			   mss->private_clean >> 10,
+			   mss->private_dirty >> 10);
+
+	if (m->count < m->size)  /* vma is copied successfully */
+		m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
 	return 0;
 }
 
+static int show_map(struct seq_file *m, void *v)
+{
+	return show_map_internal(m, v, 0);
+}
+
+static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+				unsigned long addr, unsigned long end,
+				struct mem_size_stats *mss)
+{
+	pte_t *pte, ptent;
+	unsigned long pfn;
+	struct page *page;
+
+	pte = pte_offset_map(pmd, addr);
+	do {
+		ptent = *pte;
+		if (pte_none(ptent) || !pte_present(ptent))
+			continue;
+
+		mss->resident += PAGE_SIZE;
+		pfn = pte_pfn(ptent);
+		if (!pfn_valid(pfn))
+			continue;
+
+		page = pfn_to_page(pfn);
+		if (page_count(page) >= 2) {
+			if (pte_dirty(ptent))
+				mss->shared_dirty += PAGE_SIZE;
+			else
+				mss->shared_clean += PAGE_SIZE;
+		} else {
+			if (pte_dirty(ptent))
+				mss->private_dirty += PAGE_SIZE;
+			else
+				mss->private_clean += PAGE_SIZE;
+		}
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+	pte_unmap(pte - 1);
+	cond_resched_lock(&vma->vm_mm->page_table_lock);
+}
+
+static inline void smaps_pmd_range(struct vm_area_struct *vma, pud_t *pud,
+				unsigned long addr, unsigned long end,
+				struct mem_size_stats *mss)
+{
+	pmd_t *pmd;
+	unsigned long next;
+
+	pmd = pmd_offset(pud, addr);
+	do {
+		next = pmd_addr_end(addr, end);
+		if (pmd_none_or_clear_bad(pmd))
+			continue;
+		smaps_pte_range(vma, pmd, addr, next, mss);
+	} while (pmd++, addr = next, addr != end);
+}
+
+static inline void smaps_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
+				unsigned long addr, unsigned long end,
+				struct mem_size_stats *mss)
+{
+	pud_t *pud;
+	unsigned long next;
+
+	pud = pud_offset(pgd, addr);
+	do {
+		next = pud_addr_end(addr, end);
+		if (pud_none_or_clear_bad(pud))
+			continue;
+		smaps_pmd_range(vma, pud, addr, next, mss);
+	} while (pud++, addr = next, addr != end);
+}
+
+static inline void smaps_pgd_range(struct vm_area_struct *vma,
+				unsigned long addr, unsigned long end,
+				struct mem_size_stats *mss)
+{
+	pgd_t *pgd;
+	unsigned long next;
+
+	pgd = pgd_offset(vma->vm_mm, addr);
+	do {
+		next = pgd_addr_end(addr, end);
+		if (pgd_none_or_clear_bad(pgd))
+			continue;
+		smaps_pud_range(vma, pgd, addr, next, mss);
+	} while (pgd++, addr = next, addr != end);
+}
+
+static int show_smap(struct seq_file *m, void *v)
+{
+	struct vm_area_struct *vma = v;
+	struct mm_struct *mm = vma->vm_mm;
+	struct mem_size_stats mss;
+
+	memset(&mss, 0, sizeof mss);
+
+	if (mm) {
+		spin_lock(&mm->page_table_lock);
+		smaps_pgd_range(vma, vma->vm_start, vma->vm_end, &mss);
+		spin_unlock(&mm->page_table_lock);
+	}
+
+	return show_map_internal(m, v, &mss);
+}
+
 static void *m_start(struct seq_file *m, loff_t *pos)
 {
 	struct task_struct *task = m->private;
 	unsigned long last_addr = m->version;
 	struct mm_struct *mm;
-	struct vm_area_struct *map, *tail_map;
+	struct vm_area_struct *vma, *tail_vma;
 	loff_t l = *pos;
 
 	/*
 	 * We remember last_addr rather than next_addr to hit with
 	 * mmap_cache most of the time. We have zero last_addr at
-	 * the begining and also after lseek. We will have -1 last_addr
-	 * after the end of the maps.
+	 * the beginning and also after lseek. We will have -1 last_addr
+	 * after the end of the vmas.
 	 */
 
 	if (last_addr == -1UL)
@@ -170,47 +304,47 @@ static void *m_start(struct seq_file *m, loff_t *pos)
 	if (!mm)
 		return NULL;
 
-	tail_map = get_gate_vma(task);
+	tail_vma = get_gate_vma(task);
 	down_read(&mm->mmap_sem);
 
 	/* Start with last addr hint */
-	if (last_addr && (map = find_vma(mm, last_addr))) {
-		map = map->vm_next;
+	if (last_addr && (vma = find_vma(mm, last_addr))) {
+		vma = vma->vm_next;
 		goto out;
 	}
 
 	/*
-	 * Check the map index is within the range and do
+	 * Check the vma index is within the range and do
 	 * sequential scan until m_index.
 	 */
-	map = NULL;
+	vma = NULL;
 	if ((unsigned long)l < mm->map_count) {
-		map = mm->mmap;
-		while (l-- && map)
-			map = map->vm_next;
+		vma = mm->mmap;
+		while (l-- && vma)
+			vma = vma->vm_next;
 		goto out;
 	}
 
 	if (l != mm->map_count)
-		tail_map = NULL; /* After gate map */
+		tail_vma = NULL; /* After gate vma */
 
 out:
-	if (map)
-		return map;
+	if (vma)
+		return vma;
 
-	/* End of maps has reached */
-	m->version = (tail_map != NULL)? 0: -1UL;
+	/* End of vmas has been reached */
+	m->version = (tail_vma != NULL)? 0: -1UL;
 	up_read(&mm->mmap_sem);
 	mmput(mm);
-	return tail_map;
+	return tail_vma;
 }
 
 static void m_stop(struct seq_file *m, void *v)
 {
 	struct task_struct *task = m->private;
-	struct vm_area_struct *map = v;
-	if (map && map != get_gate_vma(task)) {
-		struct mm_struct *mm = map->vm_mm;
+	struct vm_area_struct *vma = v;
+	if (vma && vma != get_gate_vma(task)) {
+		struct mm_struct *mm = vma->vm_mm;
 		up_read(&mm->mmap_sem);
 		mmput(mm);
 	}
@@ -219,14 +353,14 @@ static void m_stop(struct seq_file *m, void *v)
 static void *m_next(struct seq_file *m, void *v, loff_t *pos)
 {
 	struct task_struct *task = m->private;
-	struct vm_area_struct *map = v;
-	struct vm_area_struct *tail_map = get_gate_vma(task);
+	struct vm_area_struct *vma = v;
+	struct vm_area_struct *tail_vma = get_gate_vma(task);
 
 	(*pos)++;
-	if (map && (map != tail_map) && map->vm_next)
-		return map->vm_next;
+	if (vma && (vma != tail_vma) && vma->vm_next)
+		return vma->vm_next;
 	m_stop(m, v);
-	return (map != tail_map)? tail_map: NULL;
+	return (vma != tail_vma)? tail_vma: NULL;
 }
 
 struct seq_operations proc_pid_maps_op = {
@@ -236,6 +370,13 @@ struct seq_operations proc_pid_maps_op = {
 	.show	= show_map
 };
 
+struct seq_operations proc_pid_smaps_op = {
+	.start	= m_start,
+	.next	= m_next,
+	.stop	= m_stop,
+	.show	= show_smap
+};
+
 #ifdef CONFIG_NUMA
 
 struct numa_maps {