Diffstat (limited to 'fs/proc/task_mmu.c')
-rw-r--r--   fs/proc/task_mmu.c   235
1 file changed, 235 insertions, 0 deletions
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
new file mode 100644
index 000000000000..28b4a0253a92
--- /dev/null
+++ b/fs/proc/task_mmu.c
@@ -0,0 +1,235 @@
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <asm/elf.h>
#include <asm/uaccess.h>
#include "internal.h"

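/*
 * Format the Vm* lines (VmSize, VmLck, VmRSS, ...) shown in
 * /proc/<pid>/status into @buffer; page counts are converted
 * to kB by the PAGE_SHIFT-10 shifts below.
 */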
char *task_mem(struct mm_struct *mm, char *buffer)
{
        unsigned long data, text, lib;

        data = mm->total_vm - mm->shared_vm - mm->stack_vm;
        text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
        lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
        buffer += sprintf(buffer,
                "VmSize:\t%8lu kB\n"
                "VmLck:\t%8lu kB\n"
                "VmRSS:\t%8lu kB\n"
                "VmData:\t%8lu kB\n"
                "VmStk:\t%8lu kB\n"
                "VmExe:\t%8lu kB\n"
                "VmLib:\t%8lu kB\n"
                "VmPTE:\t%8lu kB\n",
                (mm->total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
                mm->locked_vm << (PAGE_SHIFT-10),
                get_mm_counter(mm, rss) << (PAGE_SHIFT-10),
                data << (PAGE_SHIFT-10),
                mm->stack_vm << (PAGE_SHIFT-10), text, lib,
                (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
        return buffer;
}

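/* Total mapped address space in bytes; mm->total_vm is kept in pages. */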
unsigned long task_vsize(struct mm_struct *mm)
{
        return PAGE_SIZE * mm->total_vm;
}

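/*
 * Memory usage breakdown for /proc/<pid>/statm: shared, text, data and
 * resident are page counts, and the return value is total_vm.
 */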
int task_statm(struct mm_struct *mm, int *shared, int *text,
               int *data, int *resident)
{
        int rss = get_mm_counter(mm, rss);

        *shared = rss - get_mm_counter(mm, anon_rss);
        *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
                                                                >> PAGE_SHIFT;
        *data = mm->total_vm - mm->shared_vm;
        *resident = rss;
        return mm->total_vm;
}

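/*
 * Back /proc/<pid>/exe: find the first file-backed VMA with VM_EXECUTABLE
 * set and return a reference to its dentry and vfsmount.
 */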
int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
{
        struct vm_area_struct * vma;
        int result = -ENOENT;
        struct task_struct *task = proc_task(inode);
        struct mm_struct * mm = get_task_mm(task);

        if (!mm)
                goto out;
        down_read(&mm->mmap_sem);

        vma = mm->mmap;
        while (vma) {
                if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
                        break;
                vma = vma->vm_next;
        }

        if (vma) {
                *mnt = mntget(vma->vm_file->f_vfsmnt);
                *dentry = dget(vma->vm_file->f_dentry);
                result = 0;
        }

        up_read(&mm->mmap_sem);
        mmput(mm);
out:
        return result;
}

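/* Pad the fixed-width part of a maps line so the name/marker column lines up. */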
static void pad_len_spaces(struct seq_file *m, int len)
{
        len = 25 + sizeof(void*) * 6 - len;
        if (len < 1)
                len = 1;
        seq_printf(m, "%*c", len, ' ');
}

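/*
 * Emit one /proc/<pid>/maps line for the VMA in @v: address range,
 * permissions, offset, device, inode, and either the backing file's
 * path or a [heap]/[stack]/[vdso] marker.
 */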
static int show_map(struct seq_file *m, void *v)
{
        struct task_struct *task = m->private;
        struct vm_area_struct *map = v;
        struct mm_struct *mm = map->vm_mm;
        struct file *file = map->vm_file;
        int flags = map->vm_flags;
        unsigned long ino = 0;
        dev_t dev = 0;
        int len;

        if (file) {
                struct inode *inode = map->vm_file->f_dentry->d_inode;
                dev = inode->i_sb->s_dev;
                ino = inode->i_ino;
        }

        seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
                        map->vm_start,
                        map->vm_end,
                        flags & VM_READ ? 'r' : '-',
                        flags & VM_WRITE ? 'w' : '-',
                        flags & VM_EXEC ? 'x' : '-',
                        flags & VM_MAYSHARE ? 's' : 'p',
                        map->vm_pgoff << PAGE_SHIFT,
                        MAJOR(dev), MINOR(dev), ino, &len);

        /*
         * Print the dentry name for named mappings, and a
         * special [heap] marker for the heap:
         */
        if (map->vm_file) {
                pad_len_spaces(m, len);
                seq_path(m, file->f_vfsmnt, file->f_dentry, "");
        } else {
                if (mm) {
                        if (map->vm_start <= mm->start_brk &&
                            map->vm_end >= mm->brk) {
                                pad_len_spaces(m, len);
                                seq_puts(m, "[heap]");
                        } else {
                                if (map->vm_start <= mm->start_stack &&
                                    map->vm_end >= mm->start_stack) {

                                        pad_len_spaces(m, len);
                                        seq_puts(m, "[stack]");
                                }
                        }
                } else {
                        pad_len_spaces(m, len);
                        seq_puts(m, "[vdso]");
                }
        }
        seq_putc(m, '\n');
        if (m->count < m->size)  /* map is copied successfully */
                m->version = (map != get_gate_vma(task))? map->vm_start: 0;
        return 0;
}

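/*
 * seq_file ->start(): take mmap_sem, locate the VMA for the requested
 * position (reusing the last_addr hint kept in m->version when possible)
 * and return it; the gate VMA is reported last.
 */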
static void *m_start(struct seq_file *m, loff_t *pos)
{
        struct task_struct *task = m->private;
        unsigned long last_addr = m->version;
        struct mm_struct *mm;
        struct vm_area_struct *map, *tail_map;
        loff_t l = *pos;

        /*
         * We remember last_addr rather than next_addr to hit with
         * mmap_cache most of the time. We have zero last_addr at
         * the beginning and also after lseek. We will have -1 last_addr
         * after the end of the maps.
         */

        if (last_addr == -1UL)
                return NULL;

        mm = get_task_mm(task);
        if (!mm)
                return NULL;

        tail_map = get_gate_vma(task);
        down_read(&mm->mmap_sem);

        /* Start with the last addr hint */
        if (last_addr && (map = find_vma(mm, last_addr))) {
                map = map->vm_next;
                goto out;
        }

        /*
         * Check that the map index is within range and do a
         * sequential scan up to m_index.
         */
        map = NULL;
        if ((unsigned long)l < mm->map_count) {
                map = mm->mmap;
                while (l-- && map)
                        map = map->vm_next;
                goto out;
        }

        if (l != mm->map_count)
                tail_map = NULL; /* After gate map */

out:
        if (map)
                return map;

        /* The end of the maps has been reached */
        m->version = (tail_map != NULL)? 0: -1UL;
        up_read(&mm->mmap_sem);
        mmput(mm);
        return tail_map;
}

static void m_stop(struct seq_file *m, void *v)
{
        struct task_struct *task = m->private;
        struct vm_area_struct *map = v;
        if (map && map != get_gate_vma(task)) {
                struct mm_struct *mm = map->vm_mm;
                up_read(&mm->mmap_sem);
                mmput(mm);
        }
}

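/* seq_file ->next(): step to the following VMA, finishing with the gate VMA. */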
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct task_struct *task = m->private;
        struct vm_area_struct *map = v;
        struct vm_area_struct *tail_map = get_gate_vma(task);

        (*pos)++;
        if (map && (map != tail_map) && map->vm_next)
                return map->vm_next;
        m_stop(m, v);
        return (map != tail_map)? tail_map: NULL;
}

struct seq_operations proc_pid_maps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_map
};