author     Matt Mackall <mpm@selenic.com>                        2008-02-05 01:29:01 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-02-05 12:44:16 -0500
commit     b3ae5acbbb98d95c1300c8ced56d15f97d09c506 (patch)
tree       e8a159c7c3309564458f7dea955b6a9daf948b61 /fs/proc
parent     e6473092bd9116583ce9ab8cf1b6570e1aa6fc83 (diff)
maps4: use pagewalker in clear_refs and smaps
Use the generic pagewalker for smaps and clear_refs
Signed-off-by: Matt Mackall <mpm@selenic.com>
Cc: Dave Hansen <haveblue@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/proc')
-rw-r--r--  fs/proc/task_mmu.c | 95
1 file changed, 17 insertions(+), 78 deletions(-)
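Before reading the hunks, it helps to see the shape of the API being adopted. The old file-local walker took a VMA plus a function pointer; the generic walk_page_range() takes an mm, an explicit address range, and a struct mm_walk of callbacks, and the pmd_entry callback returns an int so a walk can be cut short. Below is a minimal sketch of a client in the new style, using only the signatures visible in this patch; the names count_state, count_pte_range, count_walk and count_present are hypothetical, not part of the patch.

```c
#include <linux/mm.h>
#include <linux/sched.h>	/* cond_resched() */

/* Hypothetical walk_page_range() client in the style this patch adopts. */
struct count_state {
	struct vm_area_struct *vma;	/* carried via *private, like mss->vma */
	unsigned long present;
};

static int count_pte_range(pmd_t *pmd, unsigned long addr,
			   unsigned long end, void *private)
{
	struct count_state *cs = private;
	spinlock_t *ptl;
	pte_t *pte;

	/* Map the PTE page once and iterate, as smaps_pte_range() does. */
	pte = pte_offset_map_lock(cs->vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		if (pte_present(*pte))
			cs->present++;
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;	/* nonzero stops the walk */
}

static struct mm_walk count_walk = { .pmd_entry = count_pte_range };

static unsigned long count_present(struct vm_area_struct *vma)
{
	struct count_state cs = { .vma = vma, .present = 0 };

	walk_page_range(vma->vm_mm, vma->vm_start, vma->vm_end,
			&count_walk, &cs);
	return cs.present;
}
```

Note how the VMA has to travel through the opaque private pointer: the generic callback sees only raw page-table coordinates, which is exactly why this patch grows struct mem_size_stats by a vma field.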
```diff
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 8952ce70315e..791b2d400be5 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -135,6 +135,7 @@ static void pad_len_spaces(struct seq_file *m, int len)
 
 struct mem_size_stats
 {
+	struct vm_area_struct *vma;
 	unsigned long resident;
 	unsigned long shared_clean;
 	unsigned long shared_dirty;
```
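This is the field the private-pointer trick relies on: show_smap() fills mss.vma before starting the walk (see the hunk near the end), and smaps_pte_range() reads it back out of the stats structure.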
```diff
@@ -144,13 +145,6 @@ struct mem_size_stats
 	u64 pss;
 };
 
-struct pmd_walker {
-	struct vm_area_struct *vma;
-	void *private;
-	void (*action)(struct vm_area_struct *, pmd_t *, unsigned long,
-		       unsigned long, void *);
-};
-
 static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
 {
 	struct proc_maps_private *priv = m->private;
```
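The file-local pmd_walker bundle of VMA, action callback and private data loses its last users here; struct mm_walk, introduced alongside the generic walker in the parent commit, plays the same role from now on.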
```diff
@@ -240,11 +234,11 @@ static int show_map(struct seq_file *m, void *v)
 	return show_map_internal(m, v, NULL);
 }
 
-static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
-			    unsigned long addr, unsigned long end,
-			    void *private)
+static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+			   void *private)
 {
 	struct mem_size_stats *mss = private;
+	struct vm_area_struct *vma = mss->vma;
 	pte_t *pte, ptent;
 	spinlock_t *ptl;
 	struct page *page;
@@ -282,12 +276,13 @@ static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 	}
 	pte_unmap_unlock(pte - 1, ptl);
 	cond_resched();
+	return 0;
 }
 
-static void clear_refs_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
-				 unsigned long addr, unsigned long end,
-				 void *private)
+static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
+				unsigned long end, void *private)
 {
+	struct vm_area_struct *vma = private;
 	pte_t *pte, ptent;
 	spinlock_t *ptl;
 	struct page *page;
```
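Both converted callbacks change from void to int and now finish with return 0, the value that tells the generic walker to keep going. clear_refs_pte_range() has no stats structure to carry the VMA, so the VMA itself is handed over as the private pointer, as the final hunk shows.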
```diff
@@ -308,71 +303,10 @@ static void clear_refs_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 	}
 	pte_unmap_unlock(pte - 1, ptl);
 	cond_resched();
+	return 0;
 }
 
-static inline void walk_pmd_range(struct pmd_walker *walker, pud_t *pud,
-				  unsigned long addr, unsigned long end)
-{
-	pmd_t *pmd;
-	unsigned long next;
-
-	for (pmd = pmd_offset(pud, addr); addr != end;
-	     pmd++, addr = next) {
-		next = pmd_addr_end(addr, end);
-		if (pmd_none_or_clear_bad(pmd))
-			continue;
-		walker->action(walker->vma, pmd, addr, next, walker->private);
-	}
-}
-
-static inline void walk_pud_range(struct pmd_walker *walker, pgd_t *pgd,
-				  unsigned long addr, unsigned long end)
-{
-	pud_t *pud;
-	unsigned long next;
-
-	for (pud = pud_offset(pgd, addr); addr != end;
-	     pud++, addr = next) {
-		next = pud_addr_end(addr, end);
-		if (pud_none_or_clear_bad(pud))
-			continue;
-		walk_pmd_range(walker, pud, addr, next);
-	}
-}
-
-/*
- * walk_page_range - walk the page tables of a VMA with a callback
- * @vma - VMA to walk
- * @action - callback invoked for every bottom-level (PTE) page table
- * @private - private data passed to the callback function
- *
- * Recursively walk the page table for the memory area in a VMA, calling
- * a callback for every bottom-level (PTE) page table.
- */
-static inline void walk_page_range(struct vm_area_struct *vma,
-				   void (*action)(struct vm_area_struct *,
-						  pmd_t *, unsigned long,
-						  unsigned long, void *),
-				   void *private)
-{
-	unsigned long addr = vma->vm_start;
-	unsigned long end = vma->vm_end;
-	struct pmd_walker walker = {
-		.vma = vma,
-		.private = private,
-		.action = action,
-	};
-	pgd_t *pgd;
-	unsigned long next;
-
-	for (pgd = pgd_offset(vma->vm_mm, addr); addr != end;
-	     pgd++, addr = next) {
-		next = pgd_addr_end(addr, end);
-		if (pgd_none_or_clear_bad(pgd))
-			continue;
-		walk_pud_range(&walker, pgd, addr, next);
-	}
-}
+static struct mm_walk smaps_walk = { .pmd_entry = smaps_pte_range };
 
 static int show_smap(struct seq_file *m, void *v)
 {
```
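This hunk is where most of the diffstat's 78 deletions come from: the open-coded descent through the pgd, pud and pmd levels is removed wholesale, and the smaps side of the conversion shrinks to a one-line static struct mm_walk naming its pmd_entry callback.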
```diff
@@ -380,11 +314,15 @@ static int show_smap(struct seq_file *m, void *v)
 	struct mem_size_stats mss;
 
 	memset(&mss, 0, sizeof mss);
+	mss.vma = vma;
 	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
-		walk_page_range(vma, smaps_pte_range, &mss);
+		walk_page_range(vma->vm_mm, vma->vm_start, vma->vm_end,
+				&smaps_walk, &mss);
 	return show_map_internal(m, v, &mss);
 }
 
+static struct mm_walk clear_refs_walk = { .pmd_entry = clear_refs_pte_range };
+
 void clear_refs_smap(struct mm_struct *mm)
 {
 	struct vm_area_struct *vma;
@@ -392,7 +330,8 @@ void clear_refs_smap(struct mm_struct *mm)
 	down_read(&mm->mmap_sem);
 	for (vma = mm->mmap; vma; vma = vma->vm_next)
 		if (vma->vm_mm && !is_vm_hugetlb_page(vma))
-			walk_page_range(vma, clear_refs_pte_range, NULL);
+			walk_page_range(vma->vm_mm, vma->vm_start, vma->vm_end,
+					&clear_refs_walk, vma);
 	flush_tlb_mm(mm);
 	up_read(&mm->mmap_sem);
 }
```
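For context, the PMD level of the generic walker that takes over looks much like the loop deleted above, plus the error plumbing that the int-returning callbacks enable. The following is only a sketch modeled on the removed code; the real implementation lives in mm/pagewalk.c, added by the parent commit, and can also hand individual PTEs to a pte_entry callback.

```c
/*
 * Sketch of the generic walker's PMD level, modeled on the code this
 * patch deletes; see mm/pagewalk.c from the parent commit for the
 * authoritative version.
 */
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk, void *private)
{
	pmd_t *pmd;
	unsigned long next;
	int err = 0;

	for (pmd = pmd_offset(pud, addr); addr != end; pmd++, addr = next) {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;	/* skip empty or corrupt entries */
		if (walk->pmd_entry)
			err = walk->pmd_entry(pmd, addr, next, private);
		if (err)
			break;		/* a nonzero return aborts the walk */
	}
	return err;
}
```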