author	David Rientjes <rientjes@google.com>	2007-05-06 17:49:21 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-07 15:12:52 -0400
commit	826fad1b93fdb4ffacfd9cd860f06140e852e377 (patch)
tree	8135bd60f839a83521d457904e4d2578203df339
parent	0013572b2ae535bfd6314f22d9aef53725ea00d8 (diff)
smaps: extract pmd walker from smaps code
Extracts the pmd walker from smaps-specific code in fs/proc/task_mmu.c.

The new struct pmd_walker includes the struct vm_area_struct of the memory to
walk over.  Iteration begins at vma->vm_start and completes at vma->vm_end.
A pointer to another data structure may be stored in the private field, such
as struct mem_size_stats, which acts as the smaps accumulator.  For each pmd
in the VMA, the action function is called with a pointer to its struct
vm_area_struct, a pointer to the pmd_t, its start and end addresses, and the
private field.

The interface for walking pmd's in a VMA for fs/proc/task_mmu.c is now:

	void for_each_pmd(struct vm_area_struct *vma,
			  void (*action)(struct vm_area_struct *vma,
					 pmd_t *pmd, unsigned long addr,
					 unsigned long end, void *private),
			  void *private);

Since the pmd walker is now extracted from the smaps code, smaps_one_pmd() is
invoked for each pmd in the VMA.  Its behavior and efficiency are identical
to the existing implementation.

Cc: Hugh Dickins <hugh@veritas.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Christoph Lameter <clameter@sgi.com>
Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
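[Editorial note: to make the callback interface concrete, here is a minimal
sketch of how another in-file consumer could plug into the walker.  It is
not part of this patch; count_present_ptes() and its unsigned long
accumulator are hypothetical names invented for illustration.]

	/*
	 * Hypothetical action callback: counts present ptes in the VMA.
	 * It follows the same locking and iteration pattern that
	 * smaps_one_pmd() uses in the patch below.
	 */
	static void count_present_ptes(struct vm_area_struct *vma, pmd_t *pmd,
				       unsigned long addr, unsigned long end,
				       void *private)
	{
		unsigned long *count = private;
		spinlock_t *ptl;
		pte_t *pte;

		pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
		for (; addr != end; pte++, addr += PAGE_SIZE)
			if (pte_present(*pte))
				(*count)++;
		pte_unmap_unlock(pte - 1, ptl);
	}

	/* Caller side: walk every pmd in the VMA with the custom action. */
	unsigned long count = 0;
	for_each_pmd(vma, count_present_ptes, &count);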
-rw-r--r--	fs/proc/task_mmu.c	69
1 file changed, 42 insertions(+), 27 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 7445980c8022..9d22c1c1caa8 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -122,6 +122,13 @@ struct mem_size_stats
 	unsigned long private_dirty;
 };
 
+struct pmd_walker {
+	struct vm_area_struct *vma;
+	void *private;
+	void (*action)(struct vm_area_struct *, pmd_t *, unsigned long,
+		       unsigned long, void *);
+};
+
 static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
 {
 	struct proc_maps_private *priv = m->private;
@@ -204,16 +211,17 @@ static int show_map(struct seq_file *m, void *v)
 	return show_map_internal(m, v, NULL);
 }
 
-static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
-			    unsigned long addr, unsigned long end,
-			    struct mem_size_stats *mss)
+static void smaps_one_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+			  unsigned long addr, unsigned long end,
+			  void *private)
 {
+	struct mem_size_stats *mss = private;
 	pte_t *pte, ptent;
 	spinlock_t *ptl;
 	struct page *page;
 
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
-	do {
+	for (; addr != end; pte++, addr += PAGE_SIZE) {
 		ptent = *pte;
 		if (!pte_present(ptent))
 			continue;
@@ -235,57 +243,64 @@ static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		else
 			mss->private_clean += PAGE_SIZE;
 	}
-	} while (pte++, addr += PAGE_SIZE, addr != end);
+	}
 	pte_unmap_unlock(pte - 1, ptl);
 	cond_resched();
 }
 
-static inline void smaps_pmd_range(struct vm_area_struct *vma, pud_t *pud,
-				   unsigned long addr, unsigned long end,
-				   struct mem_size_stats *mss)
+static inline void for_each_pmd_in_pud(struct pmd_walker *walker, pud_t *pud,
+				       unsigned long addr, unsigned long end)
 {
 	pmd_t *pmd;
 	unsigned long next;
 
-	pmd = pmd_offset(pud, addr);
-	do {
+	for (pmd = pmd_offset(pud, addr); addr != end;
+	     pmd++, addr = next) {
 		next = pmd_addr_end(addr, end);
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
-		smaps_pte_range(vma, pmd, addr, next, mss);
-	} while (pmd++, addr = next, addr != end);
+		walker->action(walker->vma, pmd, addr, next, walker->private);
+	}
 }
 
-static inline void smaps_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
-				   unsigned long addr, unsigned long end,
-				   struct mem_size_stats *mss)
+static inline void for_each_pud_in_pgd(struct pmd_walker *walker, pgd_t *pgd,
+				       unsigned long addr, unsigned long end)
 {
 	pud_t *pud;
 	unsigned long next;
 
-	pud = pud_offset(pgd, addr);
-	do {
+	for (pud = pud_offset(pgd, addr); addr != end;
+	     pud++, addr = next) {
 		next = pud_addr_end(addr, end);
 		if (pud_none_or_clear_bad(pud))
 			continue;
-		smaps_pmd_range(vma, pud, addr, next, mss);
-	} while (pud++, addr = next, addr != end);
+		for_each_pmd_in_pud(walker, pud, addr, next);
+	}
 }
 
-static inline void smaps_pgd_range(struct vm_area_struct *vma,
-				   unsigned long addr, unsigned long end,
-				   struct mem_size_stats *mss)
+static inline void for_each_pmd(struct vm_area_struct *vma,
+				void (*action)(struct vm_area_struct *, pmd_t *,
+					       unsigned long, unsigned long,
+					       void *),
+				void *private)
 {
+	unsigned long addr = vma->vm_start;
+	unsigned long end = vma->vm_end;
+	struct pmd_walker walker = {
+		.vma = vma,
+		.private = private,
+		.action = action,
+	};
 	pgd_t *pgd;
 	unsigned long next;
 
-	pgd = pgd_offset(vma->vm_mm, addr);
-	do {
+	for (pgd = pgd_offset(vma->vm_mm, addr); addr != end;
+	     pgd++, addr = next) {
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
-		smaps_pud_range(vma, pgd, addr, next, mss);
-	} while (pgd++, addr = next, addr != end);
+		for_each_pud_in_pgd(&walker, pgd, addr, next);
+	}
 }
 
 static int show_smap(struct seq_file *m, void *v)
@@ -295,7 +310,7 @@ static int show_smap(struct seq_file *m, void *v)
 
 	memset(&mss, 0, sizeof mss);
 	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
-		smaps_pgd_range(vma, vma->vm_start, vma->vm_end, &mss);
+		for_each_pmd(vma, smaps_one_pmd, &mss);
 	return show_map_internal(m, v, &mss);
 }
 
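[Editorial note: the design choice here is a separation of traversal from
policy: the pgd/pud/pmd descent lives in one place, and each consumer
supplies only an action callback plus a private accumulator.  A future
consumer would, hypothetically (my_action and my_stats are invented names),
look like:

	for_each_pmd(vma, my_action, &my_stats);

where my_action() has the same signature as smaps_one_pmd().]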