Diffstat (limited to 'fs/proc/task_mmu.c')
-rw-r--r--	fs/proc/task_mmu.c	79
1 file changed, 63 insertions(+), 16 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 199088ee969b..4008c060f7ef 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -195,7 +195,7 @@ static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats
 		   "Shared_Dirty:   %8lu kB\n"
 		   "Private_Clean:  %8lu kB\n"
 		   "Private_Dirty:  %8lu kB\n"
-		   "Pgs_Referenced: %8lu kB\n",
+		   "Referenced:     %8lu kB\n",
 		   (vma->vm_end - vma->vm_start) >> 10,
 		   mss->resident >> 10,
 		   mss->shared_clean >> 10,
@@ -214,9 +214,9 @@ static int show_map(struct seq_file *m, void *v)
 	return show_map_internal(m, v, NULL);
 }
 
-static void smaps_one_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-			  unsigned long addr, unsigned long end,
-			  void *private)
+static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+			    unsigned long addr, unsigned long end,
+			    void *private)
 {
 	struct mem_size_stats *mss = private;
 	pte_t *pte, ptent;
@@ -254,8 +254,34 @@ static void smaps_one_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	cond_resched();
 }
 
-static inline void for_each_pmd_in_pud(struct pmd_walker *walker, pud_t *pud,
-				       unsigned long addr, unsigned long end)
+static void clear_refs_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+				 unsigned long addr, unsigned long end,
+				 void *private)
+{
+	pte_t *pte, ptent;
+	spinlock_t *ptl;
+	struct page *page;
+
+	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+	for (; addr != end; pte++, addr += PAGE_SIZE) {
+		ptent = *pte;
+		if (!pte_present(ptent))
+			continue;
+
+		page = vm_normal_page(vma, addr, ptent);
+		if (!page)
+			continue;
+
+		/* Clear accessed and referenced bits. */
+		ptep_test_and_clear_young(vma, addr, pte);
+		ClearPageReferenced(page);
+	}
+	pte_unmap_unlock(pte - 1, ptl);
+	cond_resched();
+}
+
+static inline void walk_pmd_range(struct pmd_walker *walker, pud_t *pud,
+				  unsigned long addr, unsigned long end)
 {
 	pmd_t *pmd;
 	unsigned long next;
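A note on the two clearing calls in clear_refs_pte_range() above: ClearPageReferenced() drops the software referenced flag on the struct page, while ptep_test_and_clear_young() clears the hardware accessed ("young") bit in the PTE itself. The latter is architecture-specific; as a rough sketch, the generic fallback behaves approximately like this (illustrative only, not part of this patch; the helper name here is made up):

	/* Sketch of ptep_test_and_clear_young() semantics: clear the
	 * accessed bit and report whether it had been set. The real
	 * helper is per-architecture. */
	static inline int sketch_test_and_clear_young(struct vm_area_struct *vma,
						      unsigned long addr, pte_t *ptep)
	{
		pte_t pte = *ptep;

		if (!pte_young(pte))
			return 0;
		set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
		return 1;
	}

Clearing the accessed bit this way does not flush the TLB per PTE; instead, clear_refs_smap() later in this patch issues a single flush_tlb_mm() for the whole mm.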
@@ -269,8 +295,8 @@ static inline void for_each_pmd_in_pud(struct pmd_walker *walker, pud_t *pud,
 	}
 }
 
-static inline void for_each_pud_in_pgd(struct pmd_walker *walker, pgd_t *pgd,
-				       unsigned long addr, unsigned long end)
+static inline void walk_pud_range(struct pmd_walker *walker, pgd_t *pgd,
+				  unsigned long addr, unsigned long end)
 {
 	pud_t *pud;
 	unsigned long next;
@@ -280,15 +306,24 @@ static inline void for_each_pud_in_pgd(struct pmd_walker *walker, pgd_t *pgd,
 		next = pud_addr_end(addr, end);
 		if (pud_none_or_clear_bad(pud))
 			continue;
-		for_each_pmd_in_pud(walker, pud, addr, next);
+		walk_pmd_range(walker, pud, addr, next);
 	}
 }
 
-static inline void for_each_pmd(struct vm_area_struct *vma,
-				void (*action)(struct vm_area_struct *, pmd_t *,
-					       unsigned long, unsigned long,
-					       void *),
-				void *private)
+/*
+ * walk_page_range - walk the page tables of a VMA with a callback
+ * @vma - VMA to walk
+ * @action - callback invoked for every bottom-level (PTE) page table
+ * @private - private data passed to the callback function
+ *
+ * Recursively walk the page table for the memory area in a VMA, calling
+ * a callback for every bottom-level (PTE) page table.
+ */
+static inline void walk_page_range(struct vm_area_struct *vma,
+				   void (*action)(struct vm_area_struct *,
+						  pmd_t *, unsigned long,
+						  unsigned long, void *),
+				   void *private)
 {
 	unsigned long addr = vma->vm_start;
 	unsigned long end = vma->vm_end;
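The callback signature documented above is the whole contract: each action sees one PMD's worth of address space and is responsible for its own PTE locking. As a hypothetical example (not part of this patch), a third walker that counts present pages in a VMA could follow the same pattern as smaps_pte_range() and clear_refs_pte_range():

	/* Hypothetical walk_page_range() callback: count present PTEs,
	 * using the same locking pattern as the walkers in this patch. */
	static void count_present_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
					    unsigned long addr, unsigned long end,
					    void *private)
	{
		unsigned long *count = private;
		spinlock_t *ptl;
		pte_t *pte;

		pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
		for (; addr != end; pte++, addr += PAGE_SIZE)
			if (pte_present(*pte))
				(*count)++;
		pte_unmap_unlock(pte - 1, ptl);
	}

A caller would then run walk_page_range(vma, count_present_pte_range, &count) with count initialized to zero.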
@@ -305,7 +340,7 @@ static inline void for_each_pmd(struct vm_area_struct *vma,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
-		for_each_pud_in_pgd(&walker, pgd, addr, next);
+		walk_pud_range(&walker, pgd, addr, next);
 	}
 }
 
@@ -316,10 +351,22 @@ static int show_smap(struct seq_file *m, void *v)
 
 	memset(&mss, 0, sizeof mss);
 	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
-		for_each_pmd(vma, smaps_one_pmd, &mss);
+		walk_page_range(vma, smaps_pte_range, &mss);
 	return show_map_internal(m, v, &mss);
 }
 
+void clear_refs_smap(struct mm_struct *mm)
+{
+	struct vm_area_struct *vma;
+
+	down_read(&mm->mmap_sem);
+	for (vma = mm->mmap; vma; vma = vma->vm_next)
+		if (vma->vm_mm && !is_vm_hugetlb_page(vma))
+			walk_page_range(vma, clear_refs_pte_range, NULL);
+	flush_tlb_mm(mm);
+	up_read(&mm->mmap_sem);
+}
+
 static void *m_start(struct seq_file *m, loff_t *pos)
 {
 	struct proc_maps_private *priv = m->private;
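clear_refs_smap() takes mmap_sem for reading, clears the referenced state of every non-hugetlb VMA, and flushes the TLB once for the whole mm. The plumbing that exposes it to userspace (presumably a /proc/<pid>/clear_refs file wired up in fs/proc/base.c) is not part of this diff. Assuming that interface, a minimal userspace sketch of the measure/clear/measure cycle would be (the clear_refs path and the written value are assumptions, not shown in this patch):

	/* Userspace sketch: read the Referenced total from smaps, clear it,
	 * then read again. Assumes /proc/self/clear_refs invokes
	 * clear_refs_smap() as this patch suggests. */
	#include <stdio.h>

	/* Sum all "Referenced:" lines in /proc/self/smaps, in kB. */
	static long referenced_kb(void)
	{
		FILE *f = fopen("/proc/self/smaps", "r");
		char line[256];
		long total = 0, kb;

		if (!f)
			return -1;
		while (fgets(line, sizeof(line), f))
			if (sscanf(line, "Referenced: %ld kB", &kb) == 1)
				total += kb;
		fclose(f);
		return total;
	}

	int main(void)
	{
		FILE *f;

		printf("Referenced before clear: %ld kB\n", referenced_kb());

		/* Clear the accessed/referenced bits for every VMA. */
		f = fopen("/proc/self/clear_refs", "w");
		if (!f) {
			perror("clear_refs");
			return 1;
		}
		fputs("1\n", f);
		fclose(f);

		printf("Referenced after clear:  %ld kB\n", referenced_kb());
		return 0;
	}

After a clear, the Referenced figure grows only as the task touches pages again, which is what makes the field renamed from Pgs_Referenced usable as a rough working-set estimator.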