Diffstat (limited to 'fs/proc/task_mmu.c')
 fs/proc/task_mmu.c | 83 ++++++++++++++++++++++++++++++-----------------
 1 file changed, 55 insertions(+), 28 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 17403629e330..ab8ccc9d14ff 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -315,9 +315,9 @@ struct mem_size_stats {
 };
 
 static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
-			   void *private)
+			   struct mm_walk *walk)
 {
-	struct mem_size_stats *mss = private;
+	struct mem_size_stats *mss = walk->private;
 	struct vm_area_struct *vma = mss->vma;
 	pte_t *pte, ptent;
 	spinlock_t *ptl;
@@ -365,19 +365,21 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	return 0;
 }
 
-static struct mm_walk smaps_walk = { .pmd_entry = smaps_pte_range };
-
 static int show_smap(struct seq_file *m, void *v)
 {
 	struct vm_area_struct *vma = v;
 	struct mem_size_stats mss;
 	int ret;
+	struct mm_walk smaps_walk = {
+		.pmd_entry = smaps_pte_range,
+		.mm = vma->vm_mm,
+		.private = &mss,
+	};
 
 	memset(&mss, 0, sizeof mss);
 	mss.vma = vma;
 	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
-		walk_page_range(vma->vm_mm, vma->vm_start, vma->vm_end,
-				&smaps_walk, &mss);
+		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
 
 	ret = show_map(m, v);
 	if (ret)
@@ -426,9 +428,9 @@ const struct file_operations proc_smaps_operations = {
 };
 
 static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
-				unsigned long end, void *private)
+				unsigned long end, struct mm_walk *walk)
 {
-	struct vm_area_struct *vma = private;
+	struct vm_area_struct *vma = walk->private;
 	pte_t *pte, ptent;
 	spinlock_t *ptl;
 	struct page *page;
@@ -452,8 +454,6 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 	return 0;
 }
 
-static struct mm_walk clear_refs_walk = { .pmd_entry = clear_refs_pte_range };
-
 static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 				size_t count, loff_t *ppos)
 {
@@ -476,11 +476,17 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 		return -ESRCH;
 	mm = get_task_mm(task);
 	if (mm) {
+		static struct mm_walk clear_refs_walk;
+		memset(&clear_refs_walk, 0, sizeof(clear_refs_walk));
+		clear_refs_walk.pmd_entry = clear_refs_pte_range;
+		clear_refs_walk.mm = mm;
 		down_read(&mm->mmap_sem);
-		for (vma = mm->mmap; vma; vma = vma->vm_next)
+		for (vma = mm->mmap; vma; vma = vma->vm_next) {
+			clear_refs_walk.private = vma;
 			if (!is_vm_hugetlb_page(vma))
-				walk_page_range(mm, vma->vm_start, vma->vm_end,
-						&clear_refs_walk, vma);
+				walk_page_range(vma->vm_start, vma->vm_end,
+						&clear_refs_walk);
+		}
 		flush_tlb_mm(mm);
 		up_read(&mm->mmap_sem);
 		mmput(mm);
@@ -528,9 +534,9 @@ static int add_to_pagemap(unsigned long addr, u64 pfn,
 }
 
 static int pagemap_pte_hole(unsigned long start, unsigned long end,
-			    void *private)
+			    struct mm_walk *walk)
 {
-	struct pagemapread *pm = private;
+	struct pagemapread *pm = walk->private;
 	unsigned long addr;
 	int err = 0;
 	for (addr = start; addr < end; addr += PAGE_SIZE) {
@@ -547,24 +553,45 @@ static u64 swap_pte_to_pagemap_entry(pte_t pte)
 	return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
 }
 
+static unsigned long pte_to_pagemap_entry(pte_t pte)
+{
+	unsigned long pme = 0;
+	if (is_swap_pte(pte))
+		pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
+			| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
+	else if (pte_present(pte))
+		pme = PM_PFRAME(pte_pfn(pte))
+			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
+	return pme;
+}
+
 static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
-			     void *private)
+			     struct mm_walk *walk)
 {
-	struct pagemapread *pm = private;
+	struct vm_area_struct *vma;
+	struct pagemapread *pm = walk->private;
 	pte_t *pte;
 	int err = 0;
 
+	/* find the first VMA at or above 'addr' */
+	vma = find_vma(walk->mm, addr);
 	for (; addr != end; addr += PAGE_SIZE) {
 		u64 pfn = PM_NOT_PRESENT;
-		pte = pte_offset_map(pmd, addr);
-		if (is_swap_pte(*pte))
-			pfn = PM_PFRAME(swap_pte_to_pagemap_entry(*pte))
-				| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
-		else if (pte_present(*pte))
-			pfn = PM_PFRAME(pte_pfn(*pte))
-				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
-		/* unmap so we're not in atomic when we copy to userspace */
-		pte_unmap(pte);
+
+		/* check to see if we've left 'vma' behind
+		 * and need a new, higher one */
+		if (vma && (addr >= vma->vm_end))
+			vma = find_vma(walk->mm, addr);
+
+		/* check that 'vma' actually covers this address,
+		 * and that it isn't a huge page vma */
+		if (vma && (vma->vm_start <= addr) &&
+		    !is_vm_hugetlb_page(vma)) {
+			pte = pte_offset_map(pmd, addr);
+			pfn = pte_to_pagemap_entry(*pte);
+			/* unmap before userspace copy */
+			pte_unmap(pte);
+		}
 		err = add_to_pagemap(addr, pfn, pm);
 		if (err)
 			return err;
@@ -675,8 +702,8 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
 	 * user buffer is tracked in "pm", and the walk
 	 * will stop when we hit the end of the buffer.
 	 */
-	ret = walk_page_range(mm, start_vaddr, end_vaddr,
-			      &pagemap_walk, &pm);
+	ret = walk_page_range(start_vaddr, end_vaddr,
+			      &pagemap_walk);
 	if (ret == PM_END_OF_BUFFER)
 		ret = 0;
 	/* don't need mmap_sem for these, but this looks cleaner */
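
For reference, this is roughly how a pagewalker looks when written against
the converted API: the callback receives the walk context instead of a bare
void *private, and walk_page_range() takes only the address range plus a
struct mm_walk carrying the mm and private data. This is a minimal sketch,
not part of the patch; count_ctx and count_pte_range are hypothetical names.

	struct count_ctx {
		unsigned long present;	/* present pages seen so far */
	};

	static int count_pte_range(pmd_t *pmd, unsigned long addr,
				   unsigned long end, struct mm_walk *walk)
	{
		struct count_ctx *ctx = walk->private;
		spinlock_t *ptl;
		pte_t *pte;

		/* map and lock the pte page, as smaps_pte_range() does */
		pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
		for (; addr != end; pte++, addr += PAGE_SIZE)
			if (pte_present(*pte))
				ctx->present++;
		pte_unmap_unlock(pte - 1, ptl);
		return 0;
	}

A caller fills in the mm_walk on its own stack, mirroring the show_smap()
hunk above (mm, start and end are assumed to be in scope):

	struct count_ctx ctx = { .present = 0 };
	struct mm_walk count_walk = {
		.pmd_entry = count_pte_range,
		.mm = mm,
		.private = &ctx,
	};

	down_read(&mm->mmap_sem);
	walk_page_range(start, end, &count_walk);
	up_read(&mm->mmap_sem);

Moving the mm_walk to the caller is the point of the conversion: the old
file-scope static walkers could not carry per-call state, which is why the
old walk_page_range() had to thread a separate private pointer and mm
argument through every walk.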