author		Michel Lespinasse <walken@google.com>	2012-10-08 19:31:42 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-09 03:22:41 -0400
commit		86c2ad19956f84f2191e062fcb979367b6365871 (patch)
tree		bca7d7106964266b24ecfa1256d2586a315571cf /mm/rmap.c
parent		bf181b9f9d8dfbba58b23441ad60d0bc33806d64 (diff)
mm rmap: remove vma_address check for address inside vma
In file and anon rmap, we use interval trees to find potentially relevant
vmas and then call vma_address() to find the virtual address the given page
might be found at in these vmas. vma_address() used to include a check that
the returned address falls within the limits of the vma, but this check
isn't necessary now that we always use interval trees in rmap: the interval
tree simply doesn't return any vmas which this check would find to be
irrelevant. As a result, we can replace the -EFAULT error code (which then
needed to be checked at every call site) with a VM_BUG_ON().

Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Daniel Santos <daniel.santos@pobox.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
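The invariant the patch relies on can be checked in isolation. The following
stand-alone sketch is not part of the patch: it uses simplified stand-in
types and a hypothetical PAGE_SHIFT of 12 to illustrate that, for any pgoff
an interval tree lookup can return, the computed address necessarily falls
inside [vm_start, vm_end), which is exactly what the new VM_BUG_ON() asserts.

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* hypothetical; 4K pages */

/* Simplified stand-ins for the kernel's vma fields (illustration only). */
struct vma {
	unsigned long vm_start;	/* first virtual address of the mapping */
	unsigned long vm_end;	/* first virtual address past the mapping */
	unsigned long vm_pgoff;	/* file offset of vm_start, in pages */
};

/* Same arithmetic as the patched __vma_address(): no range check. */
static unsigned long vma_address(const struct vma *v, unsigned long pgoff)
{
	return v->vm_start + ((pgoff - v->vm_pgoff) << PAGE_SHIFT);
}

int main(void)
{
	/* A vma mapping file pages [100, 110) at virtual 0x400000. */
	struct vma v = {
		.vm_start = 0x400000,
		.vm_end   = 0x400000 + (10UL << PAGE_SHIFT),
		.vm_pgoff = 100,
	};
	unsigned long pgoff;

	/*
	 * An interval tree lookup keyed on pgoff only returns vmas with
	 * vm_pgoff <= pgoff < vm_pgoff + pages, so every address computed
	 * from a returned vma lies within [vm_start, vm_end).
	 */
	for (pgoff = 100; pgoff < 110; pgoff++) {
		unsigned long addr = vma_address(&v, pgoff);
		assert(addr >= v.vm_start && addr < v.vm_end);
	}
	printf("all computed addresses fall within the vma\n");
	return 0;
}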
Diffstat (limited to 'mm/rmap.c')
-rw-r--r--	mm/rmap.c	48
1 file changed, 21 insertions(+), 27 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 9c61bf387fd1..28777412de62 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -510,22 +510,26 @@ void page_unlock_anon_vma(struct anon_vma *anon_vma)
 
 /*
  * At what user virtual address is page expected in @vma?
- * Returns virtual address or -EFAULT if page's index/offset is not
- * within the range mapped the @vma.
  */
-inline unsigned long
-vma_address(struct page *page, struct vm_area_struct *vma)
+static inline unsigned long
+__vma_address(struct page *page, struct vm_area_struct *vma)
 {
 	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-	unsigned long address;
 
 	if (unlikely(is_vm_hugetlb_page(vma)))
 		pgoff = page->index << huge_page_order(page_hstate(page));
-	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
-	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
-		/* page should be within @vma mapping range */
-		return -EFAULT;
-	}
+
+	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+}
+
+inline unsigned long
+vma_address(struct page *page, struct vm_area_struct *vma)
+{
+	unsigned long address = __vma_address(page, vma);
+
+	/* page should be within @vma mapping range */
+	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+
 	return address;
 }
 
@@ -535,6 +539,7 @@ vma_address(struct page *page, struct vm_area_struct *vma)
  */
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
+	unsigned long address;
 	if (PageAnon(page)) {
 		struct anon_vma *page__anon_vma = page_anon_vma(page);
 		/*
@@ -550,7 +555,10 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 			return -EFAULT;
 	} else
 		return -EFAULT;
-	return vma_address(page, vma);
+	address = __vma_address(page, vma);
+	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
+		return -EFAULT;
+	return address;
 }
 
 /*
@@ -624,8 +632,8 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	address = vma_address(page, vma);
-	if (address == -EFAULT)		/* out of vma range */
+	address = __vma_address(page, vma);
+	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
 		return 0;
 	pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
 	if (!pte)			/* the page is not in this mm */
@@ -732,8 +740,6 @@ static int page_referenced_anon(struct page *page,
 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
 		struct vm_area_struct *vma = avc->vma;
 		unsigned long address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		/*
 		 * If we are reclaiming on behalf of a cgroup, skip
 		 * counting on behalf of references from different
@@ -799,8 +805,6 @@ static int page_referenced_file(struct page *page,
 
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		/*
 		 * If we are reclaiming on behalf of a cgroup, skip
 		 * counting on behalf of references from different
@@ -904,8 +908,6 @@ static int page_mkclean_file(struct address_space *mapping, struct page *page)
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		if (vma->vm_flags & VM_SHARED) {
 			unsigned long address = vma_address(page, vma);
-			if (address == -EFAULT)
-				continue;
 			ret += page_mkclean_one(page, vma, address);
 		}
 	}
@@ -1468,8 +1470,6 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
 			continue;
 
 		address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		ret = try_to_unmap_one(page, vma, address, flags);
 		if (ret != SWAP_AGAIN || !page_mapped(page))
 			break;
@@ -1508,8 +1508,6 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 	mutex_lock(&mapping->i_mmap_mutex);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		ret = try_to_unmap_one(page, vma, address, flags);
 		if (ret != SWAP_AGAIN || !page_mapped(page))
 			goto out;
@@ -1684,8 +1682,6 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
 		struct vm_area_struct *vma = avc->vma;
 		unsigned long address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		ret = rmap_one(page, vma, address, arg);
 		if (ret != SWAP_AGAIN)
 			break;
@@ -1707,8 +1703,6 @@ static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
 	mutex_lock(&mapping->i_mmap_mutex);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		ret = rmap_one(page, vma, address, arg);
 		if (ret != SWAP_AGAIN)
 			break;
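Taken together, the walker hunks above all make the same change: the
check-and-skip pattern collapses to a plain call. A condensed fragment, not
buildable on its own, summarizing the before/after shape at each call site:

	/* before this commit: every walker screened the return value */
	address = vma_address(page, vma);
	if (address == -EFAULT)
		continue;
	ret = rmap_one(page, vma, address, arg);

	/* after: the interval tree only yields overlapping vmas, and
	 * vma_address() asserts the range with VM_BUG_ON() */
	address = vma_address(page, vma);
	ret = rmap_one(page, vma, address, arg);

Only page_address_in_vma(), whose callers may pass an arbitrary vma, keeps
an explicit range check on the raw __vma_address() result.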