 mm/huge_memory.c |  4 ----
 mm/rmap.c        | 48 +++++++++++++++++++++---------------------------
 2 files changed, 21 insertions(+), 31 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index ce59ada09462..7cf8b0ec11ec 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1386,8 +1386,6 @@ static void __split_huge_page(struct page *page,
 		struct vm_area_struct *vma = avc->vma;
 		unsigned long addr = vma_address(page, vma);
 		BUG_ON(is_vma_temporary_stack(vma));
-		if (addr == -EFAULT)
-			continue;
 		mapcount += __split_huge_page_splitting(page, vma, addr);
 	}
 	/*
@@ -1412,8 +1410,6 @@ static void __split_huge_page(struct page *page,
 		struct vm_area_struct *vma = avc->vma;
 		unsigned long addr = vma_address(page, vma);
 		BUG_ON(is_vma_temporary_stack(vma));
-		if (addr == -EFAULT)
-			continue;
 		mapcount2 += __split_huge_page_map(page, vma, addr);
 	}
 	if (mapcount != mapcount2)
diff --git a/mm/rmap.c b/mm/rmap.c
index 9c61bf387fd1..28777412de62 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -510,22 +510,26 @@ void page_unlock_anon_vma(struct anon_vma *anon_vma)
 
 /*
  * At what user virtual address is page expected in @vma?
- * Returns virtual address or -EFAULT if page's index/offset is not
- * within the range mapped the @vma.
  */
-inline unsigned long
-vma_address(struct page *page, struct vm_area_struct *vma)
+static inline unsigned long
+__vma_address(struct page *page, struct vm_area_struct *vma)
 {
 	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-	unsigned long address;
 
 	if (unlikely(is_vm_hugetlb_page(vma)))
 		pgoff = page->index << huge_page_order(page_hstate(page));
-	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
-	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
-		/* page should be within @vma mapping range */
-		return -EFAULT;
-	}
+
+	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+}
+
+inline unsigned long
+vma_address(struct page *page, struct vm_area_struct *vma)
+{
+	unsigned long address = __vma_address(page, vma);
+
+	/* page should be within @vma mapping range */
+	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+
 	return address;
 }
 
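The hunk above splits the old helper in two: __vma_address() computes the would-be address unconditionally, while vma_address() now asserts the result lies inside the vma instead of returning -EFAULT. A minimal userspace sketch of that contract (simplified stand-in types, a fixed 4K page size, no hugetlb handling, and assert() standing in for VM_BUG_ON() — none of which come from the patch itself):

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumption: 4K pages */

struct vm_area_struct { unsigned long vm_start, vm_end, vm_pgoff; };
struct page { unsigned long index; };

/* Unchecked: may compute an address outside [vm_start, vm_end). */
static unsigned long __vma_address(struct page *page, struct vm_area_struct *vma)
{
	return vma->vm_start + ((page->index - vma->vm_pgoff) << PAGE_SHIFT);
}

/* Checked: the caller must already know the page maps into @vma. */
static unsigned long vma_address(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address = __vma_address(page, vma);

	assert(address >= vma->vm_start && address < vma->vm_end);
	return address;
}

/* A caller that cannot assume the page is in range checks explicitly,
 * as page_address_in_vma() does after this patch. */
static unsigned long page_address_in_vma(struct page *page,
					 struct vm_area_struct *vma)
{
	unsigned long address = __vma_address(page, vma);

	if (address < vma->vm_start || address >= vma->vm_end)
		return -1UL;	/* stand-in for the kernel's -EFAULT */
	return address;
}

int main(void)
{
	struct vm_area_struct vma = { 0x400000UL, 0x600000UL, 0 };	/* 512 pages */
	struct page inside = { 1 }, outside = { 4096 };

	printf("%#lx\n", vma_address(&inside, &vma));		/* 0x401000 */
	printf("%#lx\n", page_address_in_vma(&outside, &vma));	/* all-ones: out of range */
	return 0;
}

The checked/unchecked split mirrors the two kinds of callers in the rest of this diff: interval-tree walkers keep calling vma_address(), while page_address_in_vma() and page_mapped_in_vma() switch to __vma_address() plus an explicit range check.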
@@ -535,6 +539,7 @@ vma_address(struct page *page, struct vm_area_struct *vma)
  */
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
+	unsigned long address;
 	if (PageAnon(page)) {
 		struct anon_vma *page__anon_vma = page_anon_vma(page);
 		/*
@@ -550,7 +555,10 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 			return -EFAULT;
 	} else
 		return -EFAULT;
-	return vma_address(page, vma);
+	address = __vma_address(page, vma);
+	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
+		return -EFAULT;
+	return address;
 }
 
 /*
@@ -624,8 +632,8 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	address = vma_address(page, vma);
-	if (address == -EFAULT)		/* out of vma range */
+	address = __vma_address(page, vma);
+	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
 		return 0;
 	pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
 	if (!pte)			/* the page is not in this mm */
@@ -732,8 +740,6 @@ static int page_referenced_anon(struct page *page,
 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
 		struct vm_area_struct *vma = avc->vma;
 		unsigned long address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		/*
 		 * If we are reclaiming on behalf of a cgroup, skip
 		 * counting on behalf of references from different
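Why the two deleted lines here (and the matching deletions in __split_huge_page() above and in the hunks below) are safe is not spelled out in the diff itself, but it follows from how these loops find their vmas: each one walks an interval tree keyed by the page's pgoff, so any vma it yields already covers that offset. A self-contained sketch of the invariant, with the same simplified fields as in the sketch above:

#define PAGE_SHIFT 12	/* assumption: 4K pages */

struct vm_area_struct { unsigned long vm_start, vm_end, vm_pgoff; };

/* True iff an interval-tree lookup at @pgoff would yield @vma, i.e.
 * pgoff falls within [vm_pgoff, vm_pgoff + pages_in_vma - 1]. */
static int vma_covers_pgoff(const struct vm_area_struct *vma,
			    unsigned long pgoff)
{
	unsigned long last = vma->vm_pgoff +
		((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;

	return pgoff >= vma->vm_pgoff && pgoff <= last;
}

/*
 * Whenever vma_covers_pgoff() holds, the address computed by
 * __vma_address(), vm_start + ((pgoff - vm_pgoff) << PAGE_SHIFT),
 * necessarily lies in [vm_start, vm_end), so the removed
 * "if (address == -EFAULT) continue;" could never fire in these
 * loops; the new VM_BUG_ON() in vma_address() documents that.
 */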
@@ -799,8 +805,6 @@ static int page_referenced_file(struct page *page,
 
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		/*
 		 * If we are reclaiming on behalf of a cgroup, skip
 		 * counting on behalf of references from different
@@ -904,8 +908,6 @@ static int page_mkclean_file(struct address_space *mapping, struct page *page)
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		if (vma->vm_flags & VM_SHARED) {
 			unsigned long address = vma_address(page, vma);
-			if (address == -EFAULT)
-				continue;
 			ret += page_mkclean_one(page, vma, address);
 		}
 	}
@@ -1468,8 +1470,6 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
 			continue;
 
 		address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		ret = try_to_unmap_one(page, vma, address, flags);
 		if (ret != SWAP_AGAIN || !page_mapped(page))
 			break;
@@ -1508,8 +1508,6 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 	mutex_lock(&mapping->i_mmap_mutex);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		ret = try_to_unmap_one(page, vma, address, flags);
 		if (ret != SWAP_AGAIN || !page_mapped(page))
 			goto out;
@@ -1684,8 +1682,6 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
 		struct vm_area_struct *vma = avc->vma;
 		unsigned long address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		ret = rmap_one(page, vma, address, arg);
 		if (ret != SWAP_AGAIN)
 			break;
@@ -1707,8 +1703,6 @@ static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
 	mutex_lock(&mapping->i_mmap_mutex);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
-		if (address == -EFAULT)
-			continue;
 		ret = rmap_one(page, vma, address, arg);
 		if (ret != SWAP_AGAIN)
 			break;