Diffstat (limited to 'mm/ksm.c')
 mm/ksm.c | 40 ++++++++++++++++++++++++++++--------
 1 file changed, 32 insertions(+), 8 deletions(-)
diff --git a/mm/ksm.c b/mm/ksm.c
index 47c885368890..ae539f0b8aa1 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -709,15 +709,22 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 	spinlock_t *ptl;
 	int swapped;
 	int err = -EFAULT;
+	unsigned long mmun_start;	/* For mmu_notifiers */
+	unsigned long mmun_end;	/* For mmu_notifiers */
 
 	addr = page_address_in_vma(page, vma);
 	if (addr == -EFAULT)
 		goto out;
 
 	BUG_ON(PageTransCompound(page));
+
+	mmun_start = addr;
+	mmun_end   = addr + PAGE_SIZE;
+	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
+
 	ptep = page_check_address(page, mm, addr, &ptl, 0);
 	if (!ptep)
-		goto out;
+		goto out_mn;
 
 	if (pte_write(*ptep) || pte_dirty(*ptep)) {
 		pte_t entry;
@@ -752,6 +759,8 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 
 out_unlock:
 	pte_unmap_unlock(ptep, ptl);
+out_mn:
+	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 out:
 	return err;
 }
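
Note: the shape of the change in write_protect_page() is that the PTE
manipulation is now bracketed by an invalidation range, so secondary MMUs
(KVM and friends) drop their stale mappings before the primary PTE changes
and may rebuild them afterwards. A minimal sketch of the bracketing,
assuming the 3.7-era notifier signatures that take (mm, start, end);
wp_one_pte() is a hypothetical helper and its wrprotect step is
illustrative, not the function above:

	/* Sketch only: write-protect a single PTE while keeping
	 * secondary MMUs coherent.  Caller is assumed to hold the
	 * pte lock for ptep. */
	static void wp_one_pte(struct mm_struct *mm, unsigned long addr,
			       pte_t *ptep)
	{
		unsigned long start = addr;
		unsigned long end = addr + PAGE_SIZE;	/* [start, end) */

		mmu_notifier_invalidate_range_start(mm, start, end);
		/* ...primary page-table update goes here... */
		set_pte_at(mm, addr, ptep, pte_wrprotect(*ptep));
		mmu_notifier_invalidate_range_end(mm, start, end);
	}
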
@@ -776,6 +785,8 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
 	spinlock_t *ptl;
 	unsigned long addr;
 	int err = -EFAULT;
+	unsigned long mmun_start;	/* For mmu_notifiers */
+	unsigned long mmun_end;	/* For mmu_notifiers */
 
 	addr = page_address_in_vma(page, vma);
 	if (addr == -EFAULT)
@@ -794,10 +805,14 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
 	if (!pmd_present(*pmd))
 		goto out;
 
+	mmun_start = addr;
+	mmun_end   = addr + PAGE_SIZE;
+	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
+
 	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
 	if (!pte_same(*ptep, orig_pte)) {
 		pte_unmap_unlock(ptep, ptl);
-		goto out;
+		goto out_mn;
 	}
 
 	get_page(kpage);
@@ -814,6 +829,8 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
 
 	pte_unmap_unlock(ptep, ptl);
 	err = 0;
+out_mn:
+	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 out:
 	return err;
 }
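
Note: replace_page() gets the identical treatment, and both functions share
the same unwind rule: once mmu_notifier_invalidate_range_start() has been
issued, every exit path, including the error paths, must reach the matching
mmu_notifier_invalidate_range_end(), hence the new out_mn labels sitting
just above out:. Reduced to its skeleton (update_page_table() is a
placeholder, not a real kernel function):

	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	err = update_page_table(mm, addr);	/* placeholder step */
	if (err)
		goto out_mn;	/* don't return directly: _end() must run */
	err = 0;
out_mn:
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
out:
	return err;
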
@@ -1469,10 +1486,14 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 		 */
 		if (*vm_flags & (VM_MERGEABLE | VM_SHARED  | VM_MAYSHARE   |
 				 VM_PFNMAP    | VM_IO      | VM_DONTEXPAND |
-				 VM_RESERVED  | VM_HUGETLB | VM_INSERTPAGE |
-				 VM_NONLINEAR | VM_MIXEDMAP | VM_SAO))
+				 VM_HUGETLB | VM_NONLINEAR | VM_MIXEDMAP))
 			return 0;		/* just ignore the advice */
 
+#ifdef VM_SAO
+		if (*vm_flags & VM_SAO)
+			return 0;
+#endif
+
 		if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
 			err = __ksm_enter(mm);
 			if (err)
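
Note: VM_SAO (powerpc's strong access ordering flag) is no longer part of
the generic flag set, so it drops out of the unconditional mask and its
test moves behind a compile-time guard; on architectures where the macro
is undefined the block compiles away. Annotated restatement of the new
check (comments are the editor's, not the kernel's):

#ifdef VM_SAO
		/* VM_SAO pages carry arch-mandated ordering semantics
		 * that merging would break, so MADV_MERGEABLE is
		 * silently ignored for them. */
		if (*vm_flags & VM_SAO)
			return 0;
#endif
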
@@ -1582,7 +1603,7 @@ struct page *ksm_does_need_to_copy(struct page *page,
 		SetPageSwapBacked(new_page);
 		__set_page_locked(new_page);
 
-		if (page_evictable(new_page, vma))
+		if (!mlocked_vma_newpage(vma, new_page))
 			lru_cache_add_lru(new_page, LRU_ACTIVE_ANON);
 		else
 			add_page_to_unevictable_list(new_page);
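
Note: page_evictable() loses its vma argument in this series. For a page
that was only just allocated and copied, the sole vma-dependent question
left is whether the vma is mlocked, and mlocked_vma_newpage() answers
exactly that, so the test inverts: "evictable" becomes "not mlocked".
Annotated restatement (comments are the editor's):

		/* new_page is freshly copied and not yet mapped
		 * anywhere, so VM_LOCKED on this vma is the only thing
		 * that can make it unevictable. */
		if (!mlocked_vma_newpage(vma, new_page))
			lru_cache_add_lru(new_page, LRU_ACTIVE_ANON);
		else
			add_page_to_unevictable_list(new_page);
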
@@ -1614,7 +1635,8 @@ again:
 		struct vm_area_struct *vma;
 
 		anon_vma_lock(anon_vma);
-		list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) {
+		anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
+					       0, ULONG_MAX) {
 			vma = vmac->vma;
 			if (rmap_item->address < vma->vm_start ||
 			    rmap_item->address >= vma->vm_end)
@@ -1667,7 +1689,8 @@ again:
 		struct vm_area_struct *vma;
 
 		anon_vma_lock(anon_vma);
-		list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) {
+		anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
+					       0, ULONG_MAX) {
 			vma = vmac->vma;
 			if (rmap_item->address < vma->vm_start ||
 			    rmap_item->address >= vma->vm_end)
@@ -1719,7 +1742,8 @@ again:
 		struct vm_area_struct *vma;
 
 		anon_vma_lock(anon_vma);
-		list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) {
+		anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
+					       0, ULONG_MAX) {
 			vma = vmac->vma;
 			if (rmap_item->address < vma->vm_start ||
 			    rmap_item->address >= vma->vm_end)
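
Note: the last three hunks are one mechanical conversion repeated: the
anon_vma's same_anon_vma list is replaced by an interval tree of
anon_vma_chains. KSM's rmap walks target a single page, so they scan the
entire index range [0, ULONG_MAX] and keep filtering by address
themselves. The shared shape of the walk, reduced (the per-vma body is a
placeholder):

	struct anon_vma_chain *vmac;
	struct vm_area_struct *vma;

	anon_vma_lock(anon_vma);
	/* [0, ULONG_MAX] selects every vma hanging off this anon_vma. */
	anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
				       0, ULONG_MAX) {
		vma = vmac->vma;
		/* Skip vmas that no longer cover this rmap_item. */
		if (rmap_item->address < vma->vm_start ||
		    rmap_item->address >= vma->vm_end)
			continue;
		/* ...per-vma rmap work goes here... */
	}
	anon_vma_unlock(anon_vma);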