author		Mike Kravetz <mike.kravetz@oracle.com>	2018-12-28 03:39:38 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-28 15:11:51 -0500
commit		b43a9990055958e70347c56f90ea2ae32c67334c (patch)
tree		91f90f0c3e73ca076cbc4a9780bd7d5a271b6257 /mm/hugetlb.c
parent		1ecc07fd0a6d350bbf4dc176e0d654661b304a30 (diff)
hugetlbfs: use i_mmap_rwsem for more pmd sharing synchronization
While looking at BUGs associated with invalid huge page map counts, it was
discovered that a huge pte pointer could become 'invalid' and point to
another task's page table. Consider the following:
A task takes a page fault on a shared hugetlbfs file and calls
huge_pte_alloc to get a ptep. Suppose the returned ptep points to a
shared pmd.
Now, another task truncates the hugetlbfs file. As part of truncation, it
unmaps everyone who has the file mapped. If the range being truncated is
covered by a shared pmd, huge_pmd_unshare will be called. For all but the
last user of the shared pmd, huge_pmd_unshare will clear the pud pointing
to the pmd. If the task in the middle of the page fault is not the last
user, the ptep returned by huge_pte_alloc now points to another task's
page table or worse. This leads to bad things such as incorrect page
map/reference counts or invalid memory references.
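
Illustratively, the interleaving is (a simplified sketch of the scenario
described above, not a trace from the patch):

    faulting task                        truncating task
    -------------                        ---------------
    hugetlb_fault()
      ptep = huge_pte_alloc()
        -> ptep points into a pmd
           page shared with other tasks
                                         unmap all mappings of the file
                                           huge_pmd_unshare()
                                             -> clears the faulting task's
                                                pud entry for the shared pmd
      huge_ptep_get(ptep)
        -> ptep now references a page
           table the task no longer owns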
To fix, expand the use of i_mmap_rwsem as follows (the resulting locking
pattern is sketched after this list):
- i_mmap_rwsem is held in read mode whenever huge_pmd_share is called.
huge_pmd_share is only called via huge_pte_alloc, so callers of
huge_pte_alloc take i_mmap_rwsem before calling. In addition, callers
of huge_pte_alloc continue to hold the semaphore until finished with the
ptep.
- i_mmap_rwsem is held in write mode whenever huge_pmd_unshare is
called.
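
In outline, the rule these changes establish looks roughly like this (a
minimal sketch, not the literal diff below; error handling elided, and
mapping/mm/addr/h stand for the usual local variables in these paths):

	/* fault/fork side: hold i_mmap_rwsem for read while using ptep */
	i_mmap_lock_read(mapping);
	ptep = huge_pte_alloc(mm, addr, huge_page_size(h));
	/* ... walk or populate the page table via ptep ... */
	i_mmap_unlock_read(mapping);

	/* truncate/unmap side: take i_mmap_rwsem for write before unsharing */
	i_mmap_lock_write(mapping);
	huge_pmd_unshare(mm, &addr, ptep);
	i_mmap_unlock_write(mapping);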
[mike.kravetz@oracle.com: add explicit check for mapping != null]
Link: http://lkml.kernel.org/r/20181218223557.5202-2-mike.kravetz@oracle.com
Fixes: 39dde65c9940 ("shared page table for hugetlb page")
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: "Aneesh Kumar K . V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Prakash Sangappa <prakash.sangappa@oracle.com>
Cc: Colin Ian King <colin.king@canonical.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--	mm/hugetlb.c	64
1 file changed, 49 insertions, 15 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 12000ba5c868..87fd3ab809c6 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3238,6 +3238,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 	struct page *ptepage;
 	unsigned long addr;
 	int cow;
+	struct address_space *mapping = vma->vm_file->f_mapping;
 	struct hstate *h = hstate_vma(vma);
 	unsigned long sz = huge_page_size(h);
 	struct mmu_notifier_range range;
@@ -3249,13 +3250,23 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 		mmu_notifier_range_init(&range, src, vma->vm_start,
 					vma->vm_end);
 		mmu_notifier_invalidate_range_start(&range);
+	} else {
+		/*
+		 * For shared mappings i_mmap_rwsem must be held to call
+		 * huge_pte_alloc, otherwise the returned ptep could go
+		 * away if part of a shared pmd and another thread calls
+		 * huge_pmd_unshare.
+		 */
+		i_mmap_lock_read(mapping);
 	}
 
 	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
 		spinlock_t *src_ptl, *dst_ptl;
+
 		src_pte = huge_pte_offset(src, addr, sz);
 		if (!src_pte)
 			continue;
+
 		dst_pte = huge_pte_alloc(dst, addr, sz);
 		if (!dst_pte) {
 			ret = -ENOMEM;
@@ -3326,6 +3337,8 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 
 	if (cow)
 		mmu_notifier_invalidate_range_end(&range);
+	else
+		i_mmap_unlock_read(mapping);
 
 	return ret;
 }
@@ -3771,14 +3784,18 @@ retry:
 		};
 
 		/*
-		 * hugetlb_fault_mutex must be dropped before
-		 * handling userfault. Reacquire after handling
-		 * fault to make calling code simpler.
+		 * hugetlb_fault_mutex and i_mmap_rwsem must be
+		 * dropped before handling userfault. Reacquire
+		 * after handling fault to make calling code simpler.
 		 */
 		hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping,
 						idx, haddr);
 		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+		i_mmap_unlock_read(mapping);
+
 		ret = handle_userfault(&vmf, VM_UFFD_MISSING);
+
+		i_mmap_lock_read(mapping);
 		mutex_lock(&hugetlb_fault_mutex_table[hash]);
 		goto out;
 	}
@@ -3926,6 +3943,11 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
 	if (ptep) {
+		/*
+		 * Since we hold no locks, ptep could be stale. That is
+		 * OK as we are only making decisions based on content and
+		 * not actually modifying content here.
+		 */
 		entry = huge_ptep_get(ptep);
 		if (unlikely(is_hugetlb_entry_migration(entry))) {
 			migration_entry_wait_huge(vma, mm, ptep);
@@ -3933,20 +3955,31 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
 			return VM_FAULT_HWPOISON_LARGE |
 				VM_FAULT_SET_HINDEX(hstate_index(h));
-	} else {
-		ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
-		if (!ptep)
-			return VM_FAULT_OOM;
 	}
 
+	/*
+	 * Acquire i_mmap_rwsem before calling huge_pte_alloc and hold
+	 * until finished with ptep. This prevents huge_pmd_unshare from
+	 * being called elsewhere and making the ptep no longer valid.
+	 *
+	 * ptep could have already been assigned via huge_pte_offset. That
+	 * is OK, as huge_pte_alloc will return the same value unless
+	 * something changed.
+	 */
 	mapping = vma->vm_file->f_mapping;
-	idx = vma_hugecache_offset(h, vma, haddr);
+	i_mmap_lock_read(mapping);
+	ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
+	if (!ptep) {
+		i_mmap_unlock_read(mapping);
+		return VM_FAULT_OOM;
+	}
 
 	/*
 	 * Serialize hugepage allocation and instantiation, so that we don't
 	 * get spurious allocation failures if two CPUs race to instantiate
 	 * the same page in the page cache.
 	 */
+	idx = vma_hugecache_offset(h, vma, haddr);
 	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr);
 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
@@ -4034,6 +4067,7 @@ out_ptl:
 	}
 out_mutex:
 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+	i_mmap_unlock_read(mapping);
 	/*
 	 * Generally it's safe to hold refcount during waiting page lock. But
 	 * here we just wait to defer the next page fault to avoid busy loop and
@@ -4638,10 +4672,12 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
  * and returns the corresponding pte. While this is not necessary for the
  * !shared pmd case because we can allocate the pmd later as well, it makes the
- * code much cleaner. pmd allocation is essential for the shared case because
- * pud has to be populated inside the same i_mmap_rwsem section - otherwise
- * racing tasks could either miss the sharing (see huge_pte_offset) or select a
- * bad pmd for sharing.
+ * code much cleaner.
+ *
+ * This routine must be called with i_mmap_rwsem held in at least read mode.
+ * For hugetlbfs, this prevents removal of any page table entries associated
+ * with the address space. This is important as we are setting up sharing
+ * based on existing page table entries (mappings).
  */
 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 {
@@ -4658,7 +4694,6 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 	if (!vma_shareable(vma, addr))
 		return (pte_t *)pmd_alloc(mm, pud, addr);
 
-	i_mmap_lock_write(mapping);
 	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
 		if (svma == vma)
 			continue;
@@ -4688,7 +4723,6 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 		spin_unlock(ptl);
 out:
 	pte = (pte_t *)pmd_alloc(mm, pud, addr);
-	i_mmap_unlock_write(mapping);
 	return pte;
 }
 
@@ -4699,7 +4733,7 @@ out:
  * indicated by page_count > 1, unmap is achieved by clearing pud and
  * decrementing the ref count. If count == 1, the pte page is not shared.
  *
- * called with page table lock held.
+ * Called with page table lock held and i_mmap_rwsem held in write mode.
  *
  * returns: 1 successfully unmapped a shared pte page
  *	    0 the underlying pte page is not shared, or it is the last user