Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--  mm/hugetlb.c  81
1 file changed, 24 insertions(+), 57 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 745088810965..df2e7dd5ff17 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3238,7 +3238,6 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
         struct page *ptepage;
         unsigned long addr;
         int cow;
-        struct address_space *mapping = vma->vm_file->f_mapping;
         struct hstate *h = hstate_vma(vma);
         unsigned long sz = huge_page_size(h);
         struct mmu_notifier_range range;
@@ -3250,23 +3249,13 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                 mmu_notifier_range_init(&range, src, vma->vm_start,
                                         vma->vm_end);
                 mmu_notifier_invalidate_range_start(&range);
-        } else {
-                /*
-                 * For shared mappings i_mmap_rwsem must be held to call
-                 * huge_pte_alloc, otherwise the returned ptep could go
-                 * away if part of a shared pmd and another thread calls
-                 * huge_pmd_unshare.
-                 */
-                i_mmap_lock_read(mapping);
         }
 
         for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
                 spinlock_t *src_ptl, *dst_ptl;
-
                 src_pte = huge_pte_offset(src, addr, sz);
                 if (!src_pte)
                         continue;
-
                 dst_pte = huge_pte_alloc(dst, addr, sz);
                 if (!dst_pte) {
                         ret = -ENOMEM;
@@ -3337,8 +3326,6 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 
         if (cow)
                 mmu_notifier_invalidate_range_end(&range);
-        else
-                i_mmap_unlock_read(mapping);
 
         return ret;
 }
@@ -3755,16 +3742,16 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
         }
 
         /*
-         * We can not race with truncation due to holding i_mmap_rwsem.
-         * Check once here for faults beyond end of file.
+         * Use page lock to guard against racing truncation
+         * before we get page_table_lock.
          */
-        size = i_size_read(mapping->host) >> huge_page_shift(h);
-        if (idx >= size)
-                goto out;
-
 retry:
         page = find_lock_page(mapping, idx);
         if (!page) {
+                size = i_size_read(mapping->host) >> huge_page_shift(h);
+                if (idx >= size)
+                        goto out;
+
                 /*
                  * Check for page in userfault range
                  */
@@ -3784,18 +3771,14 @@ retry:
                         };
 
                         /*
-                         * hugetlb_fault_mutex and i_mmap_rwsem must be
-                         * dropped before handling userfault. Reacquire
-                         * after handling fault to make calling code simpler.
+                         * hugetlb_fault_mutex must be dropped before
+                         * handling userfault. Reacquire after handling
+                         * fault to make calling code simpler.
                          */
                         hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping,
                                                         idx, haddr);
                         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
-                        i_mmap_unlock_read(mapping);
-
                         ret = handle_userfault(&vmf, VM_UFFD_MISSING);
-
-                        i_mmap_lock_read(mapping);
                         mutex_lock(&hugetlb_fault_mutex_table[hash]);
                         goto out;
                 }
@@ -3854,6 +3837,9 @@ retry:
         }
 
         ptl = huge_pte_lock(h, mm, ptep);
+        size = i_size_read(mapping->host) >> huge_page_shift(h);
+        if (idx >= size)
+                goto backout;
 
         ret = 0;
         if (!huge_pte_none(huge_ptep_get(ptep)))
@@ -3940,11 +3926,6 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
         ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
         if (ptep) {
-                /*
-                 * Since we hold no locks, ptep could be stale. That is
-                 * OK as we are only making decisions based on content and
-                 * not actually modifying content here.
-                 */
                 entry = huge_ptep_get(ptep);
                 if (unlikely(is_hugetlb_entry_migration(entry))) {
                         migration_entry_wait_huge(vma, mm, ptep);
@@ -3952,33 +3933,20 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
                         return VM_FAULT_HWPOISON_LARGE |
                                 VM_FAULT_SET_HINDEX(hstate_index(h));
+        } else {
+                ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
+                if (!ptep)
+                        return VM_FAULT_OOM;
         }
 
-        /*
-         * Acquire i_mmap_rwsem before calling huge_pte_alloc and hold
-         * until finished with ptep. This serves two purposes:
-         * 1) It prevents huge_pmd_unshare from being called elsewhere
-         *    and making the ptep no longer valid.
-         * 2) It synchronizes us with file truncation.
-         *
-         * ptep could have already be assigned via huge_pte_offset. That
-         * is OK, as huge_pte_alloc will return the same value unless
-         * something changed.
-         */
         mapping = vma->vm_file->f_mapping;
-        i_mmap_lock_read(mapping);
-        ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
-        if (!ptep) {
-                i_mmap_unlock_read(mapping);
-                return VM_FAULT_OOM;
-        }
+        idx = vma_hugecache_offset(h, vma, haddr);
 
         /*
          * Serialize hugepage allocation and instantiation, so that we don't
          * get spurious allocation failures if two CPUs race to instantiate
          * the same page in the page cache.
          */
-        idx = vma_hugecache_offset(h, vma, haddr);
         hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr);
         mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
@@ -4066,7 +4034,6 @@ out_ptl:
         }
 out_mutex:
         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
-        i_mmap_unlock_read(mapping);
         /*
          * Generally it's safe to hold refcount during waiting page lock. But
          * here we just wait to defer the next page fault to avoid busy loop and
@@ -4671,12 +4638,10 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
  * and returns the corresponding pte. While this is not necessary for the
  * !shared pmd case because we can allocate the pmd later as well, it makes the
- * code much cleaner.
- *
- * This routine must be called with i_mmap_rwsem held in at least read mode.
- * For hugetlbfs, this prevents removal of any page table entries associated
- * with the address space. This is important as we are setting up sharing
- * based on existing page table entries (mappings).
+ * code much cleaner. pmd allocation is essential for the shared case because
+ * pud has to be populated inside the same i_mmap_rwsem section - otherwise
+ * racing tasks could either miss the sharing (see huge_pte_offset) or select a
+ * bad pmd for sharing.
  */
 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 {
@@ -4693,6 +4658,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
         if (!vma_shareable(vma, addr))
                 return (pte_t *)pmd_alloc(mm, pud, addr);
 
+        i_mmap_lock_write(mapping);
         vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
                 if (svma == vma)
                         continue;
@@ -4722,6 +4688,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
         spin_unlock(ptl);
 out:
         pte = (pte_t *)pmd_alloc(mm, pud, addr);
+        i_mmap_unlock_write(mapping);
         return pte;
 }
 
@@ -4732,7 +4699,7 @@ out:
  * indicated by page_count > 1, unmap is achieved by clearing pud and
  * decrementing the ref count. If count == 1, the pte page is not shared.
  *
- * Called with page table lock held and i_mmap_rwsem held in write mode.
+ * called with page table lock held.
  *
  * returns: 1 successfully unmapped a shared pte page
  *          0 the underlying pte page is not shared, or it is the last user