 mm/hugetlb.c | 32 ++++++++++++++++++++++++++++++++--
 1 file changed, 30 insertions(+), 2 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 424b0ef08a60..2d2ff5e8bf2b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3984,6 +3984,9 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 			    unsigned long src_addr,
 			    struct page **pagep)
 {
+	struct address_space *mapping;
+	pgoff_t idx;
+	unsigned long size;
 	int vm_shared = dst_vma->vm_flags & VM_SHARED;
 	struct hstate *h = hstate_vma(dst_vma);
 	pte_t _dst_pte;
@@ -4021,13 +4024,24 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 	__SetPageUptodate(page);
 	set_page_huge_active(page);
 
+	mapping = dst_vma->vm_file->f_mapping;
+	idx = vma_hugecache_offset(h, dst_vma, dst_addr);
+
 	/*
 	 * If shared, add to page cache
 	 */
 	if (vm_shared) {
-		struct address_space *mapping = dst_vma->vm_file->f_mapping;
-		pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
+		size = i_size_read(mapping->host) >> huge_page_shift(h);
+		ret = -EFAULT;
+		if (idx >= size)
+			goto out_release_nounlock;
 
+		/*
+		 * Serialization between remove_inode_hugepages() and
+		 * huge_add_to_page_cache() below happens through the
+		 * hugetlb_fault_mutex_table that must be held by the
+		 * caller here.
+		 */
 		ret = huge_add_to_page_cache(page, mapping, idx);
 		if (ret)
 			goto out_release_nounlock;
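
The check added above fails a UFFDIO_COPY early when the destination's huge-page index lands at or past the end of the backing file. The same index arithmetic can be shown in a minimal userspace sketch (plain C; HUGE_SHIFT, within_i_size() and the 2 MiB page size are invented for illustration, loosely standing in for huge_page_shift(h) and the i_size_read()/vma_hugecache_offset() pair in the patch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HUGE_SHIFT 21	/* stands in for huge_page_shift(h); assumes 2 MiB pages */

/* idx: huge-page index of the destination within the file. */
static bool within_i_size(uint64_t idx, uint64_t i_size)
{
	uint64_t size = i_size >> HUGE_SHIFT;	/* file size in huge pages */

	return idx < size;	/* idx >= size is the -EFAULT case above */
}

int main(void)
{
	/* A 4 MiB file holds huge-page indexes 0 and 1; index 2 is past EOF. */
	printf("%d %d %d\n",
	       within_i_size(0, 4ULL << 20),
	       within_i_size(1, 4ULL << 20),
	       within_i_size(2, 4ULL << 20));
	return 0;
}

Compiled and run, this prints "1 1 0": indexes 0 and 1 fit in a 4 MiB file of 2 MiB pages, index 2 does not.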
@@ -4036,6 +4050,20 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 	ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
 	spin_lock(ptl);
 
+	/*
+	 * Recheck the i_size after holding PT lock to make sure not
+	 * to leave any page mapped (as page_mapped()) beyond the end
+	 * of the i_size (remove_inode_hugepages() is strict about
+	 * enforcing that). If we bail out here, we'll also leave a
+	 * page in the radix tree in the vm_shared case beyond the end
+	 * of the i_size, but remove_inode_hugepages() will take care
+	 * of it as soon as we drop the hugetlb_fault_mutex_table.
+	 */
+	size = i_size_read(mapping->host) >> huge_page_shift(h);
+	ret = -EFAULT;
+	if (idx >= size)
+		goto out_release_unlock;
+
 	ret = -EEXIST;
 	if (!huge_pte_none(huge_ptep_get(dst_pte)))
 		goto out_release_unlock;
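
This hunk rechecks i_size after taking the page-table lock because the file can be truncated between the earlier unlocked check and the point where the PTE is installed; the comment in the patch spells out why a mapped page beyond i_size must never be left behind. The check/lock/recheck shape can be modeled in a self-contained userspace sketch; map_index(), truncate_to() and the single mutex (standing in, loosely, for the kernel's hugetlb_fault_mutex_table and PT lock) are all invented for the illustration:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

#define NR_PAGES 16

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t size_pages = 2;		/* "i_size", in huge pages */
static bool mapped[NR_PAGES];		/* stands in for installed PTEs */

/* The UFFDIO_COPY side: false corresponds to the -EFAULT paths above. */
static bool map_index(uint64_t idx)
{
	if (idx >= size_pages)		/* unlocked check: cheap fail-fast,
					   racy by design, like the first
					   i_size_read() in the patch */
		return false;

	pthread_mutex_lock(&lock);
	if (idx >= size_pages) {	/* recheck: size may have shrunk */
		pthread_mutex_unlock(&lock);
		return false;
	}
	mapped[idx] = true;		/* never installed past current size */
	pthread_mutex_unlock(&lock);
	return true;
}

/* The truncate side: shrink, then unmap everything past the new end. */
static void truncate_to(uint64_t new_size)
{
	pthread_mutex_lock(&lock);
	size_pages = new_size;
	for (uint64_t i = new_size; i < NR_PAGES; i++)
		mapped[i] = false;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	bool a = map_index(1);		/* in range: succeeds */

	truncate_to(1);			/* index 1 is now past EOF */
	return a && !map_index(1) ? 0 : 1;	/* recheck refuses it */
}

Without the recheck inside the lock, map_index() could pass the unlocked test, lose the race to truncate_to(), and still install a mapping past the new end of file, which is exactly the page_mapped()-beyond-i_size state the patch is closing off.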