author		Mike Kravetz <mike.kravetz@oracle.com>	2018-12-28 03:39:38 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-28 15:11:51 -0500
commit		b43a9990055958e70347c56f90ea2ae32c67334c (patch)
tree		91f90f0c3e73ca076cbc4a9780bd7d5a271b6257
parent		1ecc07fd0a6d350bbf4dc176e0d654661b304a30 (diff)
hugetlbfs: use i_mmap_rwsem for more pmd sharing synchronization
While looking at BUGs associated with invalid huge page map counts, it was
discovered that a huge pte pointer could become 'invalid' and point to
another task's page table. Consider the following scenario:
A task takes a page fault on a shared hugetlbfs file and calls
huge_pte_alloc to get a ptep. Suppose the returned ptep points to a
shared pmd.
Now, another task truncates the hugetlbfs file. As part of truncation, it
unmaps everyone who has the file mapped. If the range being truncated is
covered by a shared pmd, huge_pmd_unshare will be called. For all but the
last user of the shared pmd, huge_pmd_unshare will clear the pud pointing
to the pmd. If the task in the middle of the page fault is not the last
user, the ptep returned by huge_pte_alloc now points to another task's
page table or worse. This leads to bad things such as incorrect page
map/reference counts or invalid memory references.
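
In outline, the race looks like this. The sketch below is illustrative C,
not compilable kernel code: the function names are the real symbols
involved, but arguments, surrounding context, and error handling are
elided.

	/* Task A: page fault on a shared hugetlbfs mapping */
	ptep = huge_pte_alloc(mm, address, sz);	/* may return a shared pmd */
	/* ... Task A is preempted before taking the page table lock ... */

	/* Task B: truncates the file, unmapping everyone who has it mapped */
	i_mmap_lock_write(mapping);
	huge_pmd_unshare(mm, &address, ptep);	/* clears the pud pointing to the shared pmd */
	i_mmap_unlock_write(mapping);

	/* Task A resumes: unless it was the last user of the shared pmd,
	 * ptep now points into another task's page table (or worse), and
	 * any use of it corrupts page map/reference counts. */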
To fix, expand the use of i_mmap_rwsem as follows (the resulting locking
pattern is sketched after this list):
- i_mmap_rwsem is held in read mode whenever huge_pmd_share is called.
huge_pmd_share is only called via huge_pte_alloc, so callers of
huge_pte_alloc take i_mmap_rwsem before calling. In addition, callers
of huge_pte_alloc continue to hold the semaphore until finished with the
ptep.
- i_mmap_rwsem is held in write mode whenever huge_pmd_unshare is
called.
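
Sketched with the same caveats as above (real lock and allocation symbols,
elided context; an illustration of the protocol rather than a literal
excerpt from the patch):

	/* Fault and allocation paths (readers): hold i_mmap_rwsem across
	 * huge_pte_alloc and every later use of the returned ptep. */
	i_mmap_lock_read(mapping);
	ptep = huge_pte_alloc(mm, address, sz);
	/* ... handle the fault, dereferencing ptep ... */
	i_mmap_unlock_read(mapping);

	/* Truncate/unmap paths (writers): take the semaphore exclusively
	 * around any path that can call huge_pmd_unshare. */
	i_mmap_lock_write(mapping);
	huge_pmd_unshare(mm, &address, ptep);
	i_mmap_unlock_write(mapping);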
[mike.kravetz@oracle.com: add explicit check for mapping != null]
Link: http://lkml.kernel.org/r/20181218223557.5202-2-mike.kravetz@oracle.com
Fixes: 39dde65c9940 ("shared page table for hugetlb page")
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: "Aneesh Kumar K . V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Prakash Sangappa <prakash.sangappa@oracle.com>
Cc: Colin Ian King <colin.king@canonical.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 mm/hugetlb.c        | 64
 mm/memory-failure.c | 16
 mm/migrate.c        | 13
 mm/rmap.c           |  4
 mm/userfaultfd.c    | 11
 5 files changed, 88 insertions(+), 20 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 12000ba5c868..87fd3ab809c6 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3238,6 +3238,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 	struct page *ptepage;
 	unsigned long addr;
 	int cow;
+	struct address_space *mapping = vma->vm_file->f_mapping;
 	struct hstate *h = hstate_vma(vma);
 	unsigned long sz = huge_page_size(h);
 	struct mmu_notifier_range range;
@@ -3249,13 +3250,23 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 		mmu_notifier_range_init(&range, src, vma->vm_start,
 					vma->vm_end);
 		mmu_notifier_invalidate_range_start(&range);
+	} else {
+		/*
+		 * For shared mappings i_mmap_rwsem must be held to call
+		 * huge_pte_alloc, otherwise the returned ptep could go
+		 * away if part of a shared pmd and another thread calls
+		 * huge_pmd_unshare.
+		 */
+		i_mmap_lock_read(mapping);
 	}
 
 	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
 		spinlock_t *src_ptl, *dst_ptl;
+
 		src_pte = huge_pte_offset(src, addr, sz);
 		if (!src_pte)
 			continue;
+
 		dst_pte = huge_pte_alloc(dst, addr, sz);
 		if (!dst_pte) {
 			ret = -ENOMEM;
@@ -3326,6 +3337,8 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 
 	if (cow)
 		mmu_notifier_invalidate_range_end(&range);
+	else
+		i_mmap_unlock_read(mapping);
 
 	return ret;
 }
@@ -3771,14 +3784,18 @@ retry:
 			};
 
 			/*
-			 * hugetlb_fault_mutex must be dropped before
-			 * handling userfault.  Reacquire after handling
-			 * fault to make calling code simpler.
+			 * hugetlb_fault_mutex and i_mmap_rwsem must be
+			 * dropped before handling userfault.  Reacquire
+			 * after handling fault to make calling code simpler.
 			 */
 			hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping,
 							idx, haddr);
 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+			i_mmap_unlock_read(mapping);
+
 			ret = handle_userfault(&vmf, VM_UFFD_MISSING);
+
+			i_mmap_lock_read(mapping);
 			mutex_lock(&hugetlb_fault_mutex_table[hash]);
 			goto out;
 		}
@@ -3926,6 +3943,11 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
 	if (ptep) {
+		/*
+		 * Since we hold no locks, ptep could be stale.  That is
+		 * OK as we are only making decisions based on content and
+		 * not actually modifying content here.
+		 */
 		entry = huge_ptep_get(ptep);
 		if (unlikely(is_hugetlb_entry_migration(entry))) {
 			migration_entry_wait_huge(vma, mm, ptep);
@@ -3933,20 +3955,31 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
 			return VM_FAULT_HWPOISON_LARGE |
 				VM_FAULT_SET_HINDEX(hstate_index(h));
-	} else {
-		ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
-		if (!ptep)
-			return VM_FAULT_OOM;
 	}
 
+	/*
+	 * Acquire i_mmap_rwsem before calling huge_pte_alloc and hold
+	 * until finished with ptep.  This prevents huge_pmd_unshare from
+	 * being called elsewhere and making the ptep no longer valid.
+	 *
+	 * ptep could have already been assigned via huge_pte_offset.  That
+	 * is OK, as huge_pte_alloc will return the same value unless
+	 * something changed.
+	 */
 	mapping = vma->vm_file->f_mapping;
-	idx = vma_hugecache_offset(h, vma, haddr);
+	i_mmap_lock_read(mapping);
+	ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
+	if (!ptep) {
+		i_mmap_unlock_read(mapping);
+		return VM_FAULT_OOM;
+	}
 
 	/*
 	 * Serialize hugepage allocation and instantiation, so that we don't
 	 * get spurious allocation failures if two CPUs race to instantiate
 	 * the same page in the page cache.
 	 */
+	idx = vma_hugecache_offset(h, vma, haddr);
 	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr);
 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
@@ -4034,6 +4067,7 @@ out_ptl:
 	}
 out_mutex:
 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+	i_mmap_unlock_read(mapping);
 	/*
 	 * Generally it's safe to hold refcount during waiting page lock. But
 	 * here we just wait to defer the next page fault to avoid busy loop and
@@ -4638,10 +4672,12 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
  * and returns the corresponding pte. While this is not necessary for the
  * !shared pmd case because we can allocate the pmd later as well, it makes the
- * code much cleaner. pmd allocation is essential for the shared case because
- * pud has to be populated inside the same i_mmap_rwsem section - otherwise
- * racing tasks could either miss the sharing (see huge_pte_offset) or select a
- * bad pmd for sharing.
+ * code much cleaner.
+ *
+ * This routine must be called with i_mmap_rwsem held in at least read mode.
+ * For hugetlbfs, this prevents removal of any page table entries associated
+ * with the address space.  This is important as we are setting up sharing
+ * based on existing page table entries (mappings).
  */
 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 {
@@ -4658,7 +4694,6 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 	if (!vma_shareable(vma, addr))
 		return (pte_t *)pmd_alloc(mm, pud, addr);
 
-	i_mmap_lock_write(mapping);
 	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
 		if (svma == vma)
 			continue;
@@ -4688,7 +4723,6 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 	spin_unlock(ptl);
 out:
 	pte = (pte_t *)pmd_alloc(mm, pud, addr);
-	i_mmap_unlock_write(mapping);
 	return pte;
 }
 
@@ -4699,7 +4733,7 @@ out:
  * indicated by page_count > 1, unmap is achieved by clearing pud and
  * decrementing the ref count. If count == 1, the pte page is not shared.
  *
- * called with page table lock held.
+ * Called with page table lock held and i_mmap_rwsem held in write mode.
  *
  * returns: 1 successfully unmapped a shared pte page
  *	    0 the underlying pte page is not shared, or it is the last user
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 7c72f2a95785..6379fff1a5ff 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -966,7 +966,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
 	struct address_space *mapping;
 	LIST_HEAD(tokill);
-	bool unmap_success;
+	bool unmap_success = true;
 	int kill = 1, forcekill;
 	struct page *hpage = *hpagep;
 	bool mlocked = PageMlocked(hpage);
@@ -1028,7 +1028,19 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	if (kill)
 		collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
 
-	unmap_success = try_to_unmap(hpage, ttu);
+	if (!PageHuge(hpage)) {
+		unmap_success = try_to_unmap(hpage, ttu);
+	} else if (mapping) {
+		/*
+		 * For hugetlb pages, try_to_unmap could potentially call
+		 * huge_pmd_unshare.  Because of this, take semaphore in
+		 * write mode here and set TTU_RMAP_LOCKED to indicate we
+		 * have taken the lock at this higher level.
+		 */
+		i_mmap_lock_write(mapping);
+		unmap_success = try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED);
+		i_mmap_unlock_write(mapping);
+	}
 	if (!unmap_success)
 		pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
 		       pfn, page_mapcount(hpage));
diff --git a/mm/migrate.c b/mm/migrate.c
index 4389696fba0e..5d1839a9148d 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1324,8 +1324,19 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 		goto put_anon;
 
 	if (page_mapped(hpage)) {
+		struct address_space *mapping = page_mapping(hpage);
+
+		/*
+		 * try_to_unmap could potentially call huge_pmd_unshare.
+		 * Because of this, take semaphore in write mode here and
+		 * set TTU_RMAP_LOCKED to let lower levels know we have
+		 * taken the lock.
+		 */
+		i_mmap_lock_write(mapping);
 		try_to_unmap(hpage,
-			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS|
+			TTU_RMAP_LOCKED);
+		i_mmap_unlock_write(mapping);
 		page_was_mapped = 1;
 	}
 
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -25,6 +25,7 @@
  *     page->flags PG_locked (lock_page)
  *       hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
  *         mapping->i_mmap_rwsem
+ *           hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
  *           anon_vma->rwsem
  *             mm->page_table_lock or pte_lock
  *               zone_lru_lock (in mark_page_accessed, isolate_lru_page)
@@ -1378,6 +1379,9 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		/*
 		 * If sharing is possible, start and end will be adjusted
 		 * accordingly.
+		 *
+		 * If called for a huge page, caller must hold i_mmap_rwsem
+		 * in write mode as it is possible to call huge_pmd_unshare.
 		 */
 		adjust_range_if_pmd_sharing_possible(vma, &range.start,
 						     &range.end);
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 458acda96f20..48368589f519 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -267,10 +267,14 @@ retry:
 		VM_BUG_ON(dst_addr & ~huge_page_mask(h));
 
 		/*
-		 * Serialize via hugetlb_fault_mutex
+		 * Serialize via i_mmap_rwsem and hugetlb_fault_mutex.
+		 * i_mmap_rwsem ensures the dst_pte remains valid even
+		 * in the case of shared pmds.  fault mutex prevents
+		 * races with other faulting threads.
 		 */
-		idx = linear_page_index(dst_vma, dst_addr);
 		mapping = dst_vma->vm_file->f_mapping;
+		i_mmap_lock_read(mapping);
+		idx = linear_page_index(dst_vma, dst_addr);
 		hash = hugetlb_fault_mutex_hash(h, dst_mm, dst_vma, mapping,
 						idx, dst_addr);
 		mutex_lock(&hugetlb_fault_mutex_table[hash]);
@@ -279,6 +283,7 @@ retry:
 		dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
 		if (!dst_pte) {
 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+			i_mmap_unlock_read(mapping);
 			goto out_unlock;
 		}
 
@@ -286,6 +291,7 @@ retry:
 		dst_pteval = huge_ptep_get(dst_pte);
 		if (!huge_pte_none(dst_pteval)) {
 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+			i_mmap_unlock_read(mapping);
 			goto out_unlock;
 		}
 
@@ -293,6 +299,7 @@ retry:
 					       dst_addr, src_addr, &page);
 
 		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+		i_mmap_unlock_read(mapping);
 		vm_alloc_shared = vm_shared;
 
 		cond_resched();