path: root/mm/userfaultfd.c
author     Mike Kravetz <mike.kravetz@oracle.com>          2018-12-28 03:39:38 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-12-28 15:11:51 -0500
commit     b43a9990055958e70347c56f90ea2ae32c67334c (patch)
tree       91f90f0c3e73ca076cbc4a9780bd7d5a271b6257 /mm/userfaultfd.c
parent     1ecc07fd0a6d350bbf4dc176e0d654661b304a30 (diff)
hugetlbfs: use i_mmap_rwsem for more pmd sharing synchronization
While looking at BUGs associated with invalid huge page map counts, it
was discovered that a huge pte pointer could become 'invalid' and point
to another task's page table.  Consider the following:

A task takes a page fault on a shared hugetlbfs file and calls
huge_pte_alloc to get a ptep.  Suppose the returned ptep points to a
shared pmd.

Now, another task truncates the hugetlbfs file.  As part of truncation,
it unmaps everyone who has the file mapped.  If the range being
truncated is covered by a shared pmd, huge_pmd_unshare will be called.
For all but the last user of the shared pmd, huge_pmd_unshare will
clear the pud pointing to the pmd.  If the task in the middle of the
page fault is not the last user, the ptep returned by huge_pte_alloc
now points to another task's page table or worse.  This leads to bad
things such as incorrect page map/reference counts or invalid memory
references.

To fix, expand the use of i_mmap_rwsem as follows:

- i_mmap_rwsem is held in read mode whenever huge_pmd_share is called.
  huge_pmd_share is only called via huge_pte_alloc, so callers of
  huge_pte_alloc take i_mmap_rwsem before calling.  In addition, callers
  of huge_pte_alloc continue to hold the semaphore until finished with
  the ptep.
- i_mmap_rwsem is held in write mode whenever huge_pmd_unshare is
  called.

[mike.kravetz@oracle.com: add explicit check for mapping != null]
Link: http://lkml.kernel.org/r/20181218223557.5202-2-mike.kravetz@oracle.com
Fixes: 39dde65c9940 ("shared page table for hugetlb page")
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: "Aneesh Kumar K . V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Prakash Sangappa <prakash.sangappa@oracle.com>
Cc: Colin Ian King <colin.king@canonical.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
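[Editor's note: the locking protocol above can be illustrated with a short
sketch.  This is not code from the patch: fault_on_hugetlb_range() is a
hypothetical helper invented for illustration and its error handling is
elided; only the lock acquisition/release ordering around huge_pte_alloc()
mirrors what the commit describes (and what the diff below implements in
the userfaultfd hugetlb copy path).]

#include <linux/fs.h>
#include <linux/hugetlb.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Hypothetical caller of huge_pte_alloc(), sketching the protocol:
 * take i_mmap_rwsem in read mode before huge_pte_alloc() (which may
 * attach to a shared pmd) and keep holding it until completely done
 * with the returned ptep.
 */
static int fault_on_hugetlb_range(struct mm_struct *mm,
				  struct vm_area_struct *vma,
				  unsigned long addr, struct hstate *h)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	pte_t *ptep;
	u32 hash;
	int ret = 0;

	/* Read mode: excludes concurrent huge_pmd_unshare (write mode). */
	i_mmap_lock_read(mapping);

	/* Fault mutex serializes faults on the same page of the file. */
	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping,
					linear_page_index(vma, addr), addr);
	mutex_lock(&hugetlb_fault_mutex_table[hash]);

	ptep = huge_pte_alloc(mm, addr, huge_page_size(h));
	if (!ptep) {
		ret = -ENOMEM;
		goto out;
	}

	/* ... use ptep; it cannot be unshared while the rwsem is held ... */

out:
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
	i_mmap_unlock_read(mapping);	/* ptep may become stale after this */
	return ret;
}

[Note the unlock order: the fault mutex is dropped before i_mmap_rwsem,
matching the diff below, and a cached ptep must not be dereferenced once
the semaphore is released, since another task's huge_pmd_unshare may then
disconnect the shared pmd.]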
Diffstat (limited to 'mm/userfaultfd.c')
-rw-r--r--  mm/userfaultfd.c | 11 +++++++++--
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 458acda96f20..48368589f519 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -267,10 +267,14 @@ retry:
 		VM_BUG_ON(dst_addr & ~huge_page_mask(h));
 
 		/*
-		 * Serialize via hugetlb_fault_mutex
+		 * Serialize via i_mmap_rwsem and hugetlb_fault_mutex.
+		 * i_mmap_rwsem ensures the dst_pte remains valid even
+		 * in the case of shared pmds.  fault mutex prevents
+		 * races with other faulting threads.
 		 */
-		idx = linear_page_index(dst_vma, dst_addr);
 		mapping = dst_vma->vm_file->f_mapping;
+		i_mmap_lock_read(mapping);
+		idx = linear_page_index(dst_vma, dst_addr);
 		hash = hugetlb_fault_mutex_hash(h, dst_mm, dst_vma, mapping,
 						idx, dst_addr);
 		mutex_lock(&hugetlb_fault_mutex_table[hash]);
@@ -279,6 +283,7 @@ retry:
 		dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
 		if (!dst_pte) {
 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+			i_mmap_unlock_read(mapping);
 			goto out_unlock;
 		}
 
@@ -286,6 +291,7 @@ retry:
 		dst_pteval = huge_ptep_get(dst_pte);
 		if (!huge_pte_none(dst_pteval)) {
 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+			i_mmap_unlock_read(mapping);
 			goto out_unlock;
 		}
 
@@ -293,6 +299,7 @@ retry:
 						dst_addr, src_addr, &page);
 
 		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+		i_mmap_unlock_read(mapping);
 		vm_alloc_shared = vm_shared;
 
 		cond_resched();