Diffstat (limited to 'mm')
-rw-r--r--	mm/hugetlb.c	16
-rw-r--r--	mm/memory.c	18
2 files changed, 19 insertions(+), 15 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 82378d44a0c5..4cf7a90e9140 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1439,19 +1439,9 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 			  unsigned long end, struct page *ref_page)
 {
-	/*
-	 * It is undesirable to test vma->vm_file as it should be non-null
-	 * for valid hugetlb area. However, vm_file will be NULL in the error
-	 * cleanup path of do_mmap_pgoff. When hugetlbfs ->mmap method fails,
-	 * do_mmap_pgoff() nullifies vma->vm_file before calling this function
-	 * to clean up. Since no pte has actually been setup, it is safe to
-	 * do nothing in this case.
-	 */
-	if (vma->vm_file) {
-		spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
-		__unmap_hugepage_range(vma, start, end, ref_page);
-		spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
-	}
+	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
+	__unmap_hugepage_range(vma, start, end, ref_page);
+	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
 }
 
 /*
diff --git a/mm/memory.c b/mm/memory.c
index c1c1d6d8c22b..02fc6b1047b0 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -901,9 +901,23 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
 		}
 
 		if (unlikely(is_vm_hugetlb_page(vma))) {
-			unmap_hugepage_range(vma, start, end, NULL);
-			zap_work -= (end - start) /
+			/*
+			 * It is undesirable to test vma->vm_file as it
+			 * should be non-null for valid hugetlb area.
+			 * However, vm_file will be NULL in the error
+			 * cleanup path of do_mmap_pgoff. When
+			 * hugetlbfs ->mmap method fails,
+			 * do_mmap_pgoff() nullifies vma->vm_file
+			 * before calling this function to clean up.
+			 * Since no pte has actually been setup, it is
+			 * safe to do nothing in this case.
+			 */
+			if (vma->vm_file) {
+				unmap_hugepage_range(vma, start, end, NULL);
+				zap_work -= (end - start) /
 				pages_per_huge_page(hstate_vma(vma));
+			}
+
 			start = end;
 		} else
 			start = unmap_page_range(*tlbp, vma,
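
For context beyond the diff itself: after this change, unmap_hugepage_range() dereferences vma->vm_file unconditionally to take i_mmap_lock, so the NULL check (needed only for the do_mmap_pgoff() error-cleanup path described in the moved comment) now lives in the caller, unmap_vmas(). Below is a minimal standalone C sketch of that caller-side invariant; the struct definitions and lock helpers are simplified stand-ins for the kernel's real ones, not actual kernel code.

/*
 * Userspace sketch (assumed, simplified types) of the invariant this
 * patch creates: unmap_hugepage_range() may dereference vma->vm_file
 * without a NULL check, so the caller must filter out the one case
 * where vm_file is NULL (the do_mmap_pgoff() error-cleanup path).
 */
#include <stdio.h>

struct address_space { int i_mmap_lock; };              /* toy lock */
struct file { struct address_space *f_mapping; };
struct vm_area_struct { struct file *vm_file; };

static void spin_lock(int *lock)   { *lock = 1; }
static void spin_unlock(int *lock) { *lock = 0; }

/* After the patch: unconditionally takes i_mmap_lock. */
static void unmap_hugepage_range(struct vm_area_struct *vma)
{
	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	printf("unmapping hugetlb range\n");
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
}

/* Caller side, mirroring the new check in unmap_vmas(). */
static void unmap_vmas(struct vm_area_struct *vma)
{
	/*
	 * vm_file is NULL only when hugetlbfs ->mmap failed and
	 * do_mmap_pgoff() is cleaning up; no ptes were set up,
	 * so doing nothing is safe.
	 */
	if (vma->vm_file)
		unmap_hugepage_range(vma);
}

int main(void)
{
	struct address_space mapping = { 0 };
	struct file f = { &mapping };
	struct vm_area_struct valid = { &f };
	struct vm_area_struct cleanup = { NULL };   /* ->mmap failed */

	unmap_vmas(&valid);    /* locks, unmaps, unlocks */
	unmap_vmas(&cleanup);  /* skipped; would otherwise crash */
	return 0;
}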