author		Davidlohr Bueso <dave@stgolabs.net>		2014-12-12 19:54:21 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-13 15:42:45 -0500
commit		83cde9e8ba95d180eaefefe834958fbf7008cf39
tree		041dd6f0bc4e41baec1a46422683596111e1e2a8 /mm/hugetlb.c
parent		8b28f621bea6f84d44adf7e804b73aff1e09105b
mm: use new helper functions around the i_mmap_mutex

Convert all open coded mutex_lock/unlock calls to the
i_mmap_[lock/unlock]_write() helpers.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: "Kirill A. Shutemov" <kirill@shutemov.name>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
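For context, the helpers named here come from the parent commit in this series. Judging by the conversion itself, they are thin wrappers around the existing mutex, roughly as sketched below (a sketch only; the authoritative definitions live in the tree, in include/linux/fs.h):

/* Presumed shape of the wrappers this patch converts callers to use;
 * at this point in the series the underlying lock is still a plain
 * mutex, so these reduce to mutex_lock/mutex_unlock. */
static inline void i_mmap_lock_write(struct address_space *mapping)
{
	mutex_lock(&mapping->i_mmap_mutex);
}

static inline void i_mmap_unlock_write(struct address_space *mapping)
{
	mutex_unlock(&mapping->i_mmap_mutex);
}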
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--	mm/hugetlb.c	12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 919b86a2164d..ffe19304cc09 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2774,7 +2774,7 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * this mapping should be shared between all the VMAs,
 	 * __unmap_hugepage_range() is called as the lock is already held
 	 */
-	mutex_lock(&mapping->i_mmap_mutex);
+	i_mmap_lock_write(mapping);
 	vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
 		/* Do not unmap the current VMA */
 		if (iter_vma == vma)
@@ -2791,7 +2791,7 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
 		unmap_hugepage_range(iter_vma, address,
 				     address + huge_page_size(h), page);
 	}
-	mutex_unlock(&mapping->i_mmap_mutex);
+	i_mmap_unlock_write(mapping);
 }

 /*
@@ -3348,7 +3348,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 	flush_cache_range(vma, address, end);

 	mmu_notifier_invalidate_range_start(mm, start, end);
-	mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
+	i_mmap_lock_write(vma->vm_file->f_mapping);
 	for (; address < end; address += huge_page_size(h)) {
 		spinlock_t *ptl;
 		ptep = huge_pte_offset(mm, address);
@@ -3376,7 +3376,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 	 * and that page table be reused and filled with junk.
 	 */
 	flush_tlb_range(vma, start, end);
-	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+	i_mmap_unlock_write(vma->vm_file->f_mapping);
 	mmu_notifier_invalidate_range_end(mm, start, end);

 	return pages << h->order;
@@ -3544,7 +3544,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 	if (!vma_shareable(vma, addr))
 		return (pte_t *)pmd_alloc(mm, pud, addr);

-	mutex_lock(&mapping->i_mmap_mutex);
+	i_mmap_lock_write(mapping);
 	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
 		if (svma == vma)
 			continue;
@@ -3572,7 +3572,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 	spin_unlock(ptl);
 out:
 	pte = (pte_t *)pmd_alloc(mm, pud, addr);
-	mutex_unlock(&mapping->i_mmap_mutex);
+	i_mmap_unlock_write(mapping);
 	return pte;
 }
 
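What the indirection buys (an editorial sketch, not part of this patch): once every call site goes through the helpers, the lock implementation can change in one place instead of across dozens of callers. Assuming a hypothetical later switch to a read/write semaphore, which the _write suffix clearly anticipates, the wrappers could become something like the following; the i_mmap_rwsem field name is an assumption of this sketch:

/* Hypothetical follow-up, for illustration only: swap the mutex for an
 * rwsem inside the helpers and every converted caller, including the
 * hugetlb sites above, picks up the new lock type for free. */
static inline void i_mmap_lock_write(struct address_space *mapping)
{
	down_write(&mapping->i_mmap_rwsem);	/* assumed field name */
}

static inline void i_mmap_unlock_write(struct address_space *mapping)
{
	up_write(&mapping->i_mmap_rwsem);	/* assumed field name */
}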