path: root/mm/hugetlb.c
author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-05-24 20:12:06 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-05-25 11:39:18 -0400
commit		3d48ae45e72390ddf8cc5256ac32ed6f7a19cbea (patch)
tree		1f46db3a8424090dd8e0b58991fa5acc1a73e680 /mm/hugetlb.c
parent		97a894136f29802da19a15541de3c019e1ca147e (diff)
mm: Convert i_mmap_lock to a mutex
Straightforward conversion of i_mmap_lock to a mutex.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Miller <davem@davemloft.net>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Tony Luck <tony.luck@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
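The change is mechanical: every place that took the per-address_space i_mmap_lock spinlock now takes the new i_mmap_mutex instead. A minimal sketch of that pattern is below; the function name walk_file_mappings is purely illustrative, the real call sites are in the hugetlb.c hunks that follow.

#include <linux/fs.h>
#include <linux/mutex.h>

/* Hypothetical helper showing the conversion pattern, not kernel code. */
static void walk_file_mappings(struct address_space *mapping)
{
	/* Before this patch: spin_lock(&mapping->i_mmap_lock); */
	mutex_lock(&mapping->i_mmap_mutex);

	/* ... walk mapping->i_mmap while new mappings are excluded ... */

	/* Before this patch: spin_unlock(&mapping->i_mmap_lock); */
	mutex_unlock(&mapping->i_mmap_mutex);
}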
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--	mm/hugetlb.c	14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index bbb4a5bbb958..5fd68b95c671 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2205,7 +2205,7 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 	unsigned long sz = huge_page_size(h);
 
 	/*
-	 * A page gathering list, protected by per file i_mmap_lock. The
+	 * A page gathering list, protected by per file i_mmap_mutex. The
 	 * lock is used to avoid list corruption from multiple unmapping
 	 * of the same page since we are using page->lru.
 	 */
@@ -2274,9 +2274,9 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 			  unsigned long end, struct page *ref_page)
 {
-	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
+	mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
 	__unmap_hugepage_range(vma, start, end, ref_page);
-	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
+	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
 }
 
 /*
@@ -2308,7 +2308,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * this mapping should be shared between all the VMAs,
 	 * __unmap_hugepage_range() is called as the lock is already held
 	 */
-	spin_lock(&mapping->i_mmap_lock);
+	mutex_lock(&mapping->i_mmap_mutex);
 	vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
 		/* Do not unmap the current VMA */
 		if (iter_vma == vma)
@@ -2326,7 +2326,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
 				address, address + huge_page_size(h),
 				page);
 	}
-	spin_unlock(&mapping->i_mmap_lock);
+	mutex_unlock(&mapping->i_mmap_mutex);
 
 	return 1;
 }
@@ -2810,7 +2810,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
 	BUG_ON(address >= end);
 	flush_cache_range(vma, address, end);
 
-	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
+	mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
 	spin_lock(&mm->page_table_lock);
 	for (; address < end; address += huge_page_size(h)) {
 		ptep = huge_pte_offset(mm, address);
@@ -2825,7 +2825,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
 		}
 	}
 	spin_unlock(&mm->page_table_lock);
-	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
+	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
 
 	flush_tlb_range(vma, start, end);
 }
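The hugetlb_change_protection() hunks above also show the nesting the conversion preserves: i_mmap_mutex, which may sleep, is taken and released outside mm->page_table_lock, a spinlock that must not be held across a sleep. A minimal sketch of that ordering is below; update_file_ptes is an illustrative name, not a kernel function.

#include <linux/fs.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

/* Hypothetical helper showing the lock nesting kept by this patch. */
static void update_file_ptes(struct mm_struct *mm, struct address_space *mapping)
{
	mutex_lock(&mapping->i_mmap_mutex);	/* sleeping lock: taken first */
	spin_lock(&mm->page_table_lock);	/* spinlock: taken inside, no sleeping while held */

	/* ... walk and update page table entries ... */

	spin_unlock(&mm->page_table_lock);
	mutex_unlock(&mapping->i_mmap_mutex);
}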