author     Peter Zijlstra <a.p.zijlstra@chello.nl>          2011-05-24 20:12:06 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2011-05-25 11:39:18 -0400
commit     3d48ae45e72390ddf8cc5256ac32ed6f7a19cbea (patch)
tree       1f46db3a8424090dd8e0b58991fa5acc1a73e680 /mm/mmap.c
parent     97a894136f29802da19a15541de3c019e1ca147e (diff)
mm: Convert i_mmap_lock to a mutex
Straightforward conversion of i_mmap_lock to a mutex.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Miller <davem@davemloft.net>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Tony Luck <tony.luck@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/mmap.c')
-rw-r--r--   mm/mmap.c   22
1 file changed, 11 insertions(+), 11 deletions(-)
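
Every hunk below applies the same mechanical substitution: spin_lock()/spin_unlock() on mapping->i_mmap_lock becomes mutex_lock()/mutex_unlock() on mapping->i_mmap_mutex, with the critical sections themselves untouched. The following is a minimal kernel-style sketch of the converted pattern, modelled on unlink_file_vma() in the second hunk; the i_mmap_mutex field itself is declared in struct address_space (include/linux/fs.h), which this mm/mmap.c diffstat does not cover, and the helper name here is hypothetical.

#include <linux/fs.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>

/* Hypothetical helper showing the post-conversion locking pattern. */
static void example_unlink_file_vma(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;

        if (file) {
                struct address_space *mapping = file->f_mapping;

                /* was: spin_lock(&mapping->i_mmap_lock); */
                mutex_lock(&mapping->i_mmap_mutex);
                /* ... detach vma from mapping->i_mmap while holding the
                 * mutex, whose holder (unlike a spinlock holder) may sleep ... */
                mutex_unlock(&mapping->i_mmap_mutex);
        }
}

Because a mutex sleeps instead of spinning, these call sites must not be reached from atomic context; that is what makes the conversion "straightforward" for the paths touched in this file.
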
@@ -194,7 +194,7 @@ error:
 }
 
 /*
- * Requires inode->i_mapping->i_mmap_lock
+ * Requires inode->i_mapping->i_mmap_mutex
  */
 static void __remove_shared_vm_struct(struct vm_area_struct *vma,
                 struct file *file, struct address_space *mapping)
@@ -222,9 +222,9 @@ void unlink_file_vma(struct vm_area_struct *vma)
 
         if (file) {
                 struct address_space *mapping = file->f_mapping;
-                spin_lock(&mapping->i_mmap_lock);
+                mutex_lock(&mapping->i_mmap_mutex);
                 __remove_shared_vm_struct(vma, file, mapping);
-                spin_unlock(&mapping->i_mmap_lock);
+                mutex_unlock(&mapping->i_mmap_mutex);
         }
 }
 
@@ -446,13 +446,13 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
                 mapping = vma->vm_file->f_mapping;
 
         if (mapping)
-                spin_lock(&mapping->i_mmap_lock);
+                mutex_lock(&mapping->i_mmap_mutex);
 
         __vma_link(mm, vma, prev, rb_link, rb_parent);
         __vma_link_file(vma);
 
         if (mapping)
-                spin_unlock(&mapping->i_mmap_lock);
+                mutex_unlock(&mapping->i_mmap_mutex);
 
         mm->map_count++;
         validate_mm(mm);
@@ -555,7 +555,7 @@ again: remove_next = 1 + (end > next->vm_end);
                 mapping = file->f_mapping;
                 if (!(vma->vm_flags & VM_NONLINEAR))
                         root = &mapping->i_mmap;
-                spin_lock(&mapping->i_mmap_lock);
+                mutex_lock(&mapping->i_mmap_mutex);
                 if (insert) {
                         /*
                          * Put into prio_tree now, so instantiated pages
@@ -622,7 +622,7 @@ again: remove_next = 1 + (end > next->vm_end);
         if (anon_vma)
                 anon_vma_unlock(anon_vma);
         if (mapping)
-                spin_unlock(&mapping->i_mmap_lock);
+                mutex_unlock(&mapping->i_mmap_mutex);
 
         if (remove_next) {
                 if (file) {
@@ -2290,7 +2290,7 @@ void exit_mmap(struct mm_struct *mm)
 
 /* Insert vm structure into process list sorted by address
  * and into the inode's i_mmap tree. If vm_file is non-NULL
- * then i_mmap_lock is taken here.
+ * then i_mmap_mutex is taken here.
  */
 int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
 {
@@ -2532,7 +2532,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
                  */
                 if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
                         BUG();
-                spin_lock_nest_lock(&mapping->i_mmap_lock, &mm->mmap_sem);
+                mutex_lock_nest_lock(&mapping->i_mmap_mutex, &mm->mmap_sem);
         }
 }
 
@@ -2559,7 +2559,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
  * vma in this mm is backed by the same anon_vma or address_space.
  *
  * We can take all the locks in random order because the VM code
- * taking i_mmap_lock or anon_vma->lock outside the mmap_sem never
+ * taking i_mmap_mutex or anon_vma->lock outside the mmap_sem never
  * takes more than one of them in a row. Secondly we're protected
  * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
  *
@@ -2631,7 +2631,7 @@ static void vm_unlock_mapping(struct address_space *mapping)
                  * AS_MM_ALL_LOCKS can't change to 0 from under us
                  * because we hold the mm_all_locks_mutex.
                  */
-                spin_unlock(&mapping->i_mmap_lock);
+                mutex_unlock(&mapping->i_mmap_mutex);
                 if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
                                         &mapping->flags))
                         BUG();