author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-05-24 20:12:06 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-05-25 11:39:18 -0400
commit		3d48ae45e72390ddf8cc5256ac32ed6f7a19cbea
tree		1f46db3a8424090dd8e0b58991fa5acc1a73e680 /mm/rmap.c
parent		97a894136f29802da19a15541de3c019e1ca147e
mm: Convert i_mmap_lock to a mutex
Straightforward conversion of i_mmap_lock to a mutex.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Miller <davem@davemloft.net>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Tony Luck <tony.luck@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
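The conversion is mechanical: the struct address_space field changes from a spinlock_t i_mmap_lock to a struct mutex i_mmap_mutex (that part of the change lands outside this diffstat, which is limited to mm/rmap.c), and every acquire/release site switches in kind. A minimal sketch of the pattern each call site below follows; example_walk is a hypothetical name used only for illustration:

	#include <linux/fs.h>
	#include <linux/mutex.h>

	static void example_walk(struct address_space *mapping)
	{
		/* Before: spin_lock(&mapping->i_mmap_lock) -- a busy-wait
		 * lock, and sleeping was forbidden while it was held. */
		mutex_lock(&mapping->i_mmap_mutex);	/* may sleep */

		/* ... traverse mapping->i_mmap under the lock ... */

		mutex_unlock(&mapping->i_mmap_mutex);
		/* Before: spin_unlock(&mapping->i_mmap_lock) */
	}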
Diffstat (limited to 'mm/rmap.c')
-rw-r--r--	mm/rmap.c | 28 ++++++++++++++--------------
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -24,7 +24,7 @@
  *   inode->i_alloc_sem (vmtruncate_range)
  *   mm->mmap_sem
  *     page->flags PG_locked (lock_page)
- *       mapping->i_mmap_lock
+ *       mapping->i_mmap_mutex
  *         anon_vma->lock
  *           mm->page_table_lock or pte_lock
  *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
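The hunk above touches mm/rmap.c's lock-ordering comment: each lock listed nests inside the ones above it, and the mutex simply takes over the old spinlock's slot in the hierarchy. A purely illustrative sketch of what that documented order means for code that acquires more than one of them (not a real call chain from this file):

	down_read(&mm->mmap_sem);		/* mm->mmap_sem */
	lock_page(page);			/* page->flags PG_locked */
	mutex_lock(&mapping->i_mmap_mutex);	/* mapping->i_mmap_mutex */
	spin_lock(&mm->page_table_lock);	/* mm->page_table_lock */
	/* ... inner work ... */
	spin_unlock(&mm->page_table_lock);
	mutex_unlock(&mapping->i_mmap_mutex);
	unlock_page(page);
	up_read(&mm->mmap_sem);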
@@ -646,14 +646,14 @@ static int page_referenced_file(struct page *page,
 	 * The page lock not only makes sure that page->mapping cannot
 	 * suddenly be NULLified by truncation, it makes sure that the
 	 * structure at mapping cannot be freed and reused yet,
-	 * so we can safely take mapping->i_mmap_lock.
+	 * so we can safely take mapping->i_mmap_mutex.
 	 */
 	BUG_ON(!PageLocked(page));
 
-	spin_lock(&mapping->i_mmap_lock);
+	mutex_lock(&mapping->i_mmap_mutex);
 
 	/*
-	 * i_mmap_lock does not stabilize mapcount at all, but mapcount
+	 * i_mmap_mutex does not stabilize mapcount at all, but mapcount
 	 * is more likely to be accurate if we note it after spinning.
 	 */
 	mapcount = page_mapcount(page);
@@ -675,7 +675,7 @@ static int page_referenced_file(struct page *page,
 			break;
 	}
 
-	spin_unlock(&mapping->i_mmap_lock);
+	mutex_unlock(&mapping->i_mmap_mutex);
 	return referenced;
 }
 
@@ -762,7 +762,7 @@ static int page_mkclean_file(struct address_space *mapping, struct page *page)
 
 	BUG_ON(PageAnon(page));
 
-	spin_lock(&mapping->i_mmap_lock);
+	mutex_lock(&mapping->i_mmap_mutex);
 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
 		if (vma->vm_flags & VM_SHARED) {
 			unsigned long address = vma_address(page, vma);
@@ -771,7 +771,7 @@ static int page_mkclean_file(struct address_space *mapping, struct page *page)
 			ret += page_mkclean_one(page, vma, address);
 		}
 	}
-	spin_unlock(&mapping->i_mmap_lock);
+	mutex_unlock(&mapping->i_mmap_mutex);
 	return ret;
 }
 
@@ -1119,7 +1119,7 @@ out_mlock:
 	/*
 	 * We need mmap_sem locking, Otherwise VM_LOCKED check makes
 	 * unstable result and race. Plus, We can't wait here because
-	 * we now hold anon_vma->lock or mapping->i_mmap_lock.
+	 * we now hold anon_vma->lock or mapping->i_mmap_mutex.
 	 * if trylock failed, the page remain in evictable lru and later
 	 * vmscan could retry to move the page to unevictable lru if the
 	 * page is actually mlocked.
@@ -1345,7 +1345,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 	unsigned long max_nl_size = 0;
 	unsigned int mapcount;
 
-	spin_lock(&mapping->i_mmap_lock);
+	mutex_lock(&mapping->i_mmap_mutex);
 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
 		if (address == -EFAULT)
@@ -1391,7 +1391,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 	mapcount = page_mapcount(page);
 	if (!mapcount)
 		goto out;
-	cond_resched_lock(&mapping->i_mmap_lock);
+	cond_resched();
 
 	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
 	if (max_nl_cursor == 0)
@@ -1413,7 +1413,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 			}
 			vma->vm_private_data = (void *) max_nl_cursor;
 		}
-		cond_resched_lock(&mapping->i_mmap_lock);
+		cond_resched();
 		max_nl_cursor += CLUSTER_SIZE;
 	} while (max_nl_cursor <= max_nl_size);
 
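The two cond_resched_lock() hunks above follow from the lock-type change rather than from anything rmap-specific: cond_resched_lock() exists because a spinlock holder may not sleep, so yielding the CPU requires dropping the lock, rescheduling, and retaking it. A mutex holder is allowed to sleep, so a bare cond_resched() suffices and i_mmap_mutex stays held across the yield. Roughly:

	/* Spinlock era: cannot sleep while held; the helper unlocks,
	 * reschedules if needed, then relocks. */
	cond_resched_lock(&mapping->i_mmap_lock);

	/* Mutex era: sleeping with i_mmap_mutex held is legal, so a
	 * plain voluntary-preemption point is enough. */
	cond_resched();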
@@ -1425,7 +1425,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
 		vma->vm_private_data = NULL;
 out:
-	spin_unlock(&mapping->i_mmap_lock);
+	mutex_unlock(&mapping->i_mmap_mutex);
 	return ret;
 }
 
@@ -1544,7 +1544,7 @@ static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
 
 	if (!mapping)
 		return ret;
-	spin_lock(&mapping->i_mmap_lock);
+	mutex_lock(&mapping->i_mmap_mutex);
 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
 		if (address == -EFAULT)
@@ -1558,7 +1558,7 @@ static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
 	 * never contain migration ptes. Decide what to do about this
 	 * limitation to linear when we need rmap_walk() on nonlinear.
 	 */
-	spin_unlock(&mapping->i_mmap_lock);
+	mutex_unlock(&mapping->i_mmap_mutex);
 	return ret;
 }
 
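One practical consequence of the conversion: code running under the lock may now block, and the new rule can be asserted. A hedged sketch (lockdep_assert_held() and might_sleep() are real kernel helpers; the function and its placement are illustrative only, not part of this patch):

	static void example_check(struct address_space *mapping)
	{
		lockdep_assert_held(&mapping->i_mmap_mutex);	/* caller holds it */
		might_sleep();	/* legal: a mutex, not a spinlock, is held */
	}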