Diffstat (limited to 'mm/rmap.c')
 mm/rmap.c | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 45eba36fd673..c52f43a69eea 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -23,7 +23,7 @@
  * inode->i_mutex	(while writing or truncating, not reading or faulting)
  *   mm->mmap_sem
  *     page->flags PG_locked (lock_page)
- *       mapping->i_mmap_mutex
+ *       mapping->i_mmap_rwsem
  *         anon_vma->rwsem
  *           mm->page_table_lock or pte_lock
  *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
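
The lock-ordering comment above reflects the conversion of mapping->i_mmap_mutex
into mapping->i_mmap_rwsem. For context, a minimal sketch of the read-side helpers
this diff relies on, assuming they are thin wrappers over the rwsem primitives
(the authoritative definitions live in include/linux/fs.h, not in this diff):

	/* Sketch, not the authoritative definitions: take/release
	 * mapping->i_mmap_rwsem for shared (read) access. */
	static inline void i_mmap_lock_read(struct address_space *mapping)
	{
		down_read(&mapping->i_mmap_rwsem);
	}

	static inline void i_mmap_unlock_read(struct address_space *mapping)
	{
		up_read(&mapping->i_mmap_rwsem);
	}
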
@@ -1260,7 +1260,7 @@ out_mlock:
 	/*
 	 * We need mmap_sem locking, Otherwise VM_LOCKED check makes
 	 * unstable result and race. Plus, We can't wait here because
-	 * we now hold anon_vma->rwsem or mapping->i_mmap_mutex.
+	 * we now hold anon_vma->rwsem or mapping->i_mmap_rwsem.
 	 * if trylock failed, the page remain in evictable lru and later
 	 * vmscan could retry to move the page to unevictable lru if the
 	 * page is actually mlocked.
@@ -1635,7 +1635,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
 static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
 {
 	struct anon_vma *anon_vma;
-	pgoff_t pgoff = page_to_pgoff(page);
+	pgoff_t pgoff;
 	struct anon_vma_chain *avc;
 	int ret = SWAP_AGAIN;
 
@@ -1643,6 +1643,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
 	if (!anon_vma)
 		return ret;
 
+	pgoff = page_to_pgoff(page);
 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
 		struct vm_area_struct *vma = avc->vma;
 		unsigned long address = vma_address(page, vma);
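
Together, the two hunks above defer the page_to_pgoff() call until after the
anon_vma lookup can no longer fail. Condensed, the patched control flow of
rmap_walk_anon() becomes roughly:

	anon_vma = rmap_walk_anon_lock(page, rwc);
	if (!anon_vma)
		return ret;			/* bail out before computing pgoff */

	pgoff = page_to_pgoff(page);		/* only computed when the walk proceeds */
	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
		/* ... visit each VMA that maps the page ... */
	}
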
@@ -1676,7 +1677,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
 static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
 {
 	struct address_space *mapping = page->mapping;
-	pgoff_t pgoff = page_to_pgoff(page);
+	pgoff_t pgoff;
 	struct vm_area_struct *vma;
 	int ret = SWAP_AGAIN;
 
@@ -1684,13 +1685,15 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
 	 * The page lock not only makes sure that page->mapping cannot
 	 * suddenly be NULLified by truncation, it makes sure that the
 	 * structure at mapping cannot be freed and reused yet,
-	 * so we can safely take mapping->i_mmap_mutex.
+	 * so we can safely take mapping->i_mmap_rwsem.
 	 */
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 
 	if (!mapping)
 		return ret;
-	mutex_lock(&mapping->i_mmap_mutex);
+
+	pgoff = page_to_pgoff(page);
+	i_mmap_lock_read(mapping);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
 
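
This hunk is the functional core of the change: mutex_lock() admitted only one
walker per file at a time, while taking the rwsem shared (via i_mmap_lock_read())
lets any number of readers traverse the interval tree concurrently. A schematic
contrast, with the helper expansion shown as an assumption:

	/* Before: every rmap walker of this file serialized against all others. */
	mutex_lock(&mapping->i_mmap_mutex);
	/* ... walk mapping->i_mmap ... */
	mutex_unlock(&mapping->i_mmap_mutex);

	/* After: walkers take the rwsem shared, so concurrent walks of the
	 * same file can proceed in parallel; writers (mapping/unmapping,
	 * truncation) still get exclusion by taking it in write mode. */
	i_mmap_lock_read(mapping);	/* ~ down_read(&mapping->i_mmap_rwsem) */
	/* ... walk mapping->i_mmap ... */
	i_mmap_unlock_read(mapping);
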
@@ -1711,9 +1714,8 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
 		goto done;
 
 	ret = rwc->file_nonlinear(page, mapping, rwc->arg);
-
 done:
-	mutex_unlock(&mapping->i_mmap_mutex);
+	i_mmap_unlock_read(mapping);
 	return ret;
 }
 
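
For orientation, both functions touched here are reached through the rmap_walk()
dispatcher, which at this point in the tree looks roughly like the following
(a sketch from memory, not part of this diff):

	int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
	{
		if (unlikely(PageKsm(page)))
			return rmap_walk_ksm(page, rwc);	/* KSM-merged page */
		else if (PageAnon(page))
			return rmap_walk_anon(page, rwc);	/* anon_vma->rwsem path */
		else
			return rmap_walk_file(page, rwc);	/* i_mmap_rwsem path */
	}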