aboutsummaryrefslogtreecommitdiffstats
path: root/mm/rmap.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-12-13 16:00:36 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2014-12-13 16:00:36 -0500
commit78a45c6f067824cf5d0a9fedea7339ac2e28603c (patch)
treeb4f78c8b6b9059ddace0a18c11629b8d2045f793 /mm/rmap.c
parentf96fe225677b3efb74346ebd56fafe3997b02afa (diff)
parent29d293b6007b91a4463f05bc8d0b26e0e65c5816 (diff)
Merge branch 'akpm' (second patch-bomb from Andrew)
Merge second patchbomb from Andrew Morton: - the rest of MM - misc fs fixes - add execveat() syscall - new ratelimit feature for fault-injection - decompressor updates - ipc/ updates - fallocate feature creep - fsnotify cleanups - a few other misc things * emailed patches from Andrew Morton <akpm@linux-foundation.org>: (99 commits) cgroups: Documentation: fix trivial typos and wrong paragraph numberings parisc: percpu: update comments referring to __get_cpu_var percpu: update local_ops.txt to reflect this_cpu operations percpu: remove __get_cpu_var and __raw_get_cpu_var macros fsnotify: remove destroy_list from fsnotify_mark fsnotify: unify inode and mount marks handling fallocate: create FAN_MODIFY and IN_MODIFY events mm/cma: make kmemleak ignore CMA regions slub: fix cpuset check in get_any_partial slab: fix cpuset check in fallback_alloc shmdt: use i_size_read() instead of ->i_size ipc/shm.c: fix overly aggressive shmdt() when calls span multiple segments ipc/msg: increase MSGMNI, remove scaling ipc/sem.c: increase SEMMSL, SEMMNI, SEMOPM ipc/sem.c: change memory barrier in sem_lock() to smp_rmb() lib/decompress.c: consistency of compress formats for kernel image decompress_bunzip2: off by one in get_next_block() usr/Kconfig: make initrd compression algorithm selection not expert fault-inject: add ratelimit option ratelimit: add initialization macro ...
Diffstat (limited to 'mm/rmap.c')
-rw-r--r--  mm/rmap.c  18
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 45eba36fd673..c52f43a69eea 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -23,7 +23,7 @@
  * inode->i_mutex       (while writing or truncating, not reading or faulting)
  *   mm->mmap_sem
  *     page->flags PG_locked (lock_page)
- *       mapping->i_mmap_mutex
+ *       mapping->i_mmap_rwsem
  *         anon_vma->rwsem
  *           mm->page_table_lock or pte_lock
  *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
@@ -1260,7 +1260,7 @@ out_mlock:
 	/*
 	 * We need mmap_sem locking, Otherwise VM_LOCKED check makes
 	 * unstable result and race. Plus, We can't wait here because
-	 * we now hold anon_vma->rwsem or mapping->i_mmap_mutex.
+	 * we now hold anon_vma->rwsem or mapping->i_mmap_rwsem.
 	 * if trylock failed, the page remain in evictable lru and later
 	 * vmscan could retry to move the page to unevictable lru if the
 	 * page is actually mlocked.
@@ -1635,7 +1635,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
 static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
 {
 	struct anon_vma *anon_vma;
-	pgoff_t pgoff = page_to_pgoff(page);
+	pgoff_t pgoff;
 	struct anon_vma_chain *avc;
 	int ret = SWAP_AGAIN;
 
@@ -1643,6 +1643,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
 	if (!anon_vma)
 		return ret;
 
+	pgoff = page_to_pgoff(page);
 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
 		struct vm_area_struct *vma = avc->vma;
 		unsigned long address = vma_address(page, vma);
@@ -1676,7 +1677,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
 static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
 {
 	struct address_space *mapping = page->mapping;
-	pgoff_t pgoff = page_to_pgoff(page);
+	pgoff_t pgoff;
 	struct vm_area_struct *vma;
 	int ret = SWAP_AGAIN;
 
@@ -1684,13 +1685,15 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
 	 * The page lock not only makes sure that page->mapping cannot
 	 * suddenly be NULLified by truncation, it makes sure that the
 	 * structure at mapping cannot be freed and reused yet,
-	 * so we can safely take mapping->i_mmap_mutex.
+	 * so we can safely take mapping->i_mmap_rwsem.
 	 */
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 
 	if (!mapping)
 		return ret;
-	mutex_lock(&mapping->i_mmap_mutex);
+
+	pgoff = page_to_pgoff(page);
+	i_mmap_lock_read(mapping);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
 
@@ -1711,9 +1714,8 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
 		goto done;
 
 	ret = rwc->file_nonlinear(page, mapping, rwc->arg);
-
 done:
-	mutex_unlock(&mapping->i_mmap_mutex);
+	i_mmap_unlock_read(mapping);
 	return ret;
 }
 