author	Hugh Dickins <hughd@google.com>	2012-06-07 17:21:09 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-06-07 17:43:54 -0400
commit	0142ef6cdca5f9784eb0762ac50fe378d98d71d4 (patch)
tree	d8c3912d206c2d286d305df06440f0f1098437a6 /mm/shmem.c
parent	71fae7e714749a52cb8be777ec014f82e8a747f4 (diff)
shmem: replace_page must flush_dcache and others
Commit bde05d1ccd51 ("shmem: replace page if mapping excludes its zone")
is not at all likely to break for anyone, but it was an earlier version
from before review feedback was incorporated.  Fix that up now.

* shmem_replace_page must flush_dcache_page after copy_highpage [akpm]

* Expand comment on why shmem_unuse_inode needs page_swapcount [akpm]

* Remove excess of VM_BUG_ONs from shmem_replace_page [wangcong]

* Check page_private matches swap before calling shmem_replace_page [hughd]

* shmem_replace_page allow for unexpected race in radix_tree lookup [hughd]

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Cong Wang <xiyou.wangcong@gmail.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Alan Cox <alan@lxorguk.ukuu.org.uk>
Cc: Stephane Marchesin <marcheu@chromium.org>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Dave Airlie <airlied@gmail.com>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Rob Clark <rob.clark@linaro.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
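The first fix is worth spelling out: copy_highpage() copies through a
kernel mapping, so on architectures with aliasing data caches the new
page must be flushed with flush_dcache_page() before it is exposed to
lookups. A minimal sketch of that ordering follows, using the same
kernel APIs the patch touches; the helper name is hypothetical and the
snippet is an illustration, not the shmem code itself:

/*
 * Sketch only: mirrors the copy step of shmem_replace_page() after
 * this patch.  sketch_copy_to_replacement() is a hypothetical name.
 */
#include <linux/highmem.h>	/* copy_highpage(), flush_dcache_page() */
#include <linux/pagemap.h>	/* page_cache_get() */

static void sketch_copy_to_replacement(struct page *newpage,
				       struct page *oldpage)
{
	page_cache_get(newpage);	/* hold a reference across the swap */
	copy_highpage(newpage, oldpage);	/* copy via kernel mapping */
	flush_dcache_page(newpage);	/* the fix: flush before exposing */
}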
Diffstat (limited to 'mm/shmem.c')
-rw-r--r--	mm/shmem.c	57
1 file changed, 37 insertions(+), 20 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 585bd220a21e..a15a466d0d1d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -683,10 +683,21 @@ static int shmem_unuse_inode(struct shmem_inode_info *info,
 		mutex_lock(&shmem_swaplist_mutex);
 		/*
 		 * We needed to drop mutex to make that restrictive page
-		 * allocation; but the inode might already be freed by now,
-		 * and we cannot refer to inode or mapping or info to check.
-		 * However, we do hold page lock on the PageSwapCache page,
-		 * so can check if that still has our reference remaining.
+		 * allocation, but the inode might have been freed while we
+		 * dropped it: although a racing shmem_evict_inode() cannot
+		 * complete without emptying the radix_tree, our page lock
+		 * on this swapcache page is not enough to prevent that -
+		 * free_swap_and_cache() of our swap entry will only
+		 * trylock_page(), removing swap from radix_tree whatever.
+		 *
+		 * We must not proceed to shmem_add_to_page_cache() if the
+		 * inode has been freed, but of course we cannot rely on
+		 * inode or mapping or info to check that.  However, we can
+		 * safely check if our swap entry is still in use (and here
+		 * it can't have got reused for another page): if it's still
+		 * in use, then the inode cannot have been freed yet, and we
+		 * can safely proceed (if it's no longer in use, that tells
+		 * nothing about the inode, but we don't need to unuse swap).
 		 */
 		if (!page_swapcount(*pagep))
 			error = -ENOENT;
@@ -730,9 +741,9 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
 
 	/*
 	 * There's a faint possibility that swap page was replaced before
-	 * caller locked it: it will come back later with the right page.
+	 * caller locked it: caller will come back later with the right page.
 	 */
-	if (unlikely(!PageSwapCache(page)))
+	if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
 		goto out;
 
 	/*
@@ -995,21 +1006,15 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
 	newpage = shmem_alloc_page(gfp, info, index);
 	if (!newpage)
 		return -ENOMEM;
-	VM_BUG_ON(shmem_should_replace_page(newpage, gfp));
 
-	*pagep = newpage;
 	page_cache_get(newpage);
 	copy_highpage(newpage, oldpage);
+	flush_dcache_page(newpage);
 
-	VM_BUG_ON(!PageLocked(oldpage));
 	__set_page_locked(newpage);
-	VM_BUG_ON(!PageUptodate(oldpage));
 	SetPageUptodate(newpage);
-	VM_BUG_ON(!PageSwapBacked(oldpage));
 	SetPageSwapBacked(newpage);
-	VM_BUG_ON(!swap_index);
 	set_page_private(newpage, swap_index);
-	VM_BUG_ON(!PageSwapCache(oldpage));
 	SetPageSwapCache(newpage);
 
 	/*
@@ -1019,13 +1024,24 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
 	spin_lock_irq(&swap_mapping->tree_lock);
 	error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
 								   newpage);
-	__inc_zone_page_state(newpage, NR_FILE_PAGES);
-	__dec_zone_page_state(oldpage, NR_FILE_PAGES);
+	if (!error) {
+		__inc_zone_page_state(newpage, NR_FILE_PAGES);
+		__dec_zone_page_state(oldpage, NR_FILE_PAGES);
+	}
 	spin_unlock_irq(&swap_mapping->tree_lock);
-	BUG_ON(error);
 
-	mem_cgroup_replace_page_cache(oldpage, newpage);
-	lru_cache_add_anon(newpage);
+	if (unlikely(error)) {
+		/*
+		 * Is this possible?  I think not, now that our callers check
+		 * both PageSwapCache and page_private after getting page lock;
+		 * but be defensive.  Reverse old to newpage for clear and free.
+		 */
+		oldpage = newpage;
+	} else {
+		mem_cgroup_replace_page_cache(oldpage, newpage);
+		lru_cache_add_anon(newpage);
+		*pagep = newpage;
+	}
 
 	ClearPageSwapCache(oldpage);
 	set_page_private(oldpage, 0);
@@ -1033,7 +1049,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
 	unlock_page(oldpage);
 	page_cache_release(oldpage);
 	page_cache_release(oldpage);
-	return 0;
+	return error;
 }
 
 /*
@@ -1107,7 +1123,8 @@ repeat:
 
 		/* We have to do this with page locked to prevent races */
 		lock_page(page);
-		if (!PageSwapCache(page) || page->mapping) {
+		if (!PageSwapCache(page) || page_private(page) != swap.val ||
+		    page->mapping) {
 			error = -EEXIST;	/* try again */
 			goto failed;
 		}
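
The common thread of the shmem_unuse() and shmem_getpage_gfp() hunks is
revalidation under the page lock: holding the lock alone does not prove
the page still belongs to the expected swap entry, so page_private() is
compared against swap.val as well. A hedged sketch of that pattern
(in the kernel these checks are written inline; the helper name here is
hypothetical):

/*
 * Sketch only: the lock-and-revalidate pattern this patch applies.
 * Returns with the page locked if it still backs the expected swap
 * entry; otherwise unlocks and tells the caller to retry, as the
 * -EEXIST path above does.
 */
#include <linux/mm.h>		/* page_private() */
#include <linux/pagemap.h>	/* lock_page(), unlock_page() */
#include <linux/swap.h>		/* swp_entry_t */

static bool sketch_lock_swapcache_page(struct page *page, swp_entry_t swap)
{
	lock_page(page);	/* stabilize swapcache state */
	if (!PageSwapCache(page) ||		/* replaced meanwhile? */
	    page_private(page) != swap.val) {	/* reused for another entry? */
		unlock_page(page);
		return false;	/* caller should retry */
	}
	return true;		/* caller proceeds, then unlocks */
}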