author		Hugh Dickins <hughd@google.com>	2011-01-13 18:47:31 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-13 20:32:49 -0500
commit		fd4a4663db293bfd5dc20fb4113977f62895e550 (patch)
tree		a161c087d714042b5402b858b9a6ee75392ecab2 /mm/migrate.c
parent		1ce82b69e96c838d007f316b8347b911fdfa9842 (diff)
mm: fix hugepage migration
2.6.37 added an unmap_and_move_huge_page() for memory failure recovery,
but its anon_vma handling was still based around the 2.6.35 conventions.
Update it to use page_lock_anon_vma, get_anon_vma, page_unlock_anon_vma,
drop_anon_vma in the same way as we're now changing unmap_and_move().

I don't particularly like to propose this for stable when I've not seen
its problems in practice nor tested the solution: but it's clearly out
of synch at present.

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Rik van Riel <riel@redhat.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: "Jun'ichi Nomura" <j-nomura@ce.jp.nec.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: <stable@kernel.org>		[2.6.37, 2.6.36]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
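In short, the patch switches the huge-page path to the same reference-pinning
pattern unmap_and_move() now uses: hold the anon_vma lock only long enough to
take a reference, then drop that reference once the migration PTEs have been
dealt with. A condensed sketch of the resulting flow, simplified from the
hunks below (not the verbatim kernel source):

	if (PageAnon(hpage)) {
		/* page_lock_anon_vma() returns NULL if the page is no
		 * longer anon-mapped. */
		anon_vma = page_lock_anon_vma(hpage);
		if (anon_vma) {
			/* Pin the anon_vma so it survives the unmap/move. */
			get_anon_vma(anon_vma);
			page_unlock_anon_vma(anon_vma);
		}
	}

	/* ... unmap and move the page; on failure, remove_migration_ptes()
	 * restores the original mappings ... */

	if (anon_vma)
		drop_anon_vma(anon_vma);	/* release the pin taken above */

This replaces the old scheme of holding rcu_read_lock() across the whole
operation and manipulating anon_vma->external_refcount by hand.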
Diffstat (limited to 'mm/migrate.c')
-rw-r--r--	mm/migrate.c	23
1 file changed, 6 insertions(+), 17 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index a20cf12edede..5b7d1fd29621 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -827,7 +827,6 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	int rc = 0;
 	int *result = NULL;
 	struct page *new_hpage = get_new_page(hpage, private, &result);
-	int rcu_locked = 0;
 	struct anon_vma *anon_vma = NULL;
 
 	if (!new_hpage)
@@ -842,12 +841,10 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	}
 
 	if (PageAnon(hpage)) {
-		rcu_read_lock();
-		rcu_locked = 1;
-
-		if (page_mapped(hpage)) {
-			anon_vma = page_anon_vma(hpage);
-			atomic_inc(&anon_vma->external_refcount);
+		anon_vma = page_lock_anon_vma(hpage);
+		if (anon_vma) {
+			get_anon_vma(anon_vma);
+			page_unlock_anon_vma(anon_vma);
 		}
 	}
 
@@ -859,16 +856,8 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	if (rc)
 		remove_migration_ptes(hpage, hpage);
 
-	if (anon_vma && atomic_dec_and_lock(&anon_vma->external_refcount,
-					    &anon_vma->lock)) {
-		int empty = list_empty(&anon_vma->head);
-		spin_unlock(&anon_vma->lock);
-		if (empty)
-			anon_vma_free(anon_vma);
-	}
-
-	if (rcu_locked)
-		rcu_read_unlock();
+	if (anon_vma)
+		drop_anon_vma(anon_vma);
 out:
 	unlock_page(hpage);
 