author     Andrea Arcangeli <aarcange@redhat.com>          2011-01-13 18:46:53 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-01-13 20:32:42 -0500
commit     05759d380a9d7f131a475186c07fce58ceaa8902 (patch)
tree       6c253f15176c89f6f8dfd80b1471bcf3e0a3e44e /mm
parent     8a07651ee8cdaa9e27cb4ae372aed347533770f5 (diff)
thp: split_huge_page anon_vma ordering dependency
This documents how split_huge_page is safe versus new vma insertions into the anon_vma that may have already released the anon_vma->lock, but not established pmds yet, when split_huge_page starts.

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
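To make the ordering argument concrete, here is a minimal userspace sketch (plain C, not kernel code; the names vma_node, add_vma_tail and walk_and_mark are made up for illustration): a forward walk over a list that only ever grows at its tail will still visit an entry that was appended, and had its pmd established, before the walk reaches that position. That is the property __split_huge_page() relies on to be able to mark a freshly copied child pmd as pmd_trans_splitting.

#include <stdio.h>
#include <stdlib.h>

struct vma_node {
	int id;
	int pmd_established;		/* stand-in for a huge pmd being mapped */
	int pmd_splitting;		/* stand-in for pmd_trans_splitting */
	struct vma_node *next;
};

static struct vma_node head;		/* sentinel, like anon_vma->head */
static struct vma_node *tail = &head;

/* Like anon_vma_chain_link(): new vmas always go to the tail. */
static struct vma_node *add_vma_tail(int id)
{
	struct vma_node *n = calloc(1, sizeof(*n));

	if (!n)
		exit(1);
	n->id = id;
	tail->next = n;
	tail = n;
	return n;
}

/* Like the splitting pass in __split_huge_page(): walk in list order. */
static void walk_and_mark(void)
{
	struct vma_node *n;

	for (n = head.next; n; n = n->next) {
		if (n->pmd_established) {
			n->pmd_splitting = 1;
			printf("vma %d: pmd marked splitting\n", n->id);
		}
	}
}

int main(void)
{
	struct vma_node *parent = add_vma_tail(0);
	struct vma_node *child;

	parent->pmd_established = 1;

	/*
	 * Fork-like sequence: the child vma is linked at the tail first
	 * (under the anon_vma lock in the real code) and its pmd is
	 * copied from the parent afterwards.  Because the child sits
	 * after the parent in list order, a walk that freezes the
	 * parent also reaches the child and marks it.
	 */
	child = add_vma_tail(1);
	child->pmd_established = 1;	/* copy_huge_pmd() analogue ran */

	walk_and_mark();
	return 0;
}

The sketch only shows the list-ordering half of the argument; in the kernel, the anon_vma lock and the pmd_trans_splitting bit handle the concurrency around it.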
Diffstat (limited to 'mm')
-rw-r--r--  mm/huge_memory.c  16
-rw-r--r--  mm/rmap.c          4
2 files changed, 20 insertions, 0 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 0c1e8f939f7c..763507932898 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -841,6 +841,19 @@ static void __split_huge_page(struct page *page,
 			continue;
 		mapcount += __split_huge_page_splitting(page, vma, addr);
 	}
+	/*
+	 * It is critical that new vmas are added to the tail of the
+	 * anon_vma list. This guarantees that if copy_huge_pmd() runs
+	 * and establishes a child pmd before
+	 * __split_huge_page_splitting() freezes the parent pmd (so if
+	 * we fail to prevent copy_huge_pmd() from running until the
+	 * whole __split_huge_page() is complete), we will still see
+	 * the newly established pmd of the child later during the
+	 * walk, to be able to set it as pmd_trans_splitting too.
+	 */
+	if (mapcount != page_mapcount(page))
+		printk(KERN_ERR "mapcount %d page_mapcount %d\n",
+		       mapcount, page_mapcount(page));
 	BUG_ON(mapcount != page_mapcount(page));
 
 	__split_huge_page_refcount(page);
@@ -854,6 +867,9 @@ static void __split_huge_page(struct page *page,
 			continue;
 		mapcount2 += __split_huge_page_map(page, vma, addr);
 	}
+	if (mapcount != mapcount2)
+		printk(KERN_ERR "mapcount %d mapcount2 %d page_mapcount %d\n",
+		       mapcount, mapcount2, page_mapcount(page));
 	BUG_ON(mapcount != mapcount2);
 }
 
diff --git a/mm/rmap.c b/mm/rmap.c
index e41375a6b029..92e14dcfe737 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -177,6 +177,10 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
 	list_add(&avc->same_vma, &vma->anon_vma_chain);
 
 	anon_vma_lock(anon_vma);
+	/*
+	 * It's critical to add new vmas to the tail of the anon_vma,
+	 * see comment in huge_memory.c:__split_huge_page().
+	 */
 	list_add_tail(&avc->same_anon_vma, &anon_vma->head);
 	anon_vma_unlock(anon_vma);
 }
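Read together with the huge_memory.c comment above, this list_add_tail() is what keeps the walk order correct: had the new vma been linked at the head instead, the splitting walk could pass the child's position before copy_huge_pmd() established its pmd and before the parent pmd was frozen, that pmd would never be marked pmd_trans_splitting, and the mapcount consistency checks added in huge_memory.c would trip.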