Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--  mm/huge_memory.c  16
1 files changed, 16 insertions, 0 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 0c1e8f939f7c..763507932898 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -841,6 +841,19 @@ static void __split_huge_page(struct page *page,
 			continue;
 		mapcount += __split_huge_page_splitting(page, vma, addr);
 	}
+	/*
+	 * It is critical that new vmas are added to the tail of the
+	 * anon_vma list. This guarantees that if copy_huge_pmd() runs
+	 * and establishes a child pmd before
+	 * __split_huge_page_splitting() freezes the parent pmd (so if
+	 * we fail to prevent copy_huge_pmd() from running until the
+	 * whole __split_huge_page() is complete), we will still see
+	 * the newly established pmd of the child later during the
+	 * walk, to be able to set it as pmd_trans_splitting too.
+	 */
+	if (mapcount != page_mapcount(page))
+		printk(KERN_ERR "mapcount %d page_mapcount %d\n",
+		       mapcount, page_mapcount(page));
 	BUG_ON(mapcount != page_mapcount(page));
 
 	__split_huge_page_refcount(page);
@@ -854,6 +867,9 @@ static void __split_huge_page(struct page *page,
 			continue;
 		mapcount2 += __split_huge_page_map(page, vma, addr);
 	}
+	if (mapcount != mapcount2)
+		printk(KERN_ERR "mapcount %d mapcount2 %d page_mapcount %d\n",
+		       mapcount, mapcount2, page_mapcount(page));
 	BUG_ON(mapcount != mapcount2);
 }
 
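The comment added in the first hunk relies on one list property: a forward walk of the anon_vma list still visits entries appended to the tail while the walk is in progress, so a child pmd established by copy_huge_pmd() is not missed. The following is only a rough, self-contained illustration of that property in plain user-space C with made-up names; it is not kernel code and does not reproduce the anon_vma data structures.

#include <stdio.h>
#include <stdlib.h>

/* Minimal singly-linked list standing in for the anon_vma vma list. */
struct node {
	int id;
	struct node *next;
};

static struct node *head, *tail;

/* Append at the tail, the ordering the comment depends on. */
static void add_tail(int id)
{
	struct node *n = malloc(sizeof(*n));

	n->id = id;
	n->next = NULL;
	if (tail)
		tail->next = n;
	else
		head = n;
	tail = n;
}

int main(void)
{
	add_tail(1);
	add_tail(2);

	/* Walk the list; partway through, a new entry is appended. */
	for (struct node *n = head; n; n = n->next) {
		printf("visiting %d\n", n->id);
		if (n->id == 2)
			add_tail(3);	/* appended mid-walk, still visited */
	}
	return 0;
}

Compiled with gcc this prints "visiting 1", "visiting 2", "visiting 3": the node appended mid-walk is still reached. Had the new node been inserted at the head instead, the walk would have missed it, which is why the patch comment insists that new vmas land at the tail of the anon_vma list.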