author		Sasha Levin <sasha.levin@oracle.com>	2014-01-23 18:52:54 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-23 19:36:50 -0500
commit		309381feaee564281c3d9e90fbca8963bb7428ad
tree		7e9f990c0cffcb8c5fc90deb1c7eac445c5ada0e /mm/huge_memory.c
parent		e3bba3c3c90cd434c1ccb9e5dc704a96baf9541c
mm: dump page when hitting a VM_BUG_ON using VM_BUG_ON_PAGE
Most of the VM_BUG_ON assertions are performed on a page. Usually, when one of these assertions fails we'll get a BUG_ON with a call stack and the registers.

Based on recent requests to add a small piece of code that dumps the page at various VM_BUG_ON sites, I've noticed that the page dump is quite useful to people debugging issues in mm.

This patch adds VM_BUG_ON_PAGE(cond, page), which, beyond doing what VM_BUG_ON() does, also dumps the page before executing the actual BUG_ON.

[akpm@linux-foundation.org: fix up includes]
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
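For context: the macro definition itself lands in include/linux/mmdebug.h and is outside this diffstat-limited view. A minimal sketch of what the new macro amounts to, reconstructed from the changelog above rather than quoted from the patch:

	/*
	 * Sketch, not the verbatim patch: when the assertion fires under
	 * CONFIG_DEBUG_VM, dump the page's state (flags, mapping, counts)
	 * via dump_page(), then fall through to the usual BUG().
	 */
	#ifdef CONFIG_DEBUG_VM
	#define VM_BUG_ON(cond) BUG_ON(cond)
	#define VM_BUG_ON_PAGE(cond, page)				\
		do {							\
			if (unlikely(cond)) {				\
				dump_page(page);			\
				BUG();					\
			}						\
		} while (0)
	#else
	/* Without CONFIG_DEBUG_VM the check compiles away like VM_BUG_ON(). */
	#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond)
	#endif

The payoff is that a failing assertion now prints the page's state before the oops, which is exactly the information the changelog says people kept asking for at these sites.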
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--	mm/huge_memory.c	36
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 95d1acb0f3d2..25fab7150fa0 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -712,7 +712,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 	pgtable_t pgtable;
 	spinlock_t *ptl;
 
-	VM_BUG_ON(!PageCompound(page));
+	VM_BUG_ON_PAGE(!PageCompound(page), page);
 	pgtable = pte_alloc_one(mm, haddr);
 	if (unlikely(!pgtable))
 		return VM_FAULT_OOM;
@@ -893,7 +893,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		goto out;
 	}
 	src_page = pmd_page(pmd);
-	VM_BUG_ON(!PageHead(src_page));
+	VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
 	get_page(src_page);
 	page_dup_rmap(src_page);
 	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
@@ -1067,7 +1067,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 	ptl = pmd_lock(mm, pmd);
 	if (unlikely(!pmd_same(*pmd, orig_pmd)))
 		goto out_free_pages;
-	VM_BUG_ON(!PageHead(page));
+	VM_BUG_ON_PAGE(!PageHead(page), page);
 
 	pmdp_clear_flush(vma, haddr, pmd);
 	/* leave pmd empty until pte is filled */
@@ -1133,7 +1133,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto out_unlock;
 
 	page = pmd_page(orig_pmd);
-	VM_BUG_ON(!PageCompound(page) || !PageHead(page));
+	VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
 	if (page_mapcount(page) == 1) {
 		pmd_t entry;
 		entry = pmd_mkyoung(orig_pmd);
@@ -1211,7 +1211,7 @@ alloc:
 		add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
 		put_huge_zero_page();
 	} else {
-		VM_BUG_ON(!PageHead(page));
+		VM_BUG_ON_PAGE(!PageHead(page), page);
 		page_remove_rmap(page);
 		put_page(page);
 	}
@@ -1249,7 +1249,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 		goto out;
 
 	page = pmd_page(*pmd);
-	VM_BUG_ON(!PageHead(page));
+	VM_BUG_ON_PAGE(!PageHead(page), page);
 	if (flags & FOLL_TOUCH) {
 		pmd_t _pmd;
 		/*
@@ -1274,7 +1274,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 		}
 	}
 	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
-	VM_BUG_ON(!PageCompound(page));
+	VM_BUG_ON_PAGE(!PageCompound(page), page);
 	if (flags & FOLL_GET)
 		get_page_foll(page);
 
@@ -1432,9 +1432,9 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	} else {
 		page = pmd_page(orig_pmd);
 		page_remove_rmap(page);
-		VM_BUG_ON(page_mapcount(page) < 0);
+		VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
 		add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
-		VM_BUG_ON(!PageHead(page));
+		VM_BUG_ON_PAGE(!PageHead(page), page);
 		atomic_long_dec(&tlb->mm->nr_ptes);
 		spin_unlock(ptl);
 		tlb_remove_page(tlb, page);
@@ -2176,9 +2176,9 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 		if (unlikely(!page))
 			goto out;
 
-		VM_BUG_ON(PageCompound(page));
-		BUG_ON(!PageAnon(page));
-		VM_BUG_ON(!PageSwapBacked(page));
+		VM_BUG_ON_PAGE(PageCompound(page), page);
+		VM_BUG_ON_PAGE(!PageAnon(page), page);
+		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
 
 		/* cannot use mapcount: can't collapse if there's a gup pin */
 		if (page_count(page) != 1)
@@ -2201,8 +2201,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 		}
 		/* 0 stands for page_is_file_cache(page) == false */
 		inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
-		VM_BUG_ON(!PageLocked(page));
-		VM_BUG_ON(PageLRU(page));
+		VM_BUG_ON_PAGE(!PageLocked(page), page);
+		VM_BUG_ON_PAGE(PageLRU(page), page);
 
 		/* If there is no mapped pte young don't collapse the page */
 		if (pte_young(pteval) || PageReferenced(page) ||
@@ -2232,7 +2232,7 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
 		} else {
 			src_page = pte_page(pteval);
 			copy_user_highpage(page, src_page, address, vma);
-			VM_BUG_ON(page_mapcount(src_page) != 1);
+			VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
 			release_pte_page(src_page);
 			/*
 			 * ptl mostly unnecessary, but preempt has to
@@ -2311,7 +2311,7 @@ static struct page
 		       struct vm_area_struct *vma, unsigned long address,
 		       int node)
 {
-	VM_BUG_ON(*hpage);
+	VM_BUG_ON_PAGE(*hpage, *hpage);
 	/*
 	 * Allocate the page while the vma is still valid and under
 	 * the mmap_sem read mode so there is no memory allocation
@@ -2580,7 +2580,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
 		 */
 		node = page_to_nid(page);
 		khugepaged_node_load[node]++;
-		VM_BUG_ON(PageCompound(page));
+		VM_BUG_ON_PAGE(PageCompound(page), page);
 		if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
 			goto out_unmap;
 		/* cannot use mapcount: can't collapse if there's a gup pin */
@@ -2876,7 +2876,7 @@ again:
 		return;
 	}
 	page = pmd_page(*pmd);
-	VM_BUG_ON(!page_count(page));
+	VM_BUG_ON_PAGE(!page_count(page), page);
 	get_page(page);
 	spin_unlock(ptl);
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);