author     Sasha Levin <sasha.levin@oracle.com>           2014-01-23 18:52:54 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org> 2014-01-23 19:36:50 -0500
commit     309381feaee564281c3d9e90fbca8963bb7428ad
tree       7e9f990c0cffcb8c5fc90deb1c7eac445c5ada0e /mm/rmap.c
parent     e3bba3c3c90cd434c1ccb9e5dc704a96baf9541c
mm: dump page when hitting a VM_BUG_ON using VM_BUG_ON_PAGE
Most of the VM_BUG_ON assertions are performed on a page. Usually, when one of these assertions fails we'll get a BUG_ON with a call stack and the registers.

Based on recent requests to add a small piece of code that dumps the page at various VM_BUG_ON sites, I've noticed that the page dump is quite useful to people debugging issues in mm.

This patch adds VM_BUG_ON_PAGE(cond, page), which, beyond doing what VM_BUG_ON() does, also dumps the page before executing the actual BUG_ON.

[akpm@linux-foundation.org: fix up includes]
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
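For context, the hunks below only convert mm/rmap.c call sites; the macro itself is added to include/linux/mmdebug.h elsewhere in the same patch. A minimal sketch of the pattern the message describes (the exact definition is not shown in this diff, so treat the details as an approximation) could look like:

    /*
     * Sketch of the VM_BUG_ON_PAGE pattern described above; the real
     * definition lives in include/linux/mmdebug.h and is only active
     * under CONFIG_DEBUG_VM.
     */
    #ifdef CONFIG_DEBUG_VM
    #define VM_BUG_ON(cond)            BUG_ON(cond)
    #define VM_BUG_ON_PAGE(cond, page)                              \
            do {                                                    \
                    if (unlikely(cond)) {                           \
                            dump_page(page); /* dump page state */  \
                            BUG();           /* then die as usual */\
                    }                                               \
            } while (0)
    #else
    #define VM_BUG_ON(cond)            BUILD_BUG_ON_INVALID(cond)
    #define VM_BUG_ON_PAGE(cond, page) BUILD_BUG_ON_INVALID(cond)
    #endif

With a definition like this, a failing assertion first prints the page's state (flags, mapping, counts) via dump_page() and only then triggers the BUG, so the page dump lands in the oops output alongside the usual call stack and registers.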
Diffstat (limited to 'mm/rmap.c')
 mm/rmap.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 962e2a1e13a0..2dcd3353c3f6 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -894,9 +894,9 @@ void page_move_anon_rmap(struct page *page,
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 
-	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON(!anon_vma);
-	VM_BUG_ON(page->index != linear_page_index(vma, address));
+	VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);
 
 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
 	page->mapping = (struct address_space *) anon_vma;
@@ -995,7 +995,7 @@ void do_page_add_anon_rmap(struct page *page,
 	if (unlikely(PageKsm(page)))
 		return;
 
-	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	/* address might be in next vma when migration races vma_adjust */
 	if (first)
 		__page_set_anon_rmap(page, vma, address, exclusive);
@@ -1481,7 +1481,7 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
 		.anon_lock = page_lock_anon_vma_read,
 	};
 
-	VM_BUG_ON(!PageHuge(page) && PageTransHuge(page));
+	VM_BUG_ON_PAGE(!PageHuge(page) && PageTransHuge(page), page);
 
 	/*
 	 * During exec, a temporary VMA is setup and later moved.
@@ -1533,7 +1533,7 @@ int try_to_munlock(struct page *page)
 
 	};
 
-	VM_BUG_ON(!PageLocked(page) || PageLRU(page));
+	VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);
 
 	ret = rmap_walk(page, &rwc);
 	return ret;