aboutsummaryrefslogtreecommitdiffstats
path: root/mm/ksm.c
diff options
context:
space:
mode:
authorSasha Levin <sasha.levin@oracle.com>2014-01-23 18:52:54 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2014-01-23 19:36:50 -0500
commit309381feaee564281c3d9e90fbca8963bb7428ad (patch)
tree7e9f990c0cffcb8c5fc90deb1c7eac445c5ada0e /mm/ksm.c
parente3bba3c3c90cd434c1ccb9e5dc704a96baf9541c (diff)
mm: dump page when hitting a VM_BUG_ON using VM_BUG_ON_PAGE
Most of the VM_BUG_ON assertions are performed on a page. Usually, when one of these assertions fails we'll get a BUG_ON with a call stack and the registers. Based on recent requests to add a small piece of page-dumping code at various VM_BUG_ON sites, I've noticed that the page dump is quite useful to people debugging issues in mm. This patch adds a VM_BUG_ON_PAGE(cond, page) which, beyond doing what VM_BUG_ON() does, also dumps the page before executing the actual BUG_ON. [akpm@linux-foundation.org: fix up includes] Signed-off-by: Sasha Levin <sasha.levin@oracle.com> Cc: "Kirill A. Shutemov" <kirill@shutemov.name> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/ksm.c')
-rw-r--r--mm/ksm.c12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/mm/ksm.c b/mm/ksm.c
index 3df141e5f3e0..f91ddf5c3688 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1898,13 +1898,13 @@ int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
1898 int ret = SWAP_AGAIN; 1898 int ret = SWAP_AGAIN;
1899 int search_new_forks = 0; 1899 int search_new_forks = 0;
1900 1900
1901 VM_BUG_ON(!PageKsm(page)); 1901 VM_BUG_ON_PAGE(!PageKsm(page), page);
1902 1902
1903 /* 1903 /*
1904 * Rely on the page lock to protect against concurrent modifications 1904 * Rely on the page lock to protect against concurrent modifications
1905 * to that page's node of the stable tree. 1905 * to that page's node of the stable tree.
1906 */ 1906 */
1907 VM_BUG_ON(!PageLocked(page)); 1907 VM_BUG_ON_PAGE(!PageLocked(page), page);
1908 1908
1909 stable_node = page_stable_node(page); 1909 stable_node = page_stable_node(page);
1910 if (!stable_node) 1910 if (!stable_node)
@@ -1958,13 +1958,13 @@ void ksm_migrate_page(struct page *newpage, struct page *oldpage)
1958{ 1958{
1959 struct stable_node *stable_node; 1959 struct stable_node *stable_node;
1960 1960
1961 VM_BUG_ON(!PageLocked(oldpage)); 1961 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
1962 VM_BUG_ON(!PageLocked(newpage)); 1962 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
1963 VM_BUG_ON(newpage->mapping != oldpage->mapping); 1963 VM_BUG_ON_PAGE(newpage->mapping != oldpage->mapping, newpage);
1964 1964
1965 stable_node = page_stable_node(newpage); 1965 stable_node = page_stable_node(newpage);
1966 if (stable_node) { 1966 if (stable_node) {
1967 VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage)); 1967 VM_BUG_ON_PAGE(stable_node->kpfn != page_to_pfn(oldpage), oldpage);
1968 stable_node->kpfn = page_to_pfn(newpage); 1968 stable_node->kpfn = page_to_pfn(newpage);
1969 /* 1969 /*
1970 * newpage->mapping was set in advance; now we need smp_wmb() 1970 * newpage->mapping was set in advance; now we need smp_wmb()