path: root/include
author	Sasha Levin <sasha.levin@oracle.com>	2014-01-23 18:52:54 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-23 19:36:50 -0500
commit	309381feaee564281c3d9e90fbca8963bb7428ad (patch)
tree	7e9f990c0cffcb8c5fc90deb1c7eac445c5ada0e /include
parent	e3bba3c3c90cd434c1ccb9e5dc704a96baf9541c (diff)
mm: dump page when hitting a VM_BUG_ON using VM_BUG_ON_PAGE
Most of the VM_BUG_ON assertions are performed on a page. Usually, when one of these assertions fails we'll get a BUG_ON with a call stack and the registers.

Based on recent requests to add a small piece of code that dumps the page at various VM_BUG_ON sites, I've noticed that the page dump is quite useful to people debugging issues in mm.

This patch adds a VM_BUG_ON_PAGE(cond, page) which, beyond doing what VM_BUG_ON() does, also dumps the page before executing the actual BUG_ON.

[akpm@linux-foundation.org: fix up includes]
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
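For reference, a minimal sketch of what the conversion looks like at a call site, using the page_hstate() helper that this patch touches in include/linux/hugetlb.h. With CONFIG_DEBUG_VM=y the offending struct page is dumped before BUG() fires; with CONFIG_DEBUG_VM=n the new macro falls back to VM_BUG_ON(), which on such builds only compile-checks the expression (see the mmdebug.h hunk below).

/* Before: a failed assertion gives only registers and a call stack. */
static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON(!PageHuge(page));
	return size_to_hstate(PAGE_SIZE << compound_order(page));
}

/* After: the struct page contents are dumped first, then BUG() runs. */
static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(PAGE_SIZE << compound_order(page));
}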
Diffstat (limited to 'include')
-rw-r--r--	include/linux/gfp.h	1
-rw-r--r--	include/linux/hugetlb.h	3
-rw-r--r--	include/linux/hugetlb_cgroup.h	5
-rw-r--r--	include/linux/mm.h	29
-rw-r--r--	include/linux/mmdebug.h	9
-rw-r--r--	include/linux/page-flags.h	10
-rw-r--r--	include/linux/pagemap.h	10
-rw-r--r--	include/linux/percpu.h	1
8 files changed, 39 insertions, 29 deletions
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 9b4dd491f7e8..0437439bc047 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -1,6 +1,7 @@
 #ifndef __LINUX_GFP_H
 #define __LINUX_GFP_H
 
+#include <linux/mmdebug.h>
 #include <linux/mmzone.h>
 #include <linux/stddef.h>
 #include <linux/linkage.h>
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index d01cc972a1d9..8c43cc469d78 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -2,6 +2,7 @@
 #define _LINUX_HUGETLB_H
 
 #include <linux/mm_types.h>
+#include <linux/mmdebug.h>
 #include <linux/fs.h>
 #include <linux/hugetlb_inline.h>
 #include <linux/cgroup.h>
@@ -354,7 +355,7 @@ static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
 
 static inline struct hstate *page_hstate(struct page *page)
 {
-	VM_BUG_ON(!PageHuge(page));
+	VM_BUG_ON_PAGE(!PageHuge(page), page);
 	return size_to_hstate(PAGE_SIZE << compound_order(page));
 }
 
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index ce8217f7b5c2..787bba3bf552 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -15,6 +15,7 @@
 #ifndef _LINUX_HUGETLB_CGROUP_H
 #define _LINUX_HUGETLB_CGROUP_H
 
+#include <linux/mmdebug.h>
 #include <linux/res_counter.h>
 
 struct hugetlb_cgroup;
@@ -28,7 +29,7 @@ struct hugetlb_cgroup;
 
 static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
 {
-	VM_BUG_ON(!PageHuge(page));
+	VM_BUG_ON_PAGE(!PageHuge(page), page);
 
 	if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
 		return NULL;
@@ -38,7 +39,7 @@ static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
 static inline
 int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
 {
-	VM_BUG_ON(!PageHuge(page));
+	VM_BUG_ON_PAGE(!PageHuge(page), page);
 
 	if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
 		return -1;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 03bbcb84d96e..d9992fc128ca 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -5,6 +5,7 @@
 
 #ifdef __KERNEL__
 
+#include <linux/mmdebug.h>
 #include <linux/gfp.h>
 #include <linux/bug.h>
 #include <linux/list.h>
@@ -303,7 +304,7 @@ static inline int get_freepage_migratetype(struct page *page)
  */
 static inline int put_page_testzero(struct page *page)
 {
-	VM_BUG_ON(atomic_read(&page->_count) == 0);
+	VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page);
 	return atomic_dec_and_test(&page->_count);
 }
 
@@ -364,7 +365,7 @@ static inline int is_vmalloc_or_module_addr(const void *x)
 static inline void compound_lock(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	VM_BUG_ON(PageSlab(page));
+	VM_BUG_ON_PAGE(PageSlab(page), page);
 	bit_spin_lock(PG_compound_lock, &page->flags);
 #endif
 }
@@ -372,7 +373,7 @@ static inline void compound_lock(struct page *page)
 static inline void compound_unlock(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	VM_BUG_ON(PageSlab(page));
+	VM_BUG_ON_PAGE(PageSlab(page), page);
 	bit_spin_unlock(PG_compound_lock, &page->flags);
 #endif
 }
@@ -447,7 +448,7 @@ static inline bool __compound_tail_refcounted(struct page *page)
  */
 static inline bool compound_tail_refcounted(struct page *page)
 {
-	VM_BUG_ON(!PageHead(page));
+	VM_BUG_ON_PAGE(!PageHead(page), page);
 	return __compound_tail_refcounted(page);
 }
 
@@ -456,9 +457,9 @@ static inline void get_huge_page_tail(struct page *page)
 	/*
 	 * __split_huge_page_refcount() cannot run from under us.
 	 */
-	VM_BUG_ON(!PageTail(page));
-	VM_BUG_ON(page_mapcount(page) < 0);
-	VM_BUG_ON(atomic_read(&page->_count) != 0);
+	VM_BUG_ON_PAGE(!PageTail(page), page);
+	VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
+	VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
 	if (compound_tail_refcounted(page->first_page))
 		atomic_inc(&page->_mapcount);
 }
@@ -474,7 +475,7 @@ static inline void get_page(struct page *page)
 	 * Getting a normal page or the head of a compound page
 	 * requires to already have an elevated page->_count.
 	 */
-	VM_BUG_ON(atomic_read(&page->_count) <= 0);
+	VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
 	atomic_inc(&page->_count);
 }
 
@@ -511,13 +512,13 @@ static inline int PageBuddy(struct page *page)
 
 static inline void __SetPageBuddy(struct page *page)
 {
-	VM_BUG_ON(atomic_read(&page->_mapcount) != -1);
+	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
 	atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
 }
 
 static inline void __ClearPageBuddy(struct page *page)
 {
-	VM_BUG_ON(!PageBuddy(page));
+	VM_BUG_ON_PAGE(!PageBuddy(page), page);
 	atomic_set(&page->_mapcount, -1);
 }
 
@@ -1401,7 +1402,7 @@ static inline bool ptlock_init(struct page *page)
 	 * slab code uses page->slab_cache and page->first_page (for tail
 	 * pages), which share storage with page->ptl.
 	 */
-	VM_BUG_ON(*(unsigned long *)&page->ptl);
+	VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
 	if (!ptlock_alloc(page))
 		return false;
 	spin_lock_init(ptlock_ptr(page));
@@ -1492,7 +1493,7 @@ static inline bool pgtable_pmd_page_ctor(struct page *page)
 static inline void pgtable_pmd_page_dtor(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	VM_BUG_ON(page->pmd_huge_pte);
+	VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
 #endif
 	ptlock_free(page);
 }
@@ -2029,10 +2030,6 @@ extern void shake_page(struct page *p, int access);
 extern atomic_long_t num_poisoned_pages;
 extern int soft_offline_page(struct page *page, int flags);
 
-extern void dump_page(struct page *page, char *reason);
-extern void dump_page_badflags(struct page *page, char *reason,
-			       unsigned long badflags);
-
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
 extern void clear_huge_page(struct page *page,
 			    unsigned long addr,
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 580bd587d916..5042c036dda9 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -1,10 +1,19 @@
 #ifndef LINUX_MM_DEBUG_H
 #define LINUX_MM_DEBUG_H 1
 
+struct page;
+
+extern void dump_page(struct page *page, char *reason);
+extern void dump_page_badflags(struct page *page, char *reason,
+			       unsigned long badflags);
+
 #ifdef CONFIG_DEBUG_VM
 #define VM_BUG_ON(cond) BUG_ON(cond)
+#define VM_BUG_ON_PAGE(cond, page) \
+	do { if (unlikely(cond)) { dump_page(page, NULL); BUG(); } } while (0)
 #else
 #define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
+#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond)
 #endif
 
 #ifdef CONFIG_DEBUG_VIRTUAL
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 98ada58f9942..e464b4e987e8 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -412,7 +412,7 @@ static inline void ClearPageCompound(struct page *page)
  */
 static inline int PageTransHuge(struct page *page)
 {
-	VM_BUG_ON(PageTail(page));
+	VM_BUG_ON_PAGE(PageTail(page), page);
 	return PageHead(page);
 }
 
@@ -460,25 +460,25 @@ static inline int PageTransTail(struct page *page)
  */
 static inline int PageSlabPfmemalloc(struct page *page)
 {
-	VM_BUG_ON(!PageSlab(page));
+	VM_BUG_ON_PAGE(!PageSlab(page), page);
 	return PageActive(page);
 }
 
 static inline void SetPageSlabPfmemalloc(struct page *page)
 {
-	VM_BUG_ON(!PageSlab(page));
+	VM_BUG_ON_PAGE(!PageSlab(page), page);
 	SetPageActive(page);
 }
 
 static inline void __ClearPageSlabPfmemalloc(struct page *page)
 {
-	VM_BUG_ON(!PageSlab(page));
+	VM_BUG_ON_PAGE(!PageSlab(page), page);
 	__ClearPageActive(page);
 }
 
 static inline void ClearPageSlabPfmemalloc(struct page *page)
 {
-	VM_BUG_ON(!PageSlab(page));
+	VM_BUG_ON_PAGE(!PageSlab(page), page);
 	ClearPageActive(page);
 }
 
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index e3dea75a078b..1710d1b060ba 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -162,7 +162,7 @@ static inline int page_cache_get_speculative(struct page *page)
 	 * disabling preempt, and hence no need for the "speculative get" that
 	 * SMP requires.
 	 */
-	VM_BUG_ON(page_count(page) == 0);
+	VM_BUG_ON_PAGE(page_count(page) == 0, page);
 	atomic_inc(&page->_count);
 
 #else
@@ -175,7 +175,7 @@ static inline int page_cache_get_speculative(struct page *page)
 		return 0;
 	}
 #endif
-	VM_BUG_ON(PageTail(page));
+	VM_BUG_ON_PAGE(PageTail(page), page);
 
 	return 1;
 }
@@ -191,14 +191,14 @@ static inline int page_cache_add_speculative(struct page *page, int count)
 # ifdef CONFIG_PREEMPT_COUNT
 	VM_BUG_ON(!in_atomic());
 # endif
-	VM_BUG_ON(page_count(page) == 0);
+	VM_BUG_ON_PAGE(page_count(page) == 0, page);
 	atomic_add(count, &page->_count);
 
 #else
 	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
 		return 0;
 #endif
-	VM_BUG_ON(PageCompound(page) && page != compound_head(page));
+	VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);
 
 	return 1;
 }
@@ -210,7 +210,7 @@ static inline int page_freeze_refs(struct page *page, int count)
 
 static inline void page_unfreeze_refs(struct page *page, int count)
 {
-	VM_BUG_ON(page_count(page) != 0);
+	VM_BUG_ON_PAGE(page_count(page) != 0, page);
 	VM_BUG_ON(count == 0);
 
 	atomic_set(&page->_count, count);
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 9e4761caa80c..e3817d2441b6 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -1,6 +1,7 @@
 #ifndef __LINUX_PERCPU_H
 #define __LINUX_PERCPU_H
 
+#include <linux/mmdebug.h>
 #include <linux/preempt.h>
 #include <linux/smp.h>
 #include <linux/cpumask.h>