diff options
author | Sasha Levin <sasha.levin@oracle.com> | 2014-01-23 18:52:54 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-01-23 19:36:50 -0500 |
commit | 309381feaee564281c3d9e90fbca8963bb7428ad (patch) | |
tree | 7e9f990c0cffcb8c5fc90deb1c7eac445c5ada0e /include/linux/mm.h | |
parent | e3bba3c3c90cd434c1ccb9e5dc704a96baf9541c (diff) |
mm: dump page when hitting a VM_BUG_ON using VM_BUG_ON_PAGE
Most of the VM_BUG_ON assertions are performed on a page. Usually, when
one of these assertions fails we'll get a BUG_ON with a call stack and
the registers.
Based on recent requests to add a small piece of code that dumps the page
at various VM_BUG_ON sites, I've noticed that the page dump is
quite useful to people debugging issues in mm.
This patch adds a VM_BUG_ON_PAGE(cond, page) which beyond doing what
VM_BUG_ON() does, also dumps the page before executing the actual
BUG_ON.
[akpm@linux-foundation.org: fix up includes]
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r-- | include/linux/mm.h | 29 |
1 file changed, 13 insertions, 16 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h index 03bbcb84d96e..d9992fc128ca 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -5,6 +5,7 @@ | |||
5 | 5 | ||
6 | #ifdef __KERNEL__ | 6 | #ifdef __KERNEL__ |
7 | 7 | ||
8 | #include <linux/mmdebug.h> | ||
8 | #include <linux/gfp.h> | 9 | #include <linux/gfp.h> |
9 | #include <linux/bug.h> | 10 | #include <linux/bug.h> |
10 | #include <linux/list.h> | 11 | #include <linux/list.h> |
@@ -303,7 +304,7 @@ static inline int get_freepage_migratetype(struct page *page) | |||
303 | */ | 304 | */ |
304 | static inline int put_page_testzero(struct page *page) | 305 | static inline int put_page_testzero(struct page *page) |
305 | { | 306 | { |
306 | VM_BUG_ON(atomic_read(&page->_count) == 0); | 307 | VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page); |
307 | return atomic_dec_and_test(&page->_count); | 308 | return atomic_dec_and_test(&page->_count); |
308 | } | 309 | } |
309 | 310 | ||
@@ -364,7 +365,7 @@ static inline int is_vmalloc_or_module_addr(const void *x) | |||
364 | static inline void compound_lock(struct page *page) | 365 | static inline void compound_lock(struct page *page) |
365 | { | 366 | { |
366 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 367 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
367 | VM_BUG_ON(PageSlab(page)); | 368 | VM_BUG_ON_PAGE(PageSlab(page), page); |
368 | bit_spin_lock(PG_compound_lock, &page->flags); | 369 | bit_spin_lock(PG_compound_lock, &page->flags); |
369 | #endif | 370 | #endif |
370 | } | 371 | } |
@@ -372,7 +373,7 @@ static inline void compound_lock(struct page *page) | |||
372 | static inline void compound_unlock(struct page *page) | 373 | static inline void compound_unlock(struct page *page) |
373 | { | 374 | { |
374 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 375 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
375 | VM_BUG_ON(PageSlab(page)); | 376 | VM_BUG_ON_PAGE(PageSlab(page), page); |
376 | bit_spin_unlock(PG_compound_lock, &page->flags); | 377 | bit_spin_unlock(PG_compound_lock, &page->flags); |
377 | #endif | 378 | #endif |
378 | } | 379 | } |
@@ -447,7 +448,7 @@ static inline bool __compound_tail_refcounted(struct page *page) | |||
447 | */ | 448 | */ |
448 | static inline bool compound_tail_refcounted(struct page *page) | 449 | static inline bool compound_tail_refcounted(struct page *page) |
449 | { | 450 | { |
450 | VM_BUG_ON(!PageHead(page)); | 451 | VM_BUG_ON_PAGE(!PageHead(page), page); |
451 | return __compound_tail_refcounted(page); | 452 | return __compound_tail_refcounted(page); |
452 | } | 453 | } |
453 | 454 | ||
@@ -456,9 +457,9 @@ static inline void get_huge_page_tail(struct page *page) | |||
456 | /* | 457 | /* |
457 | * __split_huge_page_refcount() cannot run from under us. | 458 | * __split_huge_page_refcount() cannot run from under us. |
458 | */ | 459 | */ |
459 | VM_BUG_ON(!PageTail(page)); | 460 | VM_BUG_ON_PAGE(!PageTail(page), page); |
460 | VM_BUG_ON(page_mapcount(page) < 0); | 461 | VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); |
461 | VM_BUG_ON(atomic_read(&page->_count) != 0); | 462 | VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page); |
462 | if (compound_tail_refcounted(page->first_page)) | 463 | if (compound_tail_refcounted(page->first_page)) |
463 | atomic_inc(&page->_mapcount); | 464 | atomic_inc(&page->_mapcount); |
464 | } | 465 | } |
@@ -474,7 +475,7 @@ static inline void get_page(struct page *page) | |||
474 | * Getting a normal page or the head of a compound page | 475 | * Getting a normal page or the head of a compound page |
475 | * requires to already have an elevated page->_count. | 476 | * requires to already have an elevated page->_count. |
476 | */ | 477 | */ |
477 | VM_BUG_ON(atomic_read(&page->_count) <= 0); | 478 | VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page); |
478 | atomic_inc(&page->_count); | 479 | atomic_inc(&page->_count); |
479 | } | 480 | } |
480 | 481 | ||
@@ -511,13 +512,13 @@ static inline int PageBuddy(struct page *page) | |||
511 | 512 | ||
512 | static inline void __SetPageBuddy(struct page *page) | 513 | static inline void __SetPageBuddy(struct page *page) |
513 | { | 514 | { |
514 | VM_BUG_ON(atomic_read(&page->_mapcount) != -1); | 515 | VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page); |
515 | atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE); | 516 | atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE); |
516 | } | 517 | } |
517 | 518 | ||
518 | static inline void __ClearPageBuddy(struct page *page) | 519 | static inline void __ClearPageBuddy(struct page *page) |
519 | { | 520 | { |
520 | VM_BUG_ON(!PageBuddy(page)); | 521 | VM_BUG_ON_PAGE(!PageBuddy(page), page); |
521 | atomic_set(&page->_mapcount, -1); | 522 | atomic_set(&page->_mapcount, -1); |
522 | } | 523 | } |
523 | 524 | ||
@@ -1401,7 +1402,7 @@ static inline bool ptlock_init(struct page *page) | |||
1401 | * slab code uses page->slab_cache and page->first_page (for tail | 1402 | * slab code uses page->slab_cache and page->first_page (for tail |
1402 | * pages), which share storage with page->ptl. | 1403 | * pages), which share storage with page->ptl. |
1403 | */ | 1404 | */ |
1404 | VM_BUG_ON(*(unsigned long *)&page->ptl); | 1405 | VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page); |
1405 | if (!ptlock_alloc(page)) | 1406 | if (!ptlock_alloc(page)) |
1406 | return false; | 1407 | return false; |
1407 | spin_lock_init(ptlock_ptr(page)); | 1408 | spin_lock_init(ptlock_ptr(page)); |
@@ -1492,7 +1493,7 @@ static inline bool pgtable_pmd_page_ctor(struct page *page) | |||
1492 | static inline void pgtable_pmd_page_dtor(struct page *page) | 1493 | static inline void pgtable_pmd_page_dtor(struct page *page) |
1493 | { | 1494 | { |
1494 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 1495 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
1495 | VM_BUG_ON(page->pmd_huge_pte); | 1496 | VM_BUG_ON_PAGE(page->pmd_huge_pte, page); |
1496 | #endif | 1497 | #endif |
1497 | ptlock_free(page); | 1498 | ptlock_free(page); |
1498 | } | 1499 | } |
@@ -2029,10 +2030,6 @@ extern void shake_page(struct page *p, int access); | |||
2029 | extern atomic_long_t num_poisoned_pages; | 2030 | extern atomic_long_t num_poisoned_pages; |
2030 | extern int soft_offline_page(struct page *page, int flags); | 2031 | extern int soft_offline_page(struct page *page, int flags); |
2031 | 2032 | ||
2032 | extern void dump_page(struct page *page, char *reason); | ||
2033 | extern void dump_page_badflags(struct page *page, char *reason, | ||
2034 | unsigned long badflags); | ||
2035 | |||
2036 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS) | 2033 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS) |
2037 | extern void clear_huge_page(struct page *page, | 2034 | extern void clear_huge_page(struct page *page, |
2038 | unsigned long addr, | 2035 | unsigned long addr, |