about summary refs log tree commit diff stats
path: root/mm/internal.h
diff options
context:
space:
mode:
authorSasha Levin <sasha.levin@oracle.com>2014-01-23 18:52:54 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2014-01-23 19:36:50 -0500
commit309381feaee564281c3d9e90fbca8963bb7428ad (patch)
tree7e9f990c0cffcb8c5fc90deb1c7eac445c5ada0e /mm/internal.h
parente3bba3c3c90cd434c1ccb9e5dc704a96baf9541c (diff)
mm: dump page when hitting a VM_BUG_ON using VM_BUG_ON_PAGE
Most of the VM_BUG_ON assertions are performed on a page. Usually, when one of these assertions fails we'll get a BUG_ON with a call stack and the registers. Based on recent requests to add a small piece of code that dumps the page at various VM_BUG_ON sites, I've noticed that the page dump is quite useful to people debugging issues in mm. This patch adds a VM_BUG_ON_PAGE(cond, page) which, beyond doing what VM_BUG_ON() does, also dumps the page before executing the actual BUG_ON. [akpm@linux-foundation.org: fix up includes] Signed-off-by: Sasha Levin <sasha.levin@oracle.com> Cc: "Kirill A. Shutemov" <kirill@shutemov.name> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/internal.h')
-rw-r--r--mm/internal.h10
1 file changed, 5 insertions, 5 deletions
diff --git a/mm/internal.h b/mm/internal.h
index a346ba120e42..dc95e979ae56 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -27,8 +27,8 @@ static inline void set_page_count(struct page *page, int v)
27 */ 27 */
28static inline void set_page_refcounted(struct page *page) 28static inline void set_page_refcounted(struct page *page)
29{ 29{
30 VM_BUG_ON(PageTail(page)); 30 VM_BUG_ON_PAGE(PageTail(page), page);
31 VM_BUG_ON(atomic_read(&page->_count)); 31 VM_BUG_ON_PAGE(atomic_read(&page->_count), page);
32 set_page_count(page, 1); 32 set_page_count(page, 1);
33} 33}
34 34
@@ -46,7 +46,7 @@ static inline void __get_page_tail_foll(struct page *page,
46 * speculative page access (like in 46 * speculative page access (like in
47 * page_cache_get_speculative()) on tail pages. 47 * page_cache_get_speculative()) on tail pages.
48 */ 48 */
49 VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0); 49 VM_BUG_ON_PAGE(atomic_read(&page->first_page->_count) <= 0, page);
50 if (get_page_head) 50 if (get_page_head)
51 atomic_inc(&page->first_page->_count); 51 atomic_inc(&page->first_page->_count);
52 get_huge_page_tail(page); 52 get_huge_page_tail(page);
@@ -71,7 +71,7 @@ static inline void get_page_foll(struct page *page)
71 * Getting a normal page or the head of a compound page 71 * Getting a normal page or the head of a compound page
72 * requires to already have an elevated page->_count. 72 * requires to already have an elevated page->_count.
73 */ 73 */
74 VM_BUG_ON(atomic_read(&page->_count) <= 0); 74 VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
75 atomic_inc(&page->_count); 75 atomic_inc(&page->_count);
76 } 76 }
77} 77}
@@ -173,7 +173,7 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
173static inline int mlocked_vma_newpage(struct vm_area_struct *vma, 173static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
174 struct page *page) 174 struct page *page)
175{ 175{
176 VM_BUG_ON(PageLRU(page)); 176 VM_BUG_ON_PAGE(PageLRU(page), page);
177 177
178 if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) 178 if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
179 return 0; 179 return 0;