author		Sasha Levin <sasha.levin@oracle.com>	2014-01-23 18:52:54 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-23 19:36:50 -0500
commit		309381feaee564281c3d9e90fbca8963bb7428ad (patch)
tree		7e9f990c0cffcb8c5fc90deb1c7eac445c5ada0e /mm/page_alloc.c
parent		e3bba3c3c90cd434c1ccb9e5dc704a96baf9541c (diff)
mm: dump page when hitting a VM_BUG_ON using VM_BUG_ON_PAGE
Most of the VM_BUG_ON assertions are performed on a page.  Usually, when
one of these assertions fails we'll get a BUG_ON with a call stack and
the registers.

I've recently noticed, based on requests to add a small piece of code
that dumps the page to various VM_BUG_ON sites, that the page dump is
quite useful to people debugging issues in mm.

This patch adds VM_BUG_ON_PAGE(cond, page) which, beyond doing what
VM_BUG_ON() does, also dumps the page before executing the actual
BUG_ON.

[akpm@linux-foundation.org: fix up includes]
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
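For reference, a minimal sketch of how such a macro can be built on top of
dump_page().  The authoritative definition lives in the patch's
include/linux/mmdebug.h hunk, which falls outside this mm/page_alloc.c-limited
view, so treat the details (in particular the reason string) as illustrative:

	/*
	 * Illustrative sketch only: the real definition is in the
	 * include/linux/mmdebug.h part of the patch, not shown in this
	 * page_alloc.c-limited diff.
	 */
	#ifdef CONFIG_DEBUG_VM
	#define VM_BUG_ON(cond) BUG_ON(cond)
	#define VM_BUG_ON_PAGE(cond, page)					\
		do {								\
			if (unlikely(cond)) {					\
				dump_page(page, "VM_BUG_ON_PAGE(" __stringify(cond) ")"); \
				BUG();						\
			}							\
		} while (0)
	#else
	#define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
	#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond)
	#endif

The key point, per the commit message, is that the failing page is dumped
before BUG() fires, so the page's flags, refcount and mapping land in the
oops output alongside the usual call stack and registers.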
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	21
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1939f4446a36..f18f016cca80 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -509,12 +509,12 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
 		return 0;
 
 	if (page_is_guard(buddy) && page_order(buddy) == order) {
-		VM_BUG_ON(page_count(buddy) != 0);
+		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
 		return 1;
 	}
 
 	if (PageBuddy(buddy) && page_order(buddy) == order) {
-		VM_BUG_ON(page_count(buddy) != 0);
+		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
 		return 1;
 	}
 	return 0;
@@ -564,8 +564,8 @@ static inline void __free_one_page(struct page *page,
 
 	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
 
-	VM_BUG_ON(page_idx & ((1 << order) - 1));
-	VM_BUG_ON(bad_range(zone, page));
+	VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
+	VM_BUG_ON_PAGE(bad_range(zone, page), page);
 
 	while (order < MAX_ORDER-1) {
 		buddy_idx = __find_buddy_index(page_idx, order);
@@ -827,7 +827,7 @@ static inline void expand(struct zone *zone, struct page *page,
 		area--;
 		high--;
 		size >>= 1;
-		VM_BUG_ON(bad_range(zone, &page[size]));
+		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 		if (high < debug_guardpage_minorder()) {
@@ -980,7 +980,7 @@ int move_freepages(struct zone *zone,
 
 	for (page = start_page; page <= end_page;) {
 		/* Make sure we are not inadvertently changing nodes */
-		VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
+		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
 
 		if (!pfn_valid_within(page_to_pfn(page))) {
 			page++;
@@ -1429,8 +1429,8 @@ void split_page(struct page *page, unsigned int order)
 {
 	int i;
 
-	VM_BUG_ON(PageCompound(page));
-	VM_BUG_ON(!page_count(page));
+	VM_BUG_ON_PAGE(PageCompound(page), page);
+	VM_BUG_ON_PAGE(!page_count(page), page);
 
 #ifdef CONFIG_KMEMCHECK
 	/*
@@ -1577,7 +1577,7 @@ again:
 	zone_statistics(preferred_zone, zone, gfp_flags);
 	local_irq_restore(flags);
 
-	VM_BUG_ON(bad_range(zone, page));
+	VM_BUG_ON_PAGE(bad_range(zone, page), page);
 	if (prep_new_page(page, order, gfp_flags))
 		goto again;
 	return page;
@@ -6021,7 +6021,7 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
 	pfn = page_to_pfn(page);
 	bitmap = get_pageblock_bitmap(zone, pfn);
 	bitidx = pfn_to_bitidx(zone, pfn);
-	VM_BUG_ON(!zone_spans_pfn(zone, pfn));
+	VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page);
 
 	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
 		if (flags & value)
@@ -6539,3 +6539,4 @@ void dump_page(struct page *page, char *reason)
 {
 	dump_page_badflags(page, reason, 0);
 }
+EXPORT_SYMBOL_GPL(dump_page);
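The new EXPORT_SYMBOL_GPL(dump_page) makes the helper callable from GPL
modules, not just built-in code.  A hypothetical, minimal demo module (the
module name and reason string are invented for illustration; the dump_page()
signature is the one shown in the hunk above):

	/* Hypothetical demo module -- not part of this patch. */
	#include <linux/module.h>
	#include <linux/gfp.h>
	#include <linux/mm.h>

	static int __init dump_page_demo_init(void)
	{
		struct page *page = alloc_page(GFP_KERNEL);

		if (!page)
			return -ENOMEM;
		dump_page(page, "dump_page demo");	/* reason string is free-form */
		__free_page(page);
		return 0;
	}

	static void __exit dump_page_demo_exit(void)
	{
	}

	module_init(dump_page_demo_init);
	module_exit(dump_page_demo_exit);
	MODULE_LICENSE("GPL");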