about | summary | refs | log | tree | commit | diff | stats
path: root/mm/vmscan.c
diff options
context:
space:
mode:
author	Sasha Levin <sasha.levin@oracle.com>	2014-01-23 18:52:54 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-23 19:36:50 -0500
commit	309381feaee564281c3d9e90fbca8963bb7428ad (patch)
tree	7e9f990c0cffcb8c5fc90deb1c7eac445c5ada0e /mm/vmscan.c
parent	e3bba3c3c90cd434c1ccb9e5dc704a96baf9541c (diff)
mm: dump page when hitting a VM_BUG_ON using VM_BUG_ON_PAGE

Most of the VM_BUG_ON assertions are performed on a page.  Usually, when
one of these assertions fails we'll get a BUG_ON with a call stack and
the registers.

I've recently noticed, based on the requests to add a small piece of
code that dumps the page to various VM_BUG_ON sites, that the page dump
is quite useful to people debugging issues in mm.

This patch adds a VM_BUG_ON_PAGE(cond, page) which, beyond doing what
VM_BUG_ON() does, also dumps the page before executing the actual
BUG_ON.

[akpm@linux-foundation.org: fix up includes]
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index eea668d9cff6..2254f36b74b8 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -603,7 +603,7 @@ void putback_lru_page(struct page *page)
 	bool is_unevictable;
 	int was_unevictable = PageUnevictable(page);
 
-	VM_BUG_ON(PageLRU(page));
+	VM_BUG_ON_PAGE(PageLRU(page), page);
 
 redo:
 	ClearPageUnevictable(page);
@@ -794,8 +794,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		if (!trylock_page(page))
 			goto keep;
 
-		VM_BUG_ON(PageActive(page));
-		VM_BUG_ON(page_zone(page) != zone);
+		VM_BUG_ON_PAGE(PageActive(page), page);
+		VM_BUG_ON_PAGE(page_zone(page) != zone, page);
 
 		sc->nr_scanned++;
 
@@ -1079,14 +1079,14 @@ activate_locked:
 		/* Not a candidate for swapping, so reclaim swap space. */
 		if (PageSwapCache(page) && vm_swap_full())
 			try_to_free_swap(page);
-		VM_BUG_ON(PageActive(page));
+		VM_BUG_ON_PAGE(PageActive(page), page);
 		SetPageActive(page);
 		pgactivate++;
 keep_locked:
 		unlock_page(page);
 keep:
 		list_add(&page->lru, &ret_pages);
-		VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
+		VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
 	}
 
 	free_hot_cold_page_list(&free_pages, 1);
@@ -1240,7 +1240,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 		page = lru_to_page(src);
 		prefetchw_prev_lru_page(page, src, flags);
 
-		VM_BUG_ON(!PageLRU(page));
+		VM_BUG_ON_PAGE(!PageLRU(page), page);
 
 		switch (__isolate_lru_page(page, mode)) {
 		case 0:
@@ -1295,7 +1295,7 @@ int isolate_lru_page(struct page *page)
 {
 	int ret = -EBUSY;
 
-	VM_BUG_ON(!page_count(page));
+	VM_BUG_ON_PAGE(!page_count(page), page);
 
 	if (PageLRU(page)) {
 		struct zone *zone = page_zone(page);
@@ -1366,7 +1366,7 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
 		struct page *page = lru_to_page(page_list);
 		int lru;
 
-		VM_BUG_ON(PageLRU(page));
+		VM_BUG_ON_PAGE(PageLRU(page), page);
 		list_del(&page->lru);
 		if (unlikely(!page_evictable(page))) {
 			spin_unlock_irq(&zone->lru_lock);
@@ -1586,7 +1586,7 @@ static void move_active_pages_to_lru(struct lruvec *lruvec,
 		page = lru_to_page(list);
 		lruvec = mem_cgroup_page_lruvec(page, zone);
 
-		VM_BUG_ON(PageLRU(page));
+		VM_BUG_ON_PAGE(PageLRU(page), page);
 		SetPageLRU(page);
 
 		nr_pages = hpage_nr_pages(page);
@@ -3701,7 +3701,7 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
 		if (page_evictable(page)) {
 			enum lru_list lru = page_lru_base_type(page);
 
-			VM_BUG_ON(PageActive(page));
+			VM_BUG_ON_PAGE(PageActive(page), page);
 			ClearPageUnevictable(page);
 			del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
 			add_page_to_lru_list(page, lruvec, lru);