author	Sasha Levin <sasha.levin@oracle.com>	2014-01-23 18:52:54 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-23 19:36:50 -0500
commit	309381feaee564281c3d9e90fbca8963bb7428ad (patch)
tree	7e9f990c0cffcb8c5fc90deb1c7eac445c5ada0e
parent	e3bba3c3c90cd434c1ccb9e5dc704a96baf9541c (diff)
mm: dump page when hitting a VM_BUG_ON using VM_BUG_ON_PAGE
Most of the VM_BUG_ON assertions are performed on a page.  Usually, when one
of these assertions fails we get a BUG_ON with a call stack and the registers.

I've recently noticed, based on requests to add a small piece of code that
dumps the page at various VM_BUG_ON sites, that the page dump is quite useful
to people debugging issues in mm.

This patch adds VM_BUG_ON_PAGE(cond, page) which, beyond doing what
VM_BUG_ON() does, also dumps the page before executing the actual BUG_ON.

[akpm@linux-foundation.org: fix up includes]
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
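For illustration, a minimal sketch of what the new assertion does and how a
call site changes. The macro body mirrors the CONFIG_DEBUG_VM definition this
patch adds to include/linux/mmdebug.h; the surrounding helper function is
hypothetical and only shows the intended usage pattern.

/* Mirrors the CONFIG_DEBUG_VM definition added in this patch. */
#define VM_BUG_ON_PAGE(cond, page)					\
	do { if (unlikely(cond)) { dump_page(page, NULL); BUG(); } } while (0)

/* Hypothetical call site, for illustration only. */
static inline void example_get_page(struct page *page)
{
	/* Old form: VM_BUG_ON(page_count(page) == 0);
	 * New form: the offending page is dumped before the BUG(). */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	atomic_inc(&page->_count);
}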
-rw-r--r--	arch/x86/mm/gup.c	8
-rw-r--r--	include/linux/gfp.h	1
-rw-r--r--	include/linux/hugetlb.h	3
-rw-r--r--	include/linux/hugetlb_cgroup.h	5
-rw-r--r--	include/linux/mm.h	29
-rw-r--r--	include/linux/mmdebug.h	9
-rw-r--r--	include/linux/page-flags.h	10
-rw-r--r--	include/linux/pagemap.h	10
-rw-r--r--	include/linux/percpu.h	1
-rw-r--r--	mm/cleancache.c	6
-rw-r--r--	mm/compaction.c	2
-rw-r--r--	mm/filemap.c	16
-rw-r--r--	mm/huge_memory.c	36
-rw-r--r--	mm/hugetlb.c	10
-rw-r--r--	mm/hugetlb_cgroup.c	2
-rw-r--r--	mm/internal.h	10
-rw-r--r--	mm/ksm.c	12
-rw-r--r--	mm/memcontrol.c	28
-rw-r--r--	mm/memory.c	8
-rw-r--r--	mm/migrate.c	6
-rw-r--r--	mm/mlock.c	4
-rw-r--r--	mm/page_alloc.c	21
-rw-r--r--	mm/page_io.c	4
-rw-r--r--	mm/rmap.c	10
-rw-r--r--	mm/shmem.c	8
-rw-r--r--	mm/slub.c	12
-rw-r--r--	mm/swap.c	36
-rw-r--r--	mm/swap_state.c	16
-rw-r--r--	mm/swapfile.c	8
-rw-r--r--	mm/vmscan.c	20
30 files changed, 181 insertions(+), 170 deletions(-)
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index 0596e8e0cc19..207d9aef662d 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -108,8 +108,8 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
 
 static inline void get_head_page_multiple(struct page *page, int nr)
 {
-	VM_BUG_ON(page != compound_head(page));
-	VM_BUG_ON(page_count(page) == 0);
+	VM_BUG_ON_PAGE(page != compound_head(page), page);
+	VM_BUG_ON_PAGE(page_count(page) == 0, page);
 	atomic_add(nr, &page->_count);
 	SetPageReferenced(page);
 }
@@ -135,7 +135,7 @@ static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
 	head = pte_page(pte);
 	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
 	do {
-		VM_BUG_ON(compound_head(page) != head);
+		VM_BUG_ON_PAGE(compound_head(page) != head, page);
 		pages[*nr] = page;
 		if (PageTail(page))
 			get_huge_page_tail(page);
@@ -212,7 +212,7 @@ static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
 	head = pte_page(pte);
 	page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
 	do {
-		VM_BUG_ON(compound_head(page) != head);
+		VM_BUG_ON_PAGE(compound_head(page) != head, page);
 		pages[*nr] = page;
 		if (PageTail(page))
 			get_huge_page_tail(page);
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 9b4dd491f7e8..0437439bc047 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -1,6 +1,7 @@
 #ifndef __LINUX_GFP_H
 #define __LINUX_GFP_H
 
+#include <linux/mmdebug.h>
 #include <linux/mmzone.h>
 #include <linux/stddef.h>
 #include <linux/linkage.h>
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index d01cc972a1d9..8c43cc469d78 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -2,6 +2,7 @@
 #define _LINUX_HUGETLB_H
 
 #include <linux/mm_types.h>
+#include <linux/mmdebug.h>
 #include <linux/fs.h>
 #include <linux/hugetlb_inline.h>
 #include <linux/cgroup.h>
@@ -354,7 +355,7 @@ static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
 
 static inline struct hstate *page_hstate(struct page *page)
 {
-	VM_BUG_ON(!PageHuge(page));
+	VM_BUG_ON_PAGE(!PageHuge(page), page);
 	return size_to_hstate(PAGE_SIZE << compound_order(page));
 }
 
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index ce8217f7b5c2..787bba3bf552 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -15,6 +15,7 @@
 #ifndef _LINUX_HUGETLB_CGROUP_H
 #define _LINUX_HUGETLB_CGROUP_H
 
+#include <linux/mmdebug.h>
 #include <linux/res_counter.h>
 
 struct hugetlb_cgroup;
@@ -28,7 +29,7 @@ struct hugetlb_cgroup;
 
 static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
 {
-	VM_BUG_ON(!PageHuge(page));
+	VM_BUG_ON_PAGE(!PageHuge(page), page);
 
 	if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
 		return NULL;
@@ -38,7 +39,7 @@ static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
 static inline
 int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
 {
-	VM_BUG_ON(!PageHuge(page));
+	VM_BUG_ON_PAGE(!PageHuge(page), page);
 
 	if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
 		return -1;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 03bbcb84d96e..d9992fc128ca 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -5,6 +5,7 @@
 
 #ifdef __KERNEL__
 
+#include <linux/mmdebug.h>
 #include <linux/gfp.h>
 #include <linux/bug.h>
 #include <linux/list.h>
@@ -303,7 +304,7 @@ static inline int get_freepage_migratetype(struct page *page)
  */
 static inline int put_page_testzero(struct page *page)
 {
-	VM_BUG_ON(atomic_read(&page->_count) == 0);
+	VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page);
 	return atomic_dec_and_test(&page->_count);
 }
 
@@ -364,7 +365,7 @@ static inline int is_vmalloc_or_module_addr(const void *x)
 static inline void compound_lock(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	VM_BUG_ON(PageSlab(page));
+	VM_BUG_ON_PAGE(PageSlab(page), page);
 	bit_spin_lock(PG_compound_lock, &page->flags);
 #endif
 }
@@ -372,7 +373,7 @@ static inline void compound_lock(struct page *page)
 static inline void compound_unlock(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	VM_BUG_ON(PageSlab(page));
+	VM_BUG_ON_PAGE(PageSlab(page), page);
 	bit_spin_unlock(PG_compound_lock, &page->flags);
 #endif
 }
@@ -447,7 +448,7 @@ static inline bool __compound_tail_refcounted(struct page *page)
  */
 static inline bool compound_tail_refcounted(struct page *page)
 {
-	VM_BUG_ON(!PageHead(page));
+	VM_BUG_ON_PAGE(!PageHead(page), page);
 	return __compound_tail_refcounted(page);
 }
 
@@ -456,9 +457,9 @@ static inline void get_huge_page_tail(struct page *page)
 	/*
 	 * __split_huge_page_refcount() cannot run from under us.
 	 */
-	VM_BUG_ON(!PageTail(page));
-	VM_BUG_ON(page_mapcount(page) < 0);
-	VM_BUG_ON(atomic_read(&page->_count) != 0);
+	VM_BUG_ON_PAGE(!PageTail(page), page);
+	VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
+	VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
 	if (compound_tail_refcounted(page->first_page))
 		atomic_inc(&page->_mapcount);
 }
@@ -474,7 +475,7 @@ static inline void get_page(struct page *page)
 	 * Getting a normal page or the head of a compound page
 	 * requires to already have an elevated page->_count.
 	 */
-	VM_BUG_ON(atomic_read(&page->_count) <= 0);
+	VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
 	atomic_inc(&page->_count);
 }
 
@@ -511,13 +512,13 @@ static inline int PageBuddy(struct page *page)
 
 static inline void __SetPageBuddy(struct page *page)
 {
-	VM_BUG_ON(atomic_read(&page->_mapcount) != -1);
+	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
 	atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
 }
 
 static inline void __ClearPageBuddy(struct page *page)
 {
-	VM_BUG_ON(!PageBuddy(page));
+	VM_BUG_ON_PAGE(!PageBuddy(page), page);
 	atomic_set(&page->_mapcount, -1);
 }
 
@@ -1401,7 +1402,7 @@ static inline bool ptlock_init(struct page *page)
 	 * slab code uses page->slab_cache and page->first_page (for tail
 	 * pages), which share storage with page->ptl.
 	 */
-	VM_BUG_ON(*(unsigned long *)&page->ptl);
+	VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
 	if (!ptlock_alloc(page))
 		return false;
 	spin_lock_init(ptlock_ptr(page));
@@ -1492,7 +1493,7 @@ static inline bool pgtable_pmd_page_ctor(struct page *page)
 static inline void pgtable_pmd_page_dtor(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	VM_BUG_ON(page->pmd_huge_pte);
+	VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
 #endif
 	ptlock_free(page);
 }
@@ -2029,10 +2030,6 @@ extern void shake_page(struct page *p, int access);
 extern atomic_long_t num_poisoned_pages;
 extern int soft_offline_page(struct page *page, int flags);
 
-extern void dump_page(struct page *page, char *reason);
-extern void dump_page_badflags(struct page *page, char *reason,
-			       unsigned long badflags);
-
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
 extern void clear_huge_page(struct page *page,
 			    unsigned long addr,
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 580bd587d916..5042c036dda9 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -1,10 +1,19 @@
 #ifndef LINUX_MM_DEBUG_H
 #define LINUX_MM_DEBUG_H 1
 
+struct page;
+
+extern void dump_page(struct page *page, char *reason);
+extern void dump_page_badflags(struct page *page, char *reason,
+			       unsigned long badflags);
+
 #ifdef CONFIG_DEBUG_VM
 #define VM_BUG_ON(cond) BUG_ON(cond)
+#define VM_BUG_ON_PAGE(cond, page)					\
+	do { if (unlikely(cond)) { dump_page(page, NULL); BUG(); } } while (0)
 #else
 #define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
+#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond)
 #endif
 
 #ifdef CONFIG_DEBUG_VIRTUAL
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 98ada58f9942..e464b4e987e8 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -412,7 +412,7 @@ static inline void ClearPageCompound(struct page *page)
  */
 static inline int PageTransHuge(struct page *page)
 {
-	VM_BUG_ON(PageTail(page));
+	VM_BUG_ON_PAGE(PageTail(page), page);
 	return PageHead(page);
 }
 
@@ -460,25 +460,25 @@ static inline int PageTransTail(struct page *page)
  */
 static inline int PageSlabPfmemalloc(struct page *page)
 {
-	VM_BUG_ON(!PageSlab(page));
+	VM_BUG_ON_PAGE(!PageSlab(page), page);
 	return PageActive(page);
 }
 
 static inline void SetPageSlabPfmemalloc(struct page *page)
 {
-	VM_BUG_ON(!PageSlab(page));
+	VM_BUG_ON_PAGE(!PageSlab(page), page);
 	SetPageActive(page);
 }
 
 static inline void __ClearPageSlabPfmemalloc(struct page *page)
 {
-	VM_BUG_ON(!PageSlab(page));
+	VM_BUG_ON_PAGE(!PageSlab(page), page);
 	__ClearPageActive(page);
 }
 
 static inline void ClearPageSlabPfmemalloc(struct page *page)
 {
-	VM_BUG_ON(!PageSlab(page));
+	VM_BUG_ON_PAGE(!PageSlab(page), page);
 	ClearPageActive(page);
 }
 
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index e3dea75a078b..1710d1b060ba 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -162,7 +162,7 @@ static inline int page_cache_get_speculative(struct page *page)
 	 * disabling preempt, and hence no need for the "speculative get" that
 	 * SMP requires.
 	 */
-	VM_BUG_ON(page_count(page) == 0);
+	VM_BUG_ON_PAGE(page_count(page) == 0, page);
 	atomic_inc(&page->_count);
 
 #else
@@ -175,7 +175,7 @@ static inline int page_cache_get_speculative(struct page *page)
 		return 0;
 	}
 #endif
-	VM_BUG_ON(PageTail(page));
+	VM_BUG_ON_PAGE(PageTail(page), page);
 
 	return 1;
 }
@@ -191,14 +191,14 @@ static inline int page_cache_add_speculative(struct page *page, int count)
 # ifdef CONFIG_PREEMPT_COUNT
 	VM_BUG_ON(!in_atomic());
 # endif
-	VM_BUG_ON(page_count(page) == 0);
+	VM_BUG_ON_PAGE(page_count(page) == 0, page);
 	atomic_add(count, &page->_count);
 
 #else
 	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
 		return 0;
 #endif
-	VM_BUG_ON(PageCompound(page) && page != compound_head(page));
+	VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);
 
 	return 1;
 }
@@ -210,7 +210,7 @@ static inline int page_freeze_refs(struct page *page, int count)
 
 static inline void page_unfreeze_refs(struct page *page, int count)
 {
-	VM_BUG_ON(page_count(page) != 0);
+	VM_BUG_ON_PAGE(page_count(page) != 0, page);
 	VM_BUG_ON(count == 0);
 
 	atomic_set(&page->_count, count);
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 9e4761caa80c..e3817d2441b6 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -1,6 +1,7 @@
 #ifndef __LINUX_PERCPU_H
 #define __LINUX_PERCPU_H
 
+#include <linux/mmdebug.h>
 #include <linux/preempt.h>
 #include <linux/smp.h>
 #include <linux/cpumask.h>
diff --git a/mm/cleancache.c b/mm/cleancache.c
index 5875f48ce279..d0eac4350403 100644
--- a/mm/cleancache.c
+++ b/mm/cleancache.c
@@ -237,7 +237,7 @@ int __cleancache_get_page(struct page *page)
 		goto out;
 	}
 
-	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	fake_pool_id = page->mapping->host->i_sb->cleancache_poolid;
 	if (fake_pool_id < 0)
 		goto out;
@@ -279,7 +279,7 @@ void __cleancache_put_page(struct page *page)
 		return;
 	}
 
-	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	fake_pool_id = page->mapping->host->i_sb->cleancache_poolid;
 	if (fake_pool_id < 0)
 		return;
@@ -318,7 +318,7 @@ void __cleancache_invalidate_page(struct address_space *mapping,
 	if (pool_id < 0)
 		return;
 
-	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	if (cleancache_get_key(mapping->host, &key) >= 0) {
 		cleancache_ops->invalidate_page(pool_id,
 					key, page->index);
diff --git a/mm/compaction.c b/mm/compaction.c
index 3a91a2ea3d34..e0ab02d70f13 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -601,7 +601,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 		if (__isolate_lru_page(page, mode) != 0)
 			continue;
 
-		VM_BUG_ON(PageTransCompound(page));
+		VM_BUG_ON_PAGE(PageTransCompound(page), page);
 
 		/* Successfully isolated */
 		cc->finished_update_migrate = true;
diff --git a/mm/filemap.c b/mm/filemap.c
index b7749a92021c..7a7f3e0db738 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -409,9 +409,9 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 {
 	int error;
 
-	VM_BUG_ON(!PageLocked(old));
-	VM_BUG_ON(!PageLocked(new));
-	VM_BUG_ON(new->mapping);
+	VM_BUG_ON_PAGE(!PageLocked(old), old);
+	VM_BUG_ON_PAGE(!PageLocked(new), new);
+	VM_BUG_ON_PAGE(new->mapping, new);
 
 	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
 	if (!error) {
@@ -461,8 +461,8 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 {
 	int error;
 
-	VM_BUG_ON(!PageLocked(page));
-	VM_BUG_ON(PageSwapBacked(page));
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
+	VM_BUG_ON_PAGE(PageSwapBacked(page), page);
 
 	error = mem_cgroup_cache_charge(page, current->mm,
 					gfp_mask & GFP_RECLAIM_MASK);
@@ -607,7 +607,7 @@ EXPORT_SYMBOL_GPL(add_page_wait_queue);
  */
 void unlock_page(struct page *page)
 {
-	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	clear_bit_unlock(PG_locked, &page->flags);
 	smp_mb__after_clear_bit();
 	wake_up_page(page, PG_locked);
@@ -760,7 +760,7 @@ repeat:
 			page_cache_release(page);
 			goto repeat;
 		}
-		VM_BUG_ON(page->index != offset);
+		VM_BUG_ON_PAGE(page->index != offset, page);
 	}
 	return page;
 }
@@ -1656,7 +1656,7 @@ retry_find:
 		put_page(page);
 		goto retry_find;
 	}
-	VM_BUG_ON(page->index != offset);
+	VM_BUG_ON_PAGE(page->index != offset, page);
 
 	/*
 	 * We have a locked page in the page cache, now we need to check
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 95d1acb0f3d2..25fab7150fa0 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -712,7 +712,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 	pgtable_t pgtable;
 	spinlock_t *ptl;
 
-	VM_BUG_ON(!PageCompound(page));
+	VM_BUG_ON_PAGE(!PageCompound(page), page);
 	pgtable = pte_alloc_one(mm, haddr);
 	if (unlikely(!pgtable))
 		return VM_FAULT_OOM;
@@ -893,7 +893,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		goto out;
 	}
 	src_page = pmd_page(pmd);
-	VM_BUG_ON(!PageHead(src_page));
+	VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
 	get_page(src_page);
 	page_dup_rmap(src_page);
 	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
@@ -1067,7 +1067,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 	ptl = pmd_lock(mm, pmd);
 	if (unlikely(!pmd_same(*pmd, orig_pmd)))
 		goto out_free_pages;
-	VM_BUG_ON(!PageHead(page));
+	VM_BUG_ON_PAGE(!PageHead(page), page);
 
 	pmdp_clear_flush(vma, haddr, pmd);
 	/* leave pmd empty until pte is filled */
@@ -1133,7 +1133,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto out_unlock;
 
 	page = pmd_page(orig_pmd);
-	VM_BUG_ON(!PageCompound(page) || !PageHead(page));
+	VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
 	if (page_mapcount(page) == 1) {
 		pmd_t entry;
 		entry = pmd_mkyoung(orig_pmd);
@@ -1211,7 +1211,7 @@ alloc:
 		add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
 		put_huge_zero_page();
 	} else {
-		VM_BUG_ON(!PageHead(page));
+		VM_BUG_ON_PAGE(!PageHead(page), page);
 		page_remove_rmap(page);
 		put_page(page);
 	}
@@ -1249,7 +1249,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 		goto out;
 
 	page = pmd_page(*pmd);
-	VM_BUG_ON(!PageHead(page));
+	VM_BUG_ON_PAGE(!PageHead(page), page);
 	if (flags & FOLL_TOUCH) {
 		pmd_t _pmd;
 		/*
@@ -1274,7 +1274,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 		}
 	}
 	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
-	VM_BUG_ON(!PageCompound(page));
+	VM_BUG_ON_PAGE(!PageCompound(page), page);
 	if (flags & FOLL_GET)
 		get_page_foll(page);
 
@@ -1432,9 +1432,9 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	} else {
 		page = pmd_page(orig_pmd);
 		page_remove_rmap(page);
-		VM_BUG_ON(page_mapcount(page) < 0);
+		VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
 		add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
-		VM_BUG_ON(!PageHead(page));
+		VM_BUG_ON_PAGE(!PageHead(page), page);
 		atomic_long_dec(&tlb->mm->nr_ptes);
 		spin_unlock(ptl);
 		tlb_remove_page(tlb, page);
@@ -2176,9 +2176,9 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 		if (unlikely(!page))
 			goto out;
 
-		VM_BUG_ON(PageCompound(page));
-		BUG_ON(!PageAnon(page));
-		VM_BUG_ON(!PageSwapBacked(page));
+		VM_BUG_ON_PAGE(PageCompound(page), page);
+		VM_BUG_ON_PAGE(!PageAnon(page), page);
+		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
 
 		/* cannot use mapcount: can't collapse if there's a gup pin */
 		if (page_count(page) != 1)
@@ -2201,8 +2201,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 		}
 		/* 0 stands for page_is_file_cache(page) == false */
 		inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
-		VM_BUG_ON(!PageLocked(page));
-		VM_BUG_ON(PageLRU(page));
+		VM_BUG_ON_PAGE(!PageLocked(page), page);
+		VM_BUG_ON_PAGE(PageLRU(page), page);
 
 		/* If there is no mapped pte young don't collapse the page */
 		if (pte_young(pteval) || PageReferenced(page) ||
@@ -2232,7 +2232,7 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
 		} else {
 			src_page = pte_page(pteval);
 			copy_user_highpage(page, src_page, address, vma);
-			VM_BUG_ON(page_mapcount(src_page) != 1);
+			VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
 			release_pte_page(src_page);
 			/*
 			 * ptl mostly unnecessary, but preempt has to
@@ -2311,7 +2311,7 @@ static struct page
 		       struct vm_area_struct *vma, unsigned long address,
 		       int node)
 {
-	VM_BUG_ON(*hpage);
+	VM_BUG_ON_PAGE(*hpage, *hpage);
 	/*
 	 * Allocate the page while the vma is still valid and under
 	 * the mmap_sem read mode so there is no memory allocation
@@ -2580,7 +2580,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
 		 */
 		node = page_to_nid(page);
 		khugepaged_node_load[node]++;
-		VM_BUG_ON(PageCompound(page));
+		VM_BUG_ON_PAGE(PageCompound(page), page);
 		if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
 			goto out_unmap;
 		/* cannot use mapcount: can't collapse if there's a gup pin */
@@ -2876,7 +2876,7 @@ again:
 		return;
 	}
 	page = pmd_page(*pmd);
-	VM_BUG_ON(!page_count(page));
+	VM_BUG_ON_PAGE(!page_count(page), page);
 	get_page(page);
 	spin_unlock(ptl);
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 04306b9de90d..c01cb9fedb18 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -584,7 +584,7 @@ static void update_and_free_page(struct hstate *h, struct page *page)
 				1 << PG_active | 1 << PG_reserved |
 				1 << PG_private | 1 << PG_writeback);
 	}
-	VM_BUG_ON(hugetlb_cgroup_from_page(page));
+	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
 	set_compound_page_dtor(page, NULL);
 	set_page_refcounted(page);
 	arch_release_hugepage(page);
@@ -1089,7 +1089,7 @@ retry:
 		 * no users -- drop the buddy allocator's reference.
 		 */
 		put_page_testzero(page);
-		VM_BUG_ON(page_count(page));
+		VM_BUG_ON_PAGE(page_count(page), page);
 		enqueue_huge_page(h, page);
 	}
 free:
@@ -3503,7 +3503,7 @@ int dequeue_hwpoisoned_huge_page(struct page *hpage)
 
 bool isolate_huge_page(struct page *page, struct list_head *list)
 {
-	VM_BUG_ON(!PageHead(page));
+	VM_BUG_ON_PAGE(!PageHead(page), page);
 	if (!get_page_unless_zero(page))
 		return false;
 	spin_lock(&hugetlb_lock);
@@ -3514,7 +3514,7 @@ bool isolate_huge_page(struct page *page, struct list_head *list)
 
 void putback_active_hugepage(struct page *page)
 {
-	VM_BUG_ON(!PageHead(page));
+	VM_BUG_ON_PAGE(!PageHead(page), page);
 	spin_lock(&hugetlb_lock);
 	list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
 	spin_unlock(&hugetlb_lock);
@@ -3523,7 +3523,7 @@ void putback_active_hugepage(struct page *page)
 
 bool is_hugepage_active(struct page *page)
 {
-	VM_BUG_ON(!PageHuge(page));
+	VM_BUG_ON_PAGE(!PageHuge(page), page);
 	/*
 	 * This function can be called for a tail page because the caller,
 	 * scan_movable_pages, scans through a given pfn-range which typically
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index d747a84e09b0..cb00829bb466 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -390,7 +390,7 @@ void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
 	if (hugetlb_cgroup_disabled())
 		return;
 
-	VM_BUG_ON(!PageHuge(oldhpage));
+	VM_BUG_ON_PAGE(!PageHuge(oldhpage), oldhpage);
 	spin_lock(&hugetlb_lock);
 	h_cg = hugetlb_cgroup_from_page(oldhpage);
 	set_hugetlb_cgroup(oldhpage, NULL);
diff --git a/mm/internal.h b/mm/internal.h
index a346ba120e42..dc95e979ae56 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -27,8 +27,8 @@ static inline void set_page_count(struct page *page, int v)
  */
 static inline void set_page_refcounted(struct page *page)
 {
-	VM_BUG_ON(PageTail(page));
-	VM_BUG_ON(atomic_read(&page->_count));
+	VM_BUG_ON_PAGE(PageTail(page), page);
+	VM_BUG_ON_PAGE(atomic_read(&page->_count), page);
 	set_page_count(page, 1);
 }
 
@@ -46,7 +46,7 @@ static inline void __get_page_tail_foll(struct page *page,
 	 * speculative page access (like in
 	 * page_cache_get_speculative()) on tail pages.
 	 */
-	VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0);
+	VM_BUG_ON_PAGE(atomic_read(&page->first_page->_count) <= 0, page);
 	if (get_page_head)
 		atomic_inc(&page->first_page->_count);
 	get_huge_page_tail(page);
@@ -71,7 +71,7 @@ static inline void get_page_foll(struct page *page)
 		 * Getting a normal page or the head of a compound page
 		 * requires to already have an elevated page->_count.
 		 */
-		VM_BUG_ON(atomic_read(&page->_count) <= 0);
+		VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
 		atomic_inc(&page->_count);
 	}
 }
@@ -173,7 +173,7 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
 static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
 				    struct page *page)
 {
-	VM_BUG_ON(PageLRU(page));
+	VM_BUG_ON_PAGE(PageLRU(page), page);
 
 	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
 		return 0;
diff --git a/mm/ksm.c b/mm/ksm.c
index 3df141e5f3e0..f91ddf5c3688 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1898,13 +1898,13 @@ int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
 	int ret = SWAP_AGAIN;
 	int search_new_forks = 0;
 
-	VM_BUG_ON(!PageKsm(page));
+	VM_BUG_ON_PAGE(!PageKsm(page), page);
 
 	/*
 	 * Rely on the page lock to protect against concurrent modifications
 	 * to that page's node of the stable tree.
 	 */
-	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
 
 	stable_node = page_stable_node(page);
 	if (!stable_node)
@@ -1958,13 +1958,13 @@ void ksm_migrate_page(struct page *newpage, struct page *oldpage)
 {
 	struct stable_node *stable_node;
 
-	VM_BUG_ON(!PageLocked(oldpage));
-	VM_BUG_ON(!PageLocked(newpage));
-	VM_BUG_ON(newpage->mapping != oldpage->mapping);
+	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
+	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
+	VM_BUG_ON_PAGE(newpage->mapping != oldpage->mapping, newpage);
 
 	stable_node = page_stable_node(newpage);
 	if (stable_node) {
-		VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage));
+		VM_BUG_ON_PAGE(stable_node->kpfn != page_to_pfn(oldpage), oldpage);
 		stable_node->kpfn = page_to_pfn(newpage);
 		/*
 		 * newpage->mapping was set in advance; now we need smp_wmb()
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7890ce9d6bd1..72f2d90e7ef6 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2897,7 +2897,7 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
 	unsigned short id;
 	swp_entry_t ent;
 
-	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
 
 	pc = lookup_page_cgroup(page);
 	lock_page_cgroup(pc);
@@ -2931,7 +2931,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 	bool anon;
 
 	lock_page_cgroup(pc);
-	VM_BUG_ON(PageCgroupUsed(pc));
+	VM_BUG_ON_PAGE(PageCgroupUsed(pc), page);
 	/*
 	 * we don't need page_cgroup_lock about tail pages, becase they are not
 	 * accessed by any other context at this point.
@@ -2966,7 +2966,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 	if (lrucare) {
 		if (was_on_lru) {
 			lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
-			VM_BUG_ON(PageLRU(page));
+			VM_BUG_ON_PAGE(PageLRU(page), page);
 			SetPageLRU(page);
 			add_page_to_lru_list(page, lruvec, page_lru(page));
 		}
@@ -3780,7 +3780,7 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order)
 	if (!memcg)
 		return;
 
-	VM_BUG_ON(mem_cgroup_is_root(memcg));
+	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
 	memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
 }
 #else
@@ -3859,7 +3859,7 @@ static int mem_cgroup_move_account(struct page *page,
 	bool anon = PageAnon(page);
 
 	VM_BUG_ON(from == to);
-	VM_BUG_ON(PageLRU(page));
+	VM_BUG_ON_PAGE(PageLRU(page), page);
 	/*
 	 * The page is isolated from LRU. So, collapse function
 	 * will not handle this page. But page splitting can happen.
@@ -3952,7 +3952,7 @@ static int mem_cgroup_move_parent(struct page *page,
 		parent = root_mem_cgroup;
 
 	if (nr_pages > 1) {
-		VM_BUG_ON(!PageTransHuge(page));
+		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
 		flags = compound_lock_irqsave(page);
 	}
 
@@ -3986,7 +3986,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 
 	if (PageTransHuge(page)) {
 		nr_pages <<= compound_order(page);
-		VM_BUG_ON(!PageTransHuge(page));
+		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
 		/*
 		 * Never OOM-kill a process for a huge page. The
 		 * fault handler will fall back to regular pages.
@@ -4006,8 +4006,8 @@ int mem_cgroup_newpage_charge(struct page *page,
 {
 	if (mem_cgroup_disabled())
 		return 0;
-	VM_BUG_ON(page_mapped(page));
-	VM_BUG_ON(page->mapping && !PageAnon(page));
+	VM_BUG_ON_PAGE(page_mapped(page), page);
+	VM_BUG_ON_PAGE(page->mapping && !PageAnon(page), page);
 	VM_BUG_ON(!mm);
 	return mem_cgroup_charge_common(page, mm, gfp_mask,
 					MEM_CGROUP_CHARGE_TYPE_ANON);
@@ -4211,7 +4211,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype,
 
 	if (PageTransHuge(page)) {
 		nr_pages <<= compound_order(page);
-		VM_BUG_ON(!PageTransHuge(page));
+		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
 	}
 	/*
 	 * Check if our page_cgroup is valid
@@ -4303,7 +4303,7 @@ void mem_cgroup_uncharge_page(struct page *page)
 	/* early check. */
 	if (page_mapped(page))
 		return;
-	VM_BUG_ON(page->mapping && !PageAnon(page));
+	VM_BUG_ON_PAGE(page->mapping && !PageAnon(page), page);
 	/*
 	 * If the page is in swap cache, uncharge should be deferred
 	 * to the swap path, which also properly accounts swap usage
@@ -4323,8 +4323,8 @@ void mem_cgroup_uncharge_page(struct page *page)
 
 void mem_cgroup_uncharge_cache_page(struct page *page)
 {
-	VM_BUG_ON(page_mapped(page));
-	VM_BUG_ON(page->mapping);
+	VM_BUG_ON_PAGE(page_mapped(page), page);
+	VM_BUG_ON_PAGE(page->mapping, page);
 	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE, false);
 }
 
@@ -6880,7 +6880,7 @@ static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
 	enum mc_target_type ret = MC_TARGET_NONE;
 
 	page = pmd_page(pmd);
-	VM_BUG_ON(!page || !PageHead(page));
+	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
 	if (!move_anon())
 		return ret;
 	pc = lookup_page_cgroup(page);
diff --git a/mm/memory.c b/mm/memory.c
index 71d70c082b98..be6a0c0d4ae0 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -289,7 +289,7 @@ int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 			return 0;
 		batch = tlb->active;
 	}
-	VM_BUG_ON(batch->nr > batch->max);
+	VM_BUG_ON_PAGE(batch->nr > batch->max, page);
 
 	return batch->max - batch->nr;
 }
@@ -2702,7 +2702,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 				goto unwritable_page;
 			}
 		} else
-			VM_BUG_ON(!PageLocked(old_page));
+			VM_BUG_ON_PAGE(!PageLocked(old_page), old_page);
 
 		/*
 		 * Since we dropped the lock we need to revalidate
@@ -3358,7 +3358,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (unlikely(!(ret & VM_FAULT_LOCKED)))
 		lock_page(vmf.page);
 	else
		VM_BUG_ON_PAGE(!PageLocked(vmf.page), vmf.page);
 
 	/*
 	 * Should we do an early C-O-W break?
@@ -3395,7 +3395,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 					goto unwritable_page;
 				}
 			} else
-				VM_BUG_ON(!PageLocked(page));
+				VM_BUG_ON_PAGE(!PageLocked(page), page);
 			page_mkwrite = 1;
 		}
 	}
diff --git a/mm/migrate.c b/mm/migrate.c
index a8025befc323..4b3996eb7f0f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -499,7 +499,7 @@ void migrate_page_copy(struct page *newpage, struct page *page)
 	if (PageUptodate(page))
 		SetPageUptodate(newpage);
 	if (TestClearPageActive(page)) {
-		VM_BUG_ON(PageUnevictable(page));
+		VM_BUG_ON_PAGE(PageUnevictable(page), page);
 		SetPageActive(newpage);
 	} else if (TestClearPageUnevictable(page))
 		SetPageUnevictable(newpage);
@@ -871,7 +871,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 	 * free the metadata, so the page can be freed.
 	 */
 	if (!page->mapping) {
-		VM_BUG_ON(PageAnon(page));
+		VM_BUG_ON_PAGE(PageAnon(page), page);
 		if (page_has_private(page)) {
 			try_to_free_buffers(page);
 			goto uncharge;
@@ -1618,7 +1618,7 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
 {
 	int page_lru;
 
-	VM_BUG_ON(compound_order(page) && !PageTransHuge(page));
+	VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
 
 	/* Avoid migrating to a node that is nearly full */
 	if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
diff --git a/mm/mlock.c b/mm/mlock.c
index b30adbe62034..4e1a68162285 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -279,8 +279,8 @@ static int __mlock_posix_error_return(long retval)
 static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec,
 		int *pgrescued)
 {
-	VM_BUG_ON(PageLRU(page));
-	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON_PAGE(PageLRU(page), page);
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
 
 	if (page_mapcount(page) <= 1 && page_evictable(page)) {
 		pagevec_add(pvec, page);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1939f4446a36..f18f016cca80 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -509,12 +509,12 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
 		return 0;
 
 	if (page_is_guard(buddy) && page_order(buddy) == order) {
-		VM_BUG_ON(page_count(buddy) != 0);
+		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
 		return 1;
 	}
 
 	if (PageBuddy(buddy) && page_order(buddy) == order) {
-		VM_BUG_ON(page_count(buddy) != 0);
+		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
 		return 1;
 	}
 	return 0;
@@ -564,8 +564,8 @@ static inline void __free_one_page(struct page *page,
 
 	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
 
-	VM_BUG_ON(page_idx & ((1 << order) - 1));
-	VM_BUG_ON(bad_range(zone, page));
+	VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
+	VM_BUG_ON_PAGE(bad_range(zone, page), page);
 
 	while (order < MAX_ORDER-1) {
 		buddy_idx = __find_buddy_index(page_idx, order);
@@ -827,7 +827,7 @@ static inline void expand(struct zone *zone, struct page *page,
 		area--;
 		high--;
 		size >>= 1;
-		VM_BUG_ON(bad_range(zone, &page[size]));
+		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 		if (high < debug_guardpage_minorder()) {
@@ -980,7 +980,7 @@ int move_freepages(struct zone *zone,
 
 	for (page = start_page; page <= end_page;) {
 		/* Make sure we are not inadvertently changing nodes */
-		VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
+		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
 
 		if (!pfn_valid_within(page_to_pfn(page))) {
 			page++;
@@ -1429,8 +1429,8 @@ void split_page(struct page *page, unsigned int order)
 {
 	int i;
 
-	VM_BUG_ON(PageCompound(page));
-	VM_BUG_ON(!page_count(page));
+	VM_BUG_ON_PAGE(PageCompound(page), page);
+	VM_BUG_ON_PAGE(!page_count(page), page);
 
 #ifdef CONFIG_KMEMCHECK
 	/*
@@ -1577,7 +1577,7 @@ again:
 	zone_statistics(preferred_zone, zone, gfp_flags);
 	local_irq_restore(flags);
 
-	VM_BUG_ON(bad_range(zone, page));
+	VM_BUG_ON_PAGE(bad_range(zone, page), page);
 	if (prep_new_page(page, order, gfp_flags))
 		goto again;
 	return page;
@@ -6021,7 +6021,7 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
 	pfn = page_to_pfn(page);
 	bitmap = get_pageblock_bitmap(zone, pfn);
 	bitidx = pfn_to_bitidx(zone, pfn);
-	VM_BUG_ON(!zone_spans_pfn(zone, pfn));
+	VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page);
 
 	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
 		if (flags & value)
@@ -6539,3 +6539,4 @@ void dump_page(struct page *page, char *reason)
 {
 	dump_page_badflags(page, reason, 0);
 }
+EXPORT_SYMBOL_GPL(dump_page);
diff --git a/mm/page_io.c b/mm/page_io.c
index 8c79a4764be0..7247be6114ac 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -320,8 +320,8 @@ int swap_readpage(struct page *page)
 	int ret = 0;
 	struct swap_info_struct *sis = page_swap_info(page);
 
-	VM_BUG_ON(!PageLocked(page));
-	VM_BUG_ON(PageUptodate(page));
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
+	VM_BUG_ON_PAGE(PageUptodate(page), page);
 	if (frontswap_load(page) == 0) {
 		SetPageUptodate(page);
 		unlock_page(page);
diff --git a/mm/rmap.c b/mm/rmap.c
index 962e2a1e13a0..2dcd3353c3f6 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -894,9 +894,9 @@ void page_move_anon_rmap(struct page *page,
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 
-	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON(!anon_vma);
-	VM_BUG_ON(page->index != linear_page_index(vma, address));
+	VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);
 
 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
 	page->mapping = (struct address_space *) anon_vma;
@@ -995,7 +995,7 @@ void do_page_add_anon_rmap(struct page *page,
 	if (unlikely(PageKsm(page)))
 		return;
 
-	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	/* address might be in next vma when migration races vma_adjust */
 	if (first)
 		__page_set_anon_rmap(page, vma, address, exclusive);
@@ -1481,7 +1481,7 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
 		.anon_lock = page_lock_anon_vma_read,
 	};
 
-	VM_BUG_ON(!PageHuge(page) && PageTransHuge(page));
+	VM_BUG_ON_PAGE(!PageHuge(page) && PageTransHuge(page), page);
 
 	/*
 	 * During exec, a temporary VMA is setup and later moved.
@@ -1533,7 +1533,7 @@ int try_to_munlock(struct page *page)
 
 	};
 
-	VM_BUG_ON(!PageLocked(page) || PageLRU(page));
+	VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);
 
 	ret = rmap_walk(page, &rwc);
 	return ret;
diff --git a/mm/shmem.c b/mm/shmem.c
index 902a14842b74..8156f95ec0cf 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -285,8 +285,8 @@ static int shmem_add_to_page_cache(struct page *page,
 {
 	int error;
 
-	VM_BUG_ON(!PageLocked(page));
-	VM_BUG_ON(!PageSwapBacked(page));
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
+	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
 
 	page_cache_get(page);
 	page->mapping = mapping;
@@ -491,7 +491,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 				continue;
 			if (!unfalloc || !PageUptodate(page)) {
 				if (page->mapping == mapping) {
-					VM_BUG_ON(PageWriteback(page));
+					VM_BUG_ON_PAGE(PageWriteback(page), page);
 					truncate_inode_page(mapping, page);
 				}
 			}
@@ -568,7 +568,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 			lock_page(page);
 			if (!unfalloc || !PageUptodate(page)) {
 				if (page->mapping == mapping) {
-					VM_BUG_ON(PageWriteback(page));
+					VM_BUG_ON_PAGE(PageWriteback(page), page);
 					truncate_inode_page(mapping, page);
 				}
 			}
diff --git a/mm/slub.c b/mm/slub.c
index 545a170ebf9f..34bb8c65a2d8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1559,7 +1559,7 @@ static inline void *acquire_slab(struct kmem_cache *s,
1559 new.freelist = freelist; 1559 new.freelist = freelist;
1560 } 1560 }
1561 1561
1562 VM_BUG_ON(new.frozen); 1562 VM_BUG_ON_PAGE(new.frozen, &new);
1563 new.frozen = 1; 1563 new.frozen = 1;
1564 1564
1565 if (!__cmpxchg_double_slab(s, page, 1565 if (!__cmpxchg_double_slab(s, page,
@@ -1812,7 +1812,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
1812 set_freepointer(s, freelist, prior); 1812 set_freepointer(s, freelist, prior);
1813 new.counters = counters; 1813 new.counters = counters;
1814 new.inuse--; 1814 new.inuse--;
1815 VM_BUG_ON(!new.frozen); 1815 VM_BUG_ON_PAGE(!new.frozen, &new);
1816 1816
1817 } while (!__cmpxchg_double_slab(s, page, 1817 } while (!__cmpxchg_double_slab(s, page,
1818 prior, counters, 1818 prior, counters,
@@ -1840,7 +1840,7 @@ redo:
1840 1840
1841 old.freelist = page->freelist; 1841 old.freelist = page->freelist;
1842 old.counters = page->counters; 1842 old.counters = page->counters;
1843 VM_BUG_ON(!old.frozen); 1843 VM_BUG_ON_PAGE(!old.frozen, &old);
1844 1844
1845 /* Determine target state of the slab */ 1845 /* Determine target state of the slab */
1846 new.counters = old.counters; 1846 new.counters = old.counters;
@@ -1952,7 +1952,7 @@ static void unfreeze_partials(struct kmem_cache *s,
1952 1952
1953 old.freelist = page->freelist; 1953 old.freelist = page->freelist;
1954 old.counters = page->counters; 1954 old.counters = page->counters;
1955 VM_BUG_ON(!old.frozen); 1955 VM_BUG_ON_PAGE(!old.frozen, &old);
1956 1956
1957 new.counters = old.counters; 1957 new.counters = old.counters;
1958 new.freelist = old.freelist; 1958 new.freelist = old.freelist;
@@ -2225,7 +2225,7 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
2225 counters = page->counters; 2225 counters = page->counters;
2226 2226
2227 new.counters = counters; 2227 new.counters = counters;
2228 VM_BUG_ON(!new.frozen); 2228 VM_BUG_ON_PAGE(!new.frozen, &new);
2229 2229
2230 new.inuse = page->objects; 2230 new.inuse = page->objects;
2231 new.frozen = freelist != NULL; 2231 new.frozen = freelist != NULL;
@@ -2319,7 +2319,7 @@ load_freelist:
2319 * page is pointing to the page from which the objects are obtained. 2319 * page is pointing to the page from which the objects are obtained.
2320 * That page must be frozen for per cpu allocations to work. 2320 * That page must be frozen for per cpu allocations to work.
2321 */ 2321 */
2322 VM_BUG_ON(!c->page->frozen); 2322 VM_BUG_ON_PAGE(!c->page->frozen, c->page);
2323 c->freelist = get_freepointer(s, freelist); 2323 c->freelist = get_freepointer(s, freelist);
2324 c->tid = next_tid(c->tid); 2324 c->tid = next_tid(c->tid);
2325 local_irq_restore(flags); 2325 local_irq_restore(flags);
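One detail worth noting in the SLUB hunks: 'old' and 'new' are stack-local struct page copies used purely as containers for the counters/freelist double word that __cmpxchg_double_slab() swaps, so the &new and &old arguments make VM_BUG_ON_PAGE() dump the temporary snapshot rather than the slab's real struct page; only the c->page assertion dumps a page that actually lives in the memmap. A condensed, excerpt-style sketch of the snapshot pattern those assertions sit in (the helper name is hypothetical):

    #include <linux/mm_types.h>

    static void sketch_freeze_check(struct page *page)
    {
            struct page new;                /* stack copy: never mapped, never refcounted */

            new.counters = page->counters;  /* 'frozen' is a bitfield inside counters */
            VM_BUG_ON_PAGE(new.frozen, &new);       /* on failure this dumps the stack copy */
            new.frozen = 1;                 /* freeze the slab for this CPU */
            /* the real code then publishes new.counters via __cmpxchg_double_slab() */
    }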
diff --git a/mm/swap.c b/mm/swap.c
index d1100b619e61..b31ba67d440a 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -57,7 +57,7 @@ static void __page_cache_release(struct page *page)
57 57
58 spin_lock_irqsave(&zone->lru_lock, flags); 58 spin_lock_irqsave(&zone->lru_lock, flags);
59 lruvec = mem_cgroup_page_lruvec(page, zone); 59 lruvec = mem_cgroup_page_lruvec(page, zone);
60 VM_BUG_ON(!PageLRU(page)); 60 VM_BUG_ON_PAGE(!PageLRU(page), page);
61 __ClearPageLRU(page); 61 __ClearPageLRU(page);
62 del_page_from_lru_list(page, lruvec, page_off_lru(page)); 62 del_page_from_lru_list(page, lruvec, page_off_lru(page));
63 spin_unlock_irqrestore(&zone->lru_lock, flags); 63 spin_unlock_irqrestore(&zone->lru_lock, flags);
@@ -130,8 +130,8 @@ static void put_compound_page(struct page *page)
130 * __split_huge_page_refcount cannot race 130 * __split_huge_page_refcount cannot race
131 * here. 131 * here.
132 */ 132 */
133 VM_BUG_ON(!PageHead(page_head)); 133 VM_BUG_ON_PAGE(!PageHead(page_head), page_head);
134 VM_BUG_ON(page_mapcount(page) != 0); 134 VM_BUG_ON_PAGE(page_mapcount(page) != 0, page);
135 if (put_page_testzero(page_head)) { 135 if (put_page_testzero(page_head)) {
136 /* 136 /*
137 * If this is the tail of a slab 137 * If this is the tail of a slab
@@ -148,7 +148,7 @@ static void put_compound_page(struct page *page)
148 * the compound page enters the buddy 148 * the compound page enters the buddy
149 * allocator. 149 * allocator.
150 */ 150 */
151 VM_BUG_ON(PageSlab(page_head)); 151 VM_BUG_ON_PAGE(PageSlab(page_head), page_head);
152 __put_compound_page(page_head); 152 __put_compound_page(page_head);
153 } 153 }
154 return; 154 return;
@@ -199,7 +199,7 @@ out_put_single:
199 __put_single_page(page); 199 __put_single_page(page);
200 return; 200 return;
201 } 201 }
202 VM_BUG_ON(page_head != page->first_page); 202 VM_BUG_ON_PAGE(page_head != page->first_page, page);
203 /* 203 /*
204 * We can release the refcount taken by 204 * We can release the refcount taken by
205 * get_page_unless_zero() now that 205 * get_page_unless_zero() now that
@@ -207,12 +207,12 @@ out_put_single:
207 * compound_lock. 207 * compound_lock.
208 */ 208 */
209 if (put_page_testzero(page_head)) 209 if (put_page_testzero(page_head))
210 VM_BUG_ON(1); 210 VM_BUG_ON_PAGE(1, page_head);
211 /* __split_huge_page_refcount will wait now */ 211 /* __split_huge_page_refcount will wait now */
212 VM_BUG_ON(page_mapcount(page) <= 0); 212 VM_BUG_ON_PAGE(page_mapcount(page) <= 0, page);
213 atomic_dec(&page->_mapcount); 213 atomic_dec(&page->_mapcount);
214 VM_BUG_ON(atomic_read(&page_head->_count) <= 0); 214 VM_BUG_ON_PAGE(atomic_read(&page_head->_count) <= 0, page_head);
215 VM_BUG_ON(atomic_read(&page->_count) != 0); 215 VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
216 compound_unlock_irqrestore(page_head, flags); 216 compound_unlock_irqrestore(page_head, flags);
217 217
218 if (put_page_testzero(page_head)) { 218 if (put_page_testzero(page_head)) {
@@ -223,7 +223,7 @@ out_put_single:
223 } 223 }
224 } else { 224 } else {
225 /* page_head is a dangling pointer */ 225 /* page_head is a dangling pointer */
226 VM_BUG_ON(PageTail(page)); 226 VM_BUG_ON_PAGE(PageTail(page), page);
227 goto out_put_single; 227 goto out_put_single;
228 } 228 }
229} 229}
@@ -264,7 +264,7 @@ bool __get_page_tail(struct page *page)
264 * page. __split_huge_page_refcount 264 * page. __split_huge_page_refcount
265 * cannot race here. 265 * cannot race here.
266 */ 266 */
267 VM_BUG_ON(!PageHead(page_head)); 267 VM_BUG_ON_PAGE(!PageHead(page_head), page_head);
268 __get_page_tail_foll(page, true); 268 __get_page_tail_foll(page, true);
269 return true; 269 return true;
270 } else { 270 } else {
@@ -604,8 +604,8 @@ EXPORT_SYMBOL(__lru_cache_add);
604 */ 604 */
605void lru_cache_add(struct page *page) 605void lru_cache_add(struct page *page)
606{ 606{
607 VM_BUG_ON(PageActive(page) && PageUnevictable(page)); 607 VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
608 VM_BUG_ON(PageLRU(page)); 608 VM_BUG_ON_PAGE(PageLRU(page), page);
609 __lru_cache_add(page); 609 __lru_cache_add(page);
610} 610}
611 611
@@ -846,7 +846,7 @@ void release_pages(struct page **pages, int nr, int cold)
846 } 846 }
847 847
848 lruvec = mem_cgroup_page_lruvec(page, zone); 848 lruvec = mem_cgroup_page_lruvec(page, zone);
849 VM_BUG_ON(!PageLRU(page)); 849 VM_BUG_ON_PAGE(!PageLRU(page), page);
850 __ClearPageLRU(page); 850 __ClearPageLRU(page);
851 del_page_from_lru_list(page, lruvec, page_off_lru(page)); 851 del_page_from_lru_list(page, lruvec, page_off_lru(page));
852 } 852 }
@@ -888,9 +888,9 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
888{ 888{
889 const int file = 0; 889 const int file = 0;
890 890
891 VM_BUG_ON(!PageHead(page)); 891 VM_BUG_ON_PAGE(!PageHead(page), page);
892 VM_BUG_ON(PageCompound(page_tail)); 892 VM_BUG_ON_PAGE(PageCompound(page_tail), page);
893 VM_BUG_ON(PageLRU(page_tail)); 893 VM_BUG_ON_PAGE(PageLRU(page_tail), page);
894 VM_BUG_ON(NR_CPUS != 1 && 894 VM_BUG_ON(NR_CPUS != 1 &&
895 !spin_is_locked(&lruvec_zone(lruvec)->lru_lock)); 895 !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));
896 896
@@ -929,7 +929,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
929 int active = PageActive(page); 929 int active = PageActive(page);
930 enum lru_list lru = page_lru(page); 930 enum lru_list lru = page_lru(page);
931 931
932 VM_BUG_ON(PageLRU(page)); 932 VM_BUG_ON_PAGE(PageLRU(page), page);
933 933
934 SetPageLRU(page); 934 SetPageLRU(page);
935 add_page_to_lru_list(page, lruvec, lru); 935 add_page_to_lru_list(page, lruvec, lru);
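The swap.c conversions mostly come down to picking which of the two pages involved gets dumped: assertions about the head's state pass page_head, while assertions about a tail's mapcount or refcount pass the tail itself. A small sketch of that choice, using the stock helpers; on this kernel a tail page reaches its head through page->first_page, and the function name here is hypothetical:

    #include <linux/mm.h>

    static void sketch_compound_checks(struct page *page)
    {
            if (PageTail(page)) {
                    struct page *head = page->first_page;   /* tail -> head link on this kernel */

                    /* head-state checks dump the head ... */
                    VM_BUG_ON_PAGE(!PageHead(head), head);
                    /* ... tail-state checks dump the tail (condition as in the hunks above,
                     * where it holds for the specific path being asserted) */
                    VM_BUG_ON_PAGE(page_mapcount(page) != 0, page);
            }
    }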
diff --git a/mm/swap_state.c b/mm/swap_state.c
index e6f15f8ca2af..98e85e9c2b2d 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -83,9 +83,9 @@ int __add_to_swap_cache(struct page *page, swp_entry_t entry)
83 int error; 83 int error;
84 struct address_space *address_space; 84 struct address_space *address_space;
85 85
86 VM_BUG_ON(!PageLocked(page)); 86 VM_BUG_ON_PAGE(!PageLocked(page), page);
87 VM_BUG_ON(PageSwapCache(page)); 87 VM_BUG_ON_PAGE(PageSwapCache(page), page);
88 VM_BUG_ON(!PageSwapBacked(page)); 88 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
89 89
90 page_cache_get(page); 90 page_cache_get(page);
91 SetPageSwapCache(page); 91 SetPageSwapCache(page);
@@ -139,9 +139,9 @@ void __delete_from_swap_cache(struct page *page)
139 swp_entry_t entry; 139 swp_entry_t entry;
140 struct address_space *address_space; 140 struct address_space *address_space;
141 141
142 VM_BUG_ON(!PageLocked(page)); 142 VM_BUG_ON_PAGE(!PageLocked(page), page);
143 VM_BUG_ON(!PageSwapCache(page)); 143 VM_BUG_ON_PAGE(!PageSwapCache(page), page);
144 VM_BUG_ON(PageWriteback(page)); 144 VM_BUG_ON_PAGE(PageWriteback(page), page);
145 145
146 entry.val = page_private(page); 146 entry.val = page_private(page);
147 address_space = swap_address_space(entry); 147 address_space = swap_address_space(entry);
@@ -165,8 +165,8 @@ int add_to_swap(struct page *page, struct list_head *list)
165 swp_entry_t entry; 165 swp_entry_t entry;
166 int err; 166 int err;
167 167
168 VM_BUG_ON(!PageLocked(page)); 168 VM_BUG_ON_PAGE(!PageLocked(page), page);
169 VM_BUG_ON(!PageUptodate(page)); 169 VM_BUG_ON_PAGE(!PageUptodate(page), page);
170 170
171 entry = get_swap_page(); 171 entry = get_swap_page();
172 if (!entry.val) 172 if (!entry.val)
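The swap-cache assertions encode a small state machine: __add_to_swap_cache() wants a locked, swap-backed page that is not yet PageSwapCache, and __delete_from_swap_cache() wants a locked PageSwapCache page that is not under writeback. What ties the flag to the rest of the machinery is page_private(); roughly what the add side establishes, condensed from the function above rather than new behaviour (the helper name is hypothetical):

    #include <linux/mm.h>
    #include <linux/pagemap.h>
    #include <linux/swap.h>

    /* Condensed sketch of the state __add_to_swap_cache() sets up. */
    static void sketch_enter_swap_cache(struct page *page, swp_entry_t entry)
    {
            page_cache_get(page);                   /* the swap cache holds its own reference */
            SetPageSwapCache(page);                 /* from here on page_private() is a swp_entry_t */
            set_page_private(page, entry.val);      /* remember which swap slot the page occupies */
    }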
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 612a7c9795f6..d443dea95c27 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -906,7 +906,7 @@ int reuse_swap_page(struct page *page)
906{ 906{
907 int count; 907 int count;
908 908
909 VM_BUG_ON(!PageLocked(page)); 909 VM_BUG_ON_PAGE(!PageLocked(page), page);
910 if (unlikely(PageKsm(page))) 910 if (unlikely(PageKsm(page)))
911 return 0; 911 return 0;
912 count = page_mapcount(page); 912 count = page_mapcount(page);
@@ -926,7 +926,7 @@ int reuse_swap_page(struct page *page)
926 */ 926 */
927int try_to_free_swap(struct page *page) 927int try_to_free_swap(struct page *page)
928{ 928{
929 VM_BUG_ON(!PageLocked(page)); 929 VM_BUG_ON_PAGE(!PageLocked(page), page);
930 930
931 if (!PageSwapCache(page)) 931 if (!PageSwapCache(page))
932 return 0; 932 return 0;
@@ -2714,7 +2714,7 @@ struct swap_info_struct *page_swap_info(struct page *page)
2714 */ 2714 */
2715struct address_space *__page_file_mapping(struct page *page) 2715struct address_space *__page_file_mapping(struct page *page)
2716{ 2716{
2717 VM_BUG_ON(!PageSwapCache(page)); 2717 VM_BUG_ON_PAGE(!PageSwapCache(page), page);
2718 return page_swap_info(page)->swap_file->f_mapping; 2718 return page_swap_info(page)->swap_file->f_mapping;
2719} 2719}
2720EXPORT_SYMBOL_GPL(__page_file_mapping); 2720EXPORT_SYMBOL_GPL(__page_file_mapping);
@@ -2722,7 +2722,7 @@ EXPORT_SYMBOL_GPL(__page_file_mapping);
2722pgoff_t __page_file_index(struct page *page) 2722pgoff_t __page_file_index(struct page *page)
2723{ 2723{
2724 swp_entry_t swap = { .val = page_private(page) }; 2724 swp_entry_t swap = { .val = page_private(page) };
2725 VM_BUG_ON(!PageSwapCache(page)); 2725 VM_BUG_ON_PAGE(!PageSwapCache(page), page);
2726 return swp_offset(swap); 2726 return swp_offset(swap);
2727} 2727}
2728EXPORT_SYMBOL_GPL(__page_file_index); 2728EXPORT_SYMBOL_GPL(__page_file_index);
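__page_file_mapping() and __page_file_index() are the decode side of that convention, which is why PageSwapCache is their precondition: for a page outside the swap cache, page_private() is simply not a swp_entry_t, and decoding it would produce nonsense instead of tripping an explicit check. A sketch of the decode, using the standard swp_type()/swp_offset() helpers (the wrapper name is hypothetical):

    #include <linux/mm.h>
    #include <linux/swap.h>
    #include <linux/swapops.h>

    static pgoff_t sketch_swap_offset(struct page *page)
    {
            swp_entry_t swap = { .val = page_private(page) };

            VM_BUG_ON_PAGE(!PageSwapCache(page), page);     /* same guard as above */
            return swp_offset(swap);        /* slot within the area; swp_type() names the area */
    }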
diff --git a/mm/vmscan.c b/mm/vmscan.c
index eea668d9cff6..2254f36b74b8 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -603,7 +603,7 @@ void putback_lru_page(struct page *page)
603 bool is_unevictable; 603 bool is_unevictable;
604 int was_unevictable = PageUnevictable(page); 604 int was_unevictable = PageUnevictable(page);
605 605
606 VM_BUG_ON(PageLRU(page)); 606 VM_BUG_ON_PAGE(PageLRU(page), page);
607 607
608redo: 608redo:
609 ClearPageUnevictable(page); 609 ClearPageUnevictable(page);
@@ -794,8 +794,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
794 if (!trylock_page(page)) 794 if (!trylock_page(page))
795 goto keep; 795 goto keep;
796 796
797 VM_BUG_ON(PageActive(page)); 797 VM_BUG_ON_PAGE(PageActive(page), page);
798 VM_BUG_ON(page_zone(page) != zone); 798 VM_BUG_ON_PAGE(page_zone(page) != zone, page);
799 799
800 sc->nr_scanned++; 800 sc->nr_scanned++;
801 801
@@ -1079,14 +1079,14 @@ activate_locked:
1079 /* Not a candidate for swapping, so reclaim swap space. */ 1079 /* Not a candidate for swapping, so reclaim swap space. */
1080 if (PageSwapCache(page) && vm_swap_full()) 1080 if (PageSwapCache(page) && vm_swap_full())
1081 try_to_free_swap(page); 1081 try_to_free_swap(page);
1082 VM_BUG_ON(PageActive(page)); 1082 VM_BUG_ON_PAGE(PageActive(page), page);
1083 SetPageActive(page); 1083 SetPageActive(page);
1084 pgactivate++; 1084 pgactivate++;
1085keep_locked: 1085keep_locked:
1086 unlock_page(page); 1086 unlock_page(page);
1087keep: 1087keep:
1088 list_add(&page->lru, &ret_pages); 1088 list_add(&page->lru, &ret_pages);
1089 VM_BUG_ON(PageLRU(page) || PageUnevictable(page)); 1089 VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
1090 } 1090 }
1091 1091
1092 free_hot_cold_page_list(&free_pages, 1); 1092 free_hot_cold_page_list(&free_pages, 1);
@@ -1240,7 +1240,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
1240 page = lru_to_page(src); 1240 page = lru_to_page(src);
1241 prefetchw_prev_lru_page(page, src, flags); 1241 prefetchw_prev_lru_page(page, src, flags);
1242 1242
1243 VM_BUG_ON(!PageLRU(page)); 1243 VM_BUG_ON_PAGE(!PageLRU(page), page);
1244 1244
1245 switch (__isolate_lru_page(page, mode)) { 1245 switch (__isolate_lru_page(page, mode)) {
1246 case 0: 1246 case 0:
@@ -1295,7 +1295,7 @@ int isolate_lru_page(struct page *page)
1295{ 1295{
1296 int ret = -EBUSY; 1296 int ret = -EBUSY;
1297 1297
1298 VM_BUG_ON(!page_count(page)); 1298 VM_BUG_ON_PAGE(!page_count(page), page);
1299 1299
1300 if (PageLRU(page)) { 1300 if (PageLRU(page)) {
1301 struct zone *zone = page_zone(page); 1301 struct zone *zone = page_zone(page);
@@ -1366,7 +1366,7 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
1366 struct page *page = lru_to_page(page_list); 1366 struct page *page = lru_to_page(page_list);
1367 int lru; 1367 int lru;
1368 1368
1369 VM_BUG_ON(PageLRU(page)); 1369 VM_BUG_ON_PAGE(PageLRU(page), page);
1370 list_del(&page->lru); 1370 list_del(&page->lru);
1371 if (unlikely(!page_evictable(page))) { 1371 if (unlikely(!page_evictable(page))) {
1372 spin_unlock_irq(&zone->lru_lock); 1372 spin_unlock_irq(&zone->lru_lock);
@@ -1586,7 +1586,7 @@ static void move_active_pages_to_lru(struct lruvec *lruvec,
1586 page = lru_to_page(list); 1586 page = lru_to_page(list);
1587 lruvec = mem_cgroup_page_lruvec(page, zone); 1587 lruvec = mem_cgroup_page_lruvec(page, zone);
1588 1588
1589 VM_BUG_ON(PageLRU(page)); 1589 VM_BUG_ON_PAGE(PageLRU(page), page);
1590 SetPageLRU(page); 1590 SetPageLRU(page);
1591 1591
1592 nr_pages = hpage_nr_pages(page); 1592 nr_pages = hpage_nr_pages(page);
@@ -3701,7 +3701,7 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
3701 if (page_evictable(page)) { 3701 if (page_evictable(page)) {
3702 enum lru_list lru = page_lru_base_type(page); 3702 enum lru_list lru = page_lru_base_type(page);
3703 3703
3704 VM_BUG_ON(PageActive(page)); 3704 VM_BUG_ON_PAGE(PageActive(page), page);
3705 ClearPageUnevictable(page); 3705 ClearPageUnevictable(page);
3706 del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE); 3706 del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
3707 add_page_to_lru_list(page, lruvec, lru); 3707 add_page_to_lru_list(page, lruvec, lru);
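The vmscan assertions are all about LRU bookkeeping: pages handed to shrink_page_list() must not be PageActive, pages being isolated or put back must (or must not) be PageLRU, and pages that survive reclaim must not be left PageLRU or PageUnevictable. Because the dump includes the page flags, a failure now shows directly which of these bits is in the wrong state. For orientation, roughly how those flags select an LRU list, paraphrasing page_lru() from include/linux/mm_inline.h (the function name here is a hypothetical stand-in):

    #include <linux/mm_inline.h>
    #include <linux/mmzone.h>

    /* Approximate restatement of page_lru(): page flags -> LRU list. */
    static enum lru_list sketch_page_lru(struct page *page)
    {
            enum lru_list lru;

            if (PageUnevictable(page))
                    return LRU_UNEVICTABLE;

            lru = page_lru_base_type(page);         /* LRU_INACTIVE_ANON or LRU_INACTIVE_FILE */
            if (PageActive(page))
                    lru += LRU_ACTIVE;              /* shift to the matching active list */
            return lru;
    }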