Diffstat (limited to 'mm/swap.c')
-rw-r--r--  mm/swap.c  36
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index d1100b619e61..b31ba67d440a 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -57,7 +57,7 @@ static void __page_cache_release(struct page *page)
 
 		spin_lock_irqsave(&zone->lru_lock, flags);
 		lruvec = mem_cgroup_page_lruvec(page, zone);
-		VM_BUG_ON(!PageLRU(page));
+		VM_BUG_ON_PAGE(!PageLRU(page), page);
 		__ClearPageLRU(page);
 		del_page_from_lru_list(page, lruvec, page_off_lru(page));
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
@@ -130,8 +130,8 @@ static void put_compound_page(struct page *page)
 			 * __split_huge_page_refcount cannot race
 			 * here.
 			 */
-			VM_BUG_ON(!PageHead(page_head));
-			VM_BUG_ON(page_mapcount(page) != 0);
+			VM_BUG_ON_PAGE(!PageHead(page_head), page_head);
+			VM_BUG_ON_PAGE(page_mapcount(page) != 0, page);
 			if (put_page_testzero(page_head)) {
 				/*
 				 * If this is the tail of a slab
@@ -148,7 +148,7 @@ static void put_compound_page(struct page *page)
 				 * the compound page enters the buddy
 				 * allocator.
 				 */
-				VM_BUG_ON(PageSlab(page_head));
+				VM_BUG_ON_PAGE(PageSlab(page_head), page_head);
 				__put_compound_page(page_head);
 			}
 			return;
@@ -199,7 +199,7 @@ out_put_single:
 				__put_single_page(page);
 			return;
 		}
-		VM_BUG_ON(page_head != page->first_page);
+		VM_BUG_ON_PAGE(page_head != page->first_page, page);
 		/*
 		 * We can release the refcount taken by
 		 * get_page_unless_zero() now that
@@ -207,12 +207,12 @@ out_put_single:
 		 * compound_lock.
 		 */
 		if (put_page_testzero(page_head))
-			VM_BUG_ON(1);
+			VM_BUG_ON_PAGE(1, page_head);
 		/* __split_huge_page_refcount will wait now */
-		VM_BUG_ON(page_mapcount(page) <= 0);
+		VM_BUG_ON_PAGE(page_mapcount(page) <= 0, page);
 		atomic_dec(&page->_mapcount);
-		VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
-		VM_BUG_ON(atomic_read(&page->_count) != 0);
+		VM_BUG_ON_PAGE(atomic_read(&page_head->_count) <= 0, page_head);
+		VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
 		compound_unlock_irqrestore(page_head, flags);
 
 		if (put_page_testzero(page_head)) {
@@ -223,7 +223,7 @@ out_put_single:
 		}
 	} else {
 		/* page_head is a dangling pointer */
-		VM_BUG_ON(PageTail(page));
+		VM_BUG_ON_PAGE(PageTail(page), page);
 		goto out_put_single;
 	}
 }
@@ -264,7 +264,7 @@ bool __get_page_tail(struct page *page)
 			 * page. __split_huge_page_refcount
 			 * cannot race here.
 			 */
-			VM_BUG_ON(!PageHead(page_head));
+			VM_BUG_ON_PAGE(!PageHead(page_head), page_head);
 			__get_page_tail_foll(page, true);
 			return true;
 		} else {
@@ -604,8 +604,8 @@ EXPORT_SYMBOL(__lru_cache_add);
  */
 void lru_cache_add(struct page *page)
 {
-	VM_BUG_ON(PageActive(page) && PageUnevictable(page));
-	VM_BUG_ON(PageLRU(page));
+	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
+	VM_BUG_ON_PAGE(PageLRU(page), page);
 	__lru_cache_add(page);
 }
 
@@ -846,7 +846,7 @@ void release_pages(struct page **pages, int nr, int cold)
 			}
 
 			lruvec = mem_cgroup_page_lruvec(page, zone);
-			VM_BUG_ON(!PageLRU(page));
+			VM_BUG_ON_PAGE(!PageLRU(page), page);
 			__ClearPageLRU(page);
 			del_page_from_lru_list(page, lruvec, page_off_lru(page));
 		}
@@ -888,9 +888,9 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
 {
 	const int file = 0;
 
-	VM_BUG_ON(!PageHead(page));
-	VM_BUG_ON(PageCompound(page_tail));
-	VM_BUG_ON(PageLRU(page_tail));
+	VM_BUG_ON_PAGE(!PageHead(page), page);
+	VM_BUG_ON_PAGE(PageCompound(page_tail), page);
+	VM_BUG_ON_PAGE(PageLRU(page_tail), page);
 	VM_BUG_ON(NR_CPUS != 1 &&
 		  !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));
 
@@ -929,7 +929,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
 	int active = PageActive(page);
 	enum lru_list lru = page_lru(page);
 
-	VM_BUG_ON(PageLRU(page));
+	VM_BUG_ON_PAGE(PageLRU(page), page);
 
 	SetPageLRU(page);
 	add_page_to_lru_list(page, lruvec, lru);
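
Note on the conversion above: every assertion that involves a page moves from VM_BUG_ON() to VM_BUG_ON_PAGE(), so a failing check reports the state of the offending struct page rather than only a backtrace. The one check left as plain VM_BUG_ON() is the lru_lock assertion in lru_add_page_tail(), which has no page to report. Below is a minimal userspace sketch of the pattern, assuming the dump-then-BUG() shape the new macro takes in include/linux/mmdebug.h; the struct page, dump_page() and abort() here are simplified stand-ins for the kernel versions, not the real implementation.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for the kernel's struct page. */
struct page {
	unsigned long flags;
	int _count;	/* stand-in for the atomic_t reference count */
	int _mapcount;	/* stand-in for the atomic_t map count */
};

/* Stand-in for the kernel's dump_page(): report the page state. */
static void dump_page(struct page *page)
{
	fprintf(stderr, "page:%p count:%d mapcount:%d flags:%#lx\n",
		(void *)page, page->_count, page->_mapcount, page->flags);
}

/* Old style: die with only the backtrace to go on. */
#define VM_BUG_ON(cond) \
	do { if (cond) abort(); } while (0)

/* New style: dump the page that tripped the check, then die. */
#define VM_BUG_ON_PAGE(cond, page) \
	do { if (cond) { dump_page(page); abort(); } } while (0)

int main(void)
{
	struct page page = { .flags = 0x0, ._count = 1, ._mapcount = -1 };

	VM_BUG_ON_PAGE(page._count <= 0, &page);	/* passes */
	page._count = 0;
	VM_BUG_ON_PAGE(page._count <= 0, &page);	/* dumps the page, aborts */
	return 0;
}

The second macro argument selects which page gets dumped, and as the diff shows it is not always the page named in the condition: the PageLRU(page_tail) check in lru_add_page_tail() dumps the head page, and VM_BUG_ON_PAGE(1, page_head) names no page in the condition at all.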