[cgit diff view — navigation chrome omitted]
Commit: mm: introduce VM_BUG_ON (debug-only assertion, compiled out unless CONFIG_DEBUG_VM) and convert hot-path BUG_ON checks in mm/ to it.
-rw-r--r--include/linux/mm.h10
-rw-r--r--mm/internal.h4
-rw-r--r--mm/page_alloc.c23
-rw-r--r--mm/swap.c12
-rw-r--r--mm/vmscan.c16
5 files changed, 36 insertions, 29 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 224178a000d2..7d20b25c58fc 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -278,6 +278,12 @@ struct page {
278 */ 278 */
279#include <linux/page-flags.h> 279#include <linux/page-flags.h>
280 280
281#ifdef CONFIG_DEBUG_VM
282#define VM_BUG_ON(cond) BUG_ON(cond)
283#else
284#define VM_BUG_ON(condition) do { } while(0)
285#endif
286
281/* 287/*
282 * Methods to modify the page usage count. 288 * Methods to modify the page usage count.
283 * 289 *
@@ -297,7 +303,7 @@ struct page {
297 */ 303 */
298static inline int put_page_testzero(struct page *page) 304static inline int put_page_testzero(struct page *page)
299{ 305{
300 BUG_ON(atomic_read(&page->_count) == 0); 306 VM_BUG_ON(atomic_read(&page->_count) == 0);
301 return atomic_dec_and_test(&page->_count); 307 return atomic_dec_and_test(&page->_count);
302} 308}
303 309
@@ -307,6 +313,7 @@ static inline int put_page_testzero(struct page *page)
307 */ 313 */
308static inline int get_page_unless_zero(struct page *page) 314static inline int get_page_unless_zero(struct page *page)
309{ 315{
316 VM_BUG_ON(PageCompound(page));
310 return atomic_inc_not_zero(&page->_count); 317 return atomic_inc_not_zero(&page->_count);
311} 318}
312 319
@@ -323,6 +330,7 @@ static inline void get_page(struct page *page)
323{ 330{
324 if (unlikely(PageCompound(page))) 331 if (unlikely(PageCompound(page)))
325 page = (struct page *)page_private(page); 332 page = (struct page *)page_private(page);
333 VM_BUG_ON(atomic_read(&page->_count) == 0);
326 atomic_inc(&page->_count); 334 atomic_inc(&page->_count);
327} 335}
328 336
diff --git a/mm/internal.h b/mm/internal.h
index d20e3cc4aef0..d527b80b292f 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -24,8 +24,8 @@ static inline void set_page_count(struct page *page, int v)
24 */ 24 */
25static inline void set_page_refcounted(struct page *page) 25static inline void set_page_refcounted(struct page *page)
26{ 26{
27 BUG_ON(PageCompound(page) && page_private(page) != (unsigned long)page); 27 VM_BUG_ON(PageCompound(page) && page_private(page) != (unsigned long)page);
28 BUG_ON(atomic_read(&page->_count)); 28 VM_BUG_ON(atomic_read(&page->_count));
29 set_page_count(page, 1); 29 set_page_count(page, 1);
30} 30}
31 31
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8a52ba9fe693..4b33878e9488 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -127,7 +127,6 @@ static int bad_range(struct zone *zone, struct page *page)
127 127
128 return 0; 128 return 0;
129} 129}
130
131#else 130#else
132static inline int bad_range(struct zone *zone, struct page *page) 131static inline int bad_range(struct zone *zone, struct page *page)
133{ 132{
@@ -218,12 +217,12 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
218{ 217{
219 int i; 218 int i;
220 219
221 BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM); 220 VM_BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
222 /* 221 /*
223 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO 222 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
224 * and __GFP_HIGHMEM from hard or soft interrupt context. 223 * and __GFP_HIGHMEM from hard or soft interrupt context.
225 */ 224 */
226 BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt()); 225 VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
227 for (i = 0; i < (1 << order); i++) 226 for (i = 0; i < (1 << order); i++)
228 clear_highpage(page + i); 227 clear_highpage(page + i);
229} 228}
@@ -347,8 +346,8 @@ static inline void __free_one_page(struct page *page,
347 346
348 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1); 347 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
349 348
350 BUG_ON(page_idx & (order_size - 1)); 349 VM_BUG_ON(page_idx & (order_size - 1));
351 BUG_ON(bad_range(zone, page)); 350 VM_BUG_ON(bad_range(zone, page));
352 351
353 zone->free_pages += order_size; 352 zone->free_pages += order_size;
354 while (order < MAX_ORDER-1) { 353 while (order < MAX_ORDER-1) {
@@ -421,7 +420,7 @@ static void free_pages_bulk(struct zone *zone, int count,
421 while (count--) { 420 while (count--) {
422 struct page *page; 421 struct page *page;
423 422
424 BUG_ON(list_empty(list)); 423 VM_BUG_ON(list_empty(list));
425 page = list_entry(list->prev, struct page, lru); 424 page = list_entry(list->prev, struct page, lru);
426 /* have to delete it as __free_one_page list manipulates */ 425 /* have to delete it as __free_one_page list manipulates */
427 list_del(&page->lru); 426 list_del(&page->lru);
@@ -512,7 +511,7 @@ static inline void expand(struct zone *zone, struct page *page,
512 area--; 511 area--;
513 high--; 512 high--;
514 size >>= 1; 513 size >>= 1;
515 BUG_ON(bad_range(zone, &page[size])); 514 VM_BUG_ON(bad_range(zone, &page[size]));
516 list_add(&page[size].lru, &area->free_list); 515 list_add(&page[size].lru, &area->free_list);
517 area->nr_free++; 516 area->nr_free++;
518 set_page_order(&page[size], high); 517 set_page_order(&page[size], high);
@@ -761,8 +760,8 @@ void split_page(struct page *page, unsigned int order)
761{ 760{
762 int i; 761 int i;
763 762
764 BUG_ON(PageCompound(page)); 763 VM_BUG_ON(PageCompound(page));
765 BUG_ON(!page_count(page)); 764 VM_BUG_ON(!page_count(page));
766 for (i = 1; i < (1 << order); i++) 765 for (i = 1; i < (1 << order); i++)
767 set_page_refcounted(page + i); 766 set_page_refcounted(page + i);
768} 767}
@@ -809,7 +808,7 @@ again:
809 local_irq_restore(flags); 808 local_irq_restore(flags);
810 put_cpu(); 809 put_cpu();
811 810
812 BUG_ON(bad_range(zone, page)); 811 VM_BUG_ON(bad_range(zone, page));
813 if (prep_new_page(page, order, gfp_flags)) 812 if (prep_new_page(page, order, gfp_flags))
814 goto again; 813 goto again;
815 return page; 814 return page;
@@ -1083,7 +1082,7 @@ fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
1083 * get_zeroed_page() returns a 32-bit address, which cannot represent 1082 * get_zeroed_page() returns a 32-bit address, which cannot represent
1084 * a highmem page 1083 * a highmem page
1085 */ 1084 */
1086 BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0); 1085 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
1087 1086
1088 page = alloc_pages(gfp_mask | __GFP_ZERO, 0); 1087 page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
1089 if (page) 1088 if (page)
@@ -1116,7 +1115,7 @@ EXPORT_SYMBOL(__free_pages);
1116fastcall void free_pages(unsigned long addr, unsigned int order) 1115fastcall void free_pages(unsigned long addr, unsigned int order)
1117{ 1116{
1118 if (addr != 0) { 1117 if (addr != 0) {
1119 BUG_ON(!virt_addr_valid((void *)addr)); 1118 VM_BUG_ON(!virt_addr_valid((void *)addr));
1120 __free_pages(virt_to_page((void *)addr), order); 1119 __free_pages(virt_to_page((void *)addr), order);
1121 } 1120 }
1122} 1121}
diff --git a/mm/swap.c b/mm/swap.c
index 687686a61f7c..600235e43704 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -233,7 +233,7 @@ void fastcall __page_cache_release(struct page *page)
233 struct zone *zone = page_zone(page); 233 struct zone *zone = page_zone(page);
234 234
235 spin_lock_irqsave(&zone->lru_lock, flags); 235 spin_lock_irqsave(&zone->lru_lock, flags);
236 BUG_ON(!PageLRU(page)); 236 VM_BUG_ON(!PageLRU(page));
237 __ClearPageLRU(page); 237 __ClearPageLRU(page);
238 del_page_from_lru(zone, page); 238 del_page_from_lru(zone, page);
239 spin_unlock_irqrestore(&zone->lru_lock, flags); 239 spin_unlock_irqrestore(&zone->lru_lock, flags);
@@ -284,7 +284,7 @@ void release_pages(struct page **pages, int nr, int cold)
284 zone = pagezone; 284 zone = pagezone;
285 spin_lock_irq(&zone->lru_lock); 285 spin_lock_irq(&zone->lru_lock);
286 } 286 }
287 BUG_ON(!PageLRU(page)); 287 VM_BUG_ON(!PageLRU(page));
288 __ClearPageLRU(page); 288 __ClearPageLRU(page);
289 del_page_from_lru(zone, page); 289 del_page_from_lru(zone, page);
290 } 290 }
@@ -337,7 +337,7 @@ void __pagevec_release_nonlru(struct pagevec *pvec)
337 for (i = 0; i < pagevec_count(pvec); i++) { 337 for (i = 0; i < pagevec_count(pvec); i++) {
338 struct page *page = pvec->pages[i]; 338 struct page *page = pvec->pages[i];
339 339
340 BUG_ON(PageLRU(page)); 340 VM_BUG_ON(PageLRU(page));
341 if (put_page_testzero(page)) 341 if (put_page_testzero(page))
342 pagevec_add(&pages_to_free, page); 342 pagevec_add(&pages_to_free, page);
343 } 343 }
@@ -364,7 +364,7 @@ void __pagevec_lru_add(struct pagevec *pvec)
364 zone = pagezone; 364 zone = pagezone;
365 spin_lock_irq(&zone->lru_lock); 365 spin_lock_irq(&zone->lru_lock);
366 } 366 }
367 BUG_ON(PageLRU(page)); 367 VM_BUG_ON(PageLRU(page));
368 SetPageLRU(page); 368 SetPageLRU(page);
369 add_page_to_inactive_list(zone, page); 369 add_page_to_inactive_list(zone, page);
370 } 370 }
@@ -391,9 +391,9 @@ void __pagevec_lru_add_active(struct pagevec *pvec)
391 zone = pagezone; 391 zone = pagezone;
392 spin_lock_irq(&zone->lru_lock); 392 spin_lock_irq(&zone->lru_lock);
393 } 393 }
394 BUG_ON(PageLRU(page)); 394 VM_BUG_ON(PageLRU(page));
395 SetPageLRU(page); 395 SetPageLRU(page);
396 BUG_ON(PageActive(page)); 396 VM_BUG_ON(PageActive(page));
397 SetPageActive(page); 397 SetPageActive(page);
398 add_page_to_active_list(zone, page); 398 add_page_to_active_list(zone, page);
399 } 399 }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5d4c4d02254d..41a3da3d6ccc 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -440,7 +440,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
440 if (TestSetPageLocked(page)) 440 if (TestSetPageLocked(page))
441 goto keep; 441 goto keep;
442 442
443 BUG_ON(PageActive(page)); 443 VM_BUG_ON(PageActive(page));
444 444
445 sc->nr_scanned++; 445 sc->nr_scanned++;
446 446
@@ -564,7 +564,7 @@ keep_locked:
564 unlock_page(page); 564 unlock_page(page);
565keep: 565keep:
566 list_add(&page->lru, &ret_pages); 566 list_add(&page->lru, &ret_pages);
567 BUG_ON(PageLRU(page)); 567 VM_BUG_ON(PageLRU(page));
568 } 568 }
569 list_splice(&ret_pages, page_list); 569 list_splice(&ret_pages, page_list);
570 if (pagevec_count(&freed_pvec)) 570 if (pagevec_count(&freed_pvec))
@@ -603,7 +603,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
603 page = lru_to_page(src); 603 page = lru_to_page(src);
604 prefetchw_prev_lru_page(page, src, flags); 604 prefetchw_prev_lru_page(page, src, flags);
605 605
606 BUG_ON(!PageLRU(page)); 606 VM_BUG_ON(!PageLRU(page));
607 607
608 list_del(&page->lru); 608 list_del(&page->lru);
609 target = src; 609 target = src;
@@ -674,7 +674,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
674 */ 674 */
675 while (!list_empty(&page_list)) { 675 while (!list_empty(&page_list)) {
676 page = lru_to_page(&page_list); 676 page = lru_to_page(&page_list);
677 BUG_ON(PageLRU(page)); 677 VM_BUG_ON(PageLRU(page));
678 SetPageLRU(page); 678 SetPageLRU(page);
679 list_del(&page->lru); 679 list_del(&page->lru);
680 if (PageActive(page)) 680 if (PageActive(page))
@@ -797,9 +797,9 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
797 while (!list_empty(&l_inactive)) { 797 while (!list_empty(&l_inactive)) {
798 page = lru_to_page(&l_inactive); 798 page = lru_to_page(&l_inactive);
799 prefetchw_prev_lru_page(page, &l_inactive, flags); 799 prefetchw_prev_lru_page(page, &l_inactive, flags);
800 BUG_ON(PageLRU(page)); 800 VM_BUG_ON(PageLRU(page));
801 SetPageLRU(page); 801 SetPageLRU(page);
802 BUG_ON(!PageActive(page)); 802 VM_BUG_ON(!PageActive(page));
803 ClearPageActive(page); 803 ClearPageActive(page);
804 804
805 list_move(&page->lru, &zone->inactive_list); 805 list_move(&page->lru, &zone->inactive_list);
@@ -827,9 +827,9 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
827 while (!list_empty(&l_active)) { 827 while (!list_empty(&l_active)) {
828 page = lru_to_page(&l_active); 828 page = lru_to_page(&l_active);
829 prefetchw_prev_lru_page(page, &l_active, flags); 829 prefetchw_prev_lru_page(page, &l_active, flags);
830 BUG_ON(PageLRU(page)); 830 VM_BUG_ON(PageLRU(page));
831 SetPageLRU(page); 831 SetPageLRU(page);
832 BUG_ON(!PageActive(page)); 832 VM_BUG_ON(!PageActive(page));
833 list_move(&page->lru, &zone->active_list); 833 list_move(&page->lru, &zone->active_list);
834 pgmoved++; 834 pgmoved++;
835 if (!pagevec_add(&pvec, page)) { 835 if (!pagevec_add(&pvec, page)) {