about summary refs log tree commit diff stats
path: root/mm/swap.c
diff options
context:
space:
mode:
authorNick Piggin <npiggin@suse.de>2006-09-26 02:30:55 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2006-09-26 11:48:44 -0400
commit725d704ecaca4a43f067092c140d4f3271cf2856 (patch)
tree320cf8ab5457ac6c01c05da8c30d6026538ee259 /mm/swap.c
parenta6ca1b99ed434f3fb41bbed647ed36c0420501e5 (diff)
[PATCH] mm: VM_BUG_ON
Introduce a VM_BUG_ON, which is turned on with CONFIG_DEBUG_VM. Use this in the lightweight, inline refcounting functions; PageLRU and PageActive checks in vmscan, because they're pretty well confined to vmscan. And in page allocate/free fastpaths which can be the hottest parts of the kernel for kbuilds. Unlike BUG_ON, VM_BUG_ON must not be used to execute statements with side-effects, and should not be used outside core mm code. Signed-off-by: Nick Piggin <npiggin@suse.de> Cc: Hugh Dickins <hugh@veritas.com> Cc: Christoph Lameter <clameter@engr.sgi.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/swap.c')
-rw-r--r--  mm/swap.c  12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index 687686a61f7c..600235e43704 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -233,7 +233,7 @@ void fastcall __page_cache_release(struct page *page)
 	struct zone *zone = page_zone(page);

 	spin_lock_irqsave(&zone->lru_lock, flags);
-	BUG_ON(!PageLRU(page));
+	VM_BUG_ON(!PageLRU(page));
 	__ClearPageLRU(page);
 	del_page_from_lru(zone, page);
 	spin_unlock_irqrestore(&zone->lru_lock, flags);
@@ -284,7 +284,7 @@ void release_pages(struct page **pages, int nr, int cold)
 			zone = pagezone;
 			spin_lock_irq(&zone->lru_lock);
 		}
-		BUG_ON(!PageLRU(page));
+		VM_BUG_ON(!PageLRU(page));
 		__ClearPageLRU(page);
 		del_page_from_lru(zone, page);
 	}
@@ -337,7 +337,7 @@ void __pagevec_release_nonlru(struct pagevec *pvec)
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		struct page *page = pvec->pages[i];

-		BUG_ON(PageLRU(page));
+		VM_BUG_ON(PageLRU(page));
 		if (put_page_testzero(page))
 			pagevec_add(&pages_to_free, page);
 	}
@@ -364,7 +364,7 @@ void __pagevec_lru_add(struct pagevec *pvec)
 			zone = pagezone;
 			spin_lock_irq(&zone->lru_lock);
 		}
-		BUG_ON(PageLRU(page));
+		VM_BUG_ON(PageLRU(page));
 		SetPageLRU(page);
 		add_page_to_inactive_list(zone, page);
 	}
@@ -391,9 +391,9 @@ void __pagevec_lru_add_active(struct pagevec *pvec)
 			zone = pagezone;
 			spin_lock_irq(&zone->lru_lock);
 		}
-		BUG_ON(PageLRU(page));
+		VM_BUG_ON(PageLRU(page));
 		SetPageLRU(page);
-		BUG_ON(PageActive(page));
+		VM_BUG_ON(PageActive(page));
 		SetPageActive(page);
 		add_page_to_active_list(zone, page);
 	}