path: root/mm/swapfile.c
author	Hugh Dickins <hugh@veritas.com>	2009-01-06 17:39:25 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-01-06 18:59:02 -0500
commit	51726b1222863852c46ca21ed0115b85d1edfd89 (patch)
tree	fb9d4de47a1ee860003bc4aa7c46651a77ae7b83 /mm/swapfile.c
parent	6d91add09f4bad5f4d4233b13faa392f0c4b16be (diff)
mm: replace some BUG_ONs by VM_BUG_ONs
The swap code is over-provisioned with BUG_ONs on assorted page flags, mostly dating back to 2.3.  They're good documentation, and guard against developer error, but a waste of space on most systems: change them to VM_BUG_ONs, conditional on CONFIG_DEBUG_VM.  Just delete the PagePrivate ones: they're later, from 2.5.69, but even less interesting now.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
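For context, VM_BUG_ON is the CONFIG_DEBUG_VM-gated variant of BUG_ON provided by include/linux/mmdebug.h. A minimal sketch of that definition (the exact form may differ between kernel versions) shows why the change costs nothing on production builds:

/* Sketch of the VM_BUG_ON definition from include/linux/mmdebug.h:
 * with CONFIG_DEBUG_VM enabled it behaves exactly like BUG_ON,
 * otherwise it compiles away to nothing, so the asserts add no
 * code size or runtime cost on most systems.
 */
#ifdef CONFIG_DEBUG_VM
#define VM_BUG_ON(cond)	BUG_ON(cond)
#else
#define VM_BUG_ON(cond)	do { } while (0)
#endif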
Diffstat (limited to 'mm/swapfile.c')
-rw-r--r--	mm/swapfile.c	8
1 file changed, 3 insertions, 5 deletions
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 54a9f87e5162..214e90b94946 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -333,7 +333,7 @@ int can_share_swap_page(struct page *page)
 {
 	int count;
 
-	BUG_ON(!PageLocked(page));
+	VM_BUG_ON(!PageLocked(page));
 	count = page_mapcount(page);
 	if (count <= 1 && PageSwapCache(page))
 		count += page_swapcount(page);
@@ -350,8 +350,7 @@ static int remove_exclusive_swap_page_count(struct page *page, int count)
 	struct swap_info_struct * p;
 	swp_entry_t entry;
 
-	BUG_ON(PagePrivate(page));
-	BUG_ON(!PageLocked(page));
+	VM_BUG_ON(!PageLocked(page));
 
 	if (!PageSwapCache(page))
 		return 0;
@@ -432,7 +431,6 @@ void free_swap_and_cache(swp_entry_t entry)
 	if (page) {
 		int one_user;
 
-		BUG_ON(PagePrivate(page));
 		one_user = (page_count(page) == 2);
 		/* Only cache user (+us), or swap space full? Free it! */
 		/* Also recheck PageSwapCache after page is locked (above) */
@@ -1209,7 +1207,7 @@ int page_queue_congested(struct page *page)
 {
 	struct backing_dev_info *bdi;
 
-	BUG_ON(!PageLocked(page));	/* It pins the swap_info_struct */
+	VM_BUG_ON(!PageLocked(page));	/* It pins the swap_info_struct */
 
 	if (PageSwapCache(page)) {
 		swp_entry_t entry = { .val = page_private(page) };