Diffstat (limited to 'mm/migrate.c')
 mm/migrate.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/mm/migrate.c b/mm/migrate.c
index 69687ab4277f..ee6732e1d590 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -418,7 +418,6 @@ int replicate_page_move_mapping(struct address_space *mapping,
 
 	TRACE_TASK(current, "page_count(page) = %d, expected_count = %d, page_has_private? %d\n", page_count(page), expected_count, page_has_private(page));
 
-	expected_count++;
 	if (page_count(page) != expected_count ||
 		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
 		spin_unlock_irq(&mapping->tree_lock);
@@ -464,9 +463,8 @@ int replicate_page_move_mapping(struct address_space *mapping,
 	 * to one less reference.
 	 * We know this isn't the last reference.
 	 */
-	//page_unfreeze_refs(page, expected_count - 1);
-	page_unfreeze_refs(page, expected_count - 2);
+	page_unfreeze_refs(page, expected_count - 1);
 
 	/*
 	 * If moved to a different zone then also account
 	 * the page for that zone. Other VM counters will be
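
The two hunks above look like one logical change: the local `expected_count++` is dropped, and the unfreeze target goes from `expected_count - 2` back to `expected_count - 1`. That is consistent with `expected_count` now already including the mapping's radix-tree reference when it is first computed; the computation itself is outside this diff, so that reading is an assumption. Below is a minimal userspace sketch of the resulting refcount arithmetic; `freeze_refs`/`unfreeze_refs` are simplified stand-ins for the kernel's `page_freeze_refs()`/`page_unfreeze_refs()` atomics, not the real functions:

#include <stdio.h>
#include <stdbool.h>

static int page_count_sim;	/* stand-in for the page's atomic _count */

static bool freeze_refs(int expected)
{
	/* kernel analogue: atomic cmpxchg of the refcount from expected to 0 */
	if (page_count_sim != expected)
		return false;
	page_count_sim = 0;
	return true;
}

static void unfreeze_refs(int count)
{
	/* kernel analogue: atomically restore the refcount */
	page_count_sim = count;
}

int main(void)
{
	bool has_private = false;

	/* Assumption: expected_count already covers the caller's reference,
	 * the radix-tree reference, and any private (buffer) reference, so
	 * the removed "expected_count++" is no longer needed. */
	int expected_count = 2 + (has_private ? 1 : 0);

	page_count_sim = expected_count;	/* page is in the expected state */

	if (!freeze_refs(expected_count))
		return 1;			/* raced with another user */

	/* ... the mapping's slot is repointed to the new page here ... */

	unfreeze_refs(expected_count - 1);	/* old page lost the tree ref */
	printf("old page refcount after move: %d\n", page_count_sim);
	return 0;
}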
@@ -1184,6 +1182,8 @@ static int __unmap_and_copy(struct page *page, struct page *newpage,
 		 * the retry loop is too short and in the sync-light case,
 		 * the overhead of stalling is too much
 		 */
+		BUG();
+		/*
 		if (mode != MIGRATE_SYNC) {
 			rc = -EBUSY;
 			goto out_unlock;
@@ -1191,6 +1191,7 @@ static int __unmap_and_copy(struct page *page, struct page *newpage,
 		if (!force)
 			goto out_unlock;
 		wait_on_page_writeback(page);
+		*/
 	}
 	/*
 	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
@@ -1683,7 +1684,8 @@ int replicate_pages(struct list_head *from, new_page_t get_new_page,
 
 	list_for_each_entry_safe(page, page2, from, lru) {
 		cond_resched();
 
+		TRACE_TASK(current, "PageAnon=%d\n", PageAnon(page));
 		rc = unmap_and_copy(get_new_page, put_new_page, private, page, pass > 2, mode);
 		TRACE_TASK(current, "rc = %d\n", rc);
 
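
The final hunk is instrumentation only: a TRACE_TASK line (apparently the LITMUS^RT per-task trace macro, matching its existing uses in this file) logs whether each page is anonymous before unmap_and_copy() runs, complementing the existing trace of its return code. PageAnon() distinguishes anonymous pages from page-cache pages, which take different unmap/copy paths, so the trace makes it easier to correlate failures with the page type.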