Diffstat (limited to 'mm/migrate.c')
 mm/migrate.c | 25 ++++++-------------------
 1 file changed, 6 insertions(+), 19 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index ccf8966caf6f..d4fd680be3b0 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -709,7 +709,6 @@ static bool buffer_migrate_lock_buffers(struct buffer_head *head,
         /* Simple case, sync compaction */
         if (mode != MIGRATE_ASYNC) {
                 do {
-                        get_bh(bh);
                         lock_buffer(bh);
                         bh = bh->b_this_page;
 
@@ -720,18 +719,15 @@ static bool buffer_migrate_lock_buffers(struct buffer_head *head,
 
         /* async case, we cannot block on lock_buffer so use trylock_buffer */
         do {
-                get_bh(bh);
                 if (!trylock_buffer(bh)) {
                         /*
                          * We failed to lock the buffer and cannot stall in
                          * async migration. Release the taken locks
                          */
                         struct buffer_head *failed_bh = bh;
-                        put_bh(failed_bh);
                         bh = head;
                         while (bh != failed_bh) {
                                 unlock_buffer(bh);
-                                put_bh(bh);
                                 bh = bh->b_this_page;
                         }
                         return false;
@@ -818,7 +814,6 @@ unlock_buffers:
         bh = head;
         do {
                 unlock_buffer(bh);
-                put_bh(bh);
                 bh = bh->b_this_page;
 
         } while (bh != head);
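
Taken together, the three hunks above make one change: the lock and unlock loops around buffer migration stop taking a reference on each buffer_head. The apparent rationale is that the page is locked for the whole operation, and a locked page cannot have its buffers stripped (try_to_free_buffers() runs under the page lock), so the per-buffer get_bh()/put_bh() pairs add nothing. A sketch of how buffer_migrate_lock_buffers() reads after the change; the parts outside the hunks are reconstructed and may differ from the actual tree:

static bool buffer_migrate_lock_buffers(struct buffer_head *head,
                                        enum migrate_mode mode)
{
        struct buffer_head *bh = head;

        /* Simple case, sync compaction: block until each buffer is locked. */
        if (mode != MIGRATE_ASYNC) {
                do {
                        lock_buffer(bh);
                        bh = bh->b_this_page;
                } while (bh != head);
                return true;
        }

        /* Async case: we must not block, so only trylock each buffer. */
        do {
                if (!trylock_buffer(bh)) {
                        /* Undo the locks taken so far, then give up. */
                        struct buffer_head *failed_bh = bh;

                        bh = head;
                        while (bh != failed_bh) {
                                unlock_buffer(bh);
                                bh = bh->b_this_page;
                        }
                        return false;
                }
                bh = bh->b_this_page;
        } while (bh != head);

        return true;
}
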
@@ -1135,10 +1130,13 @@ out:
          * If migration is successful, decrease refcount of the newpage
          * which will not free the page because new page owner increased
          * refcounter. As well, if it is LRU page, add the page to LRU
-         * list in here.
+         * list in here. Use the old state of the isolated source page to
+         * determine if we migrated a LRU page. newpage was already unlocked
+         * and possibly modified by its owner - don't rely on the page
+         * state.
          */
         if (rc == MIGRATEPAGE_SUCCESS) {
-                if (unlikely(__PageMovable(newpage)))
+                if (unlikely(!is_lru))
                         put_page(newpage);
                 else
                         putback_lru_page(newpage);
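
The comment added above carries the reasoning: once newpage is unlocked, its new owner may immediately reuse or modify it, so a flag test such as __PageMovable(newpage) is no longer reliable; the LRU decision has to come from the source page, sampled while that page was still isolated and stable. A condensed sketch of the resulting pattern in unmap_and_move(); the helper call is abbreviated and the surrounding bookkeeping is omitted:

static int unmap_and_move_sketch(struct page *page, struct page *newpage,
                                 int force, enum migrate_mode mode)
{
        /* Sample the source page while it is still isolated and stable. */
        bool is_lru = !__PageMovable(page);
        int rc;

        /* Stand-in for the real migration step; it unlocks newpage. */
        rc = __unmap_and_move(page, newpage, force, mode);

        if (rc == MIGRATEPAGE_SUCCESS) {
                /*
                 * Do not test newpage here: it is already unlocked and
                 * its owner may have changed its state. Rely on is_lru.
                 */
                if (unlikely(!is_lru))
                        put_page(newpage);
                else
                        putback_lru_page(newpage);
        }
        return rc;
}
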
@@ -1324,19 +1322,8 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
                 goto put_anon;
 
         if (page_mapped(hpage)) {
-                struct address_space *mapping = page_mapping(hpage);
-
-                /*
-                 * try_to_unmap could potentially call huge_pmd_unshare.
-                 * Because of this, take semaphore in write mode here and
-                 * set TTU_RMAP_LOCKED to let lower levels know we have
-                 * taken the lock.
-                 */
-                i_mmap_lock_write(mapping);
-                try_to_unmap(hpage,
-                        TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS|
-                        TTU_RMAP_LOCKED);
-                i_mmap_unlock_write(mapping);
+                try_to_unmap(hpage,
+                        TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
                 page_was_mapped = 1;
         }
 
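
The final hunk drops the write-mode i_mmap_rwsem locking around try_to_unmap() for hugetlb pages and stops passing TTU_RMAP_LOCKED, so the rmap walk takes its own locks again. For context, a hedged sketch of how that flag is consumed, modelled on rmap_walk_file() from kernels of this era; the interval-tree walk itself is elided:

static void rmap_walk_file_sketch(struct page *page,
                                  struct rmap_walk_control *rwc, bool locked)
{
        struct address_space *mapping = page_mapping(page);

        /* When the caller already holds i_mmap_rwsem (TTU_RMAP_LOCKED),
         * it passes locked == true and the walk must not retake it. */
        if (!locked)
                i_mmap_lock_read(mapping);

        /* ... walk mapping->i_mmap and call rwc->rmap_one() on each vma ... */

        if (!locked)
                i_mmap_unlock_read(mapping);
}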