author     Jan Kara <jack@suse.cz>  2018-12-28 03:39:05 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-12-28 15:11:51 -0500
commit     cc4f11e69fd00c61c38619759b07d00631bda5ca (patch)
tree       24eca7a3290201f5534870d26c70dbd5f1f28020 /mm
parent     0b3901b38d9d916f634e903ce7cd2a8ddd5b1559 (diff)
mm: migrate: lock buffers before migrate_page_move_mapping()
Lock buffers before calling into migrate_page_move_mapping() so that that
function doesn't have to know about buffers (which is somewhat unexpected
anyway) and all the buffer head logic is in buffer_migrate_page().

Link: http://lkml.kernel.org/r/20181211172143.7358-3-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
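In outline, the patch inverts the ordering of buffer locking relative to the
mapping move. A condensed before/after sketch, paraphrasing the diff below
rather than quoting it verbatim (error handling and the buffer-unlock path are
abbreviated):

	/*
	 * Before: buffer locking was buried inside the mapping move.
	 * In MIGRATE_ASYNC mode, migrate_page_move_mapping() trylocked
	 * the buffers internally; sync modes locked them afterwards.
	 */
	head = page_buffers(page);
	rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);
	if (mode != MIGRATE_ASYNC)
		BUG_ON(!buffer_migrate_lock_buffers(head, mode));

	/*
	 * After: check the reference count cheaply, lock every buffer up
	 * front in all modes, then do a buffer-agnostic mapping move
	 * (note the NULL where the buffer head used to be passed).
	 */
	if (page_count(page) != expected_page_refs(page))
		return -EAGAIN;
	head = page_buffers(page);
	if (!buffer_migrate_lock_buffers(head, mode))
		return -EAGAIN;
	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);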
Diffstat (limited to 'mm')
-rw-r--r--  mm/migrate.c | 39 +++++++++++++--------------------------
1 file changed, 13 insertions(+), 26 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 94c9ebf1f33e..e0bc03e15e74 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -487,20 +487,6 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	}
 
 	/*
-	 * In the async migration case of moving a page with buffers, lock the
-	 * buffers using trylock before the mapping is moved. If the mapping
-	 * was moved, we later failed to lock the buffers and could not move
-	 * the mapping back due to an elevated page count, we would have to
-	 * block waiting on other references to be dropped.
-	 */
-	if (mode == MIGRATE_ASYNC && head &&
-			!buffer_migrate_lock_buffers(head, mode)) {
-		page_ref_unfreeze(page, expected_count);
-		xas_unlock_irq(&xas);
-		return -EAGAIN;
-	}
-
-	/*
 	 * Now we know that no one else is looking at the page:
 	 * no turning back from here.
 	 */
@@ -775,24 +761,23 @@ int buffer_migrate_page(struct address_space *mapping,
 {
 	struct buffer_head *bh, *head;
 	int rc;
+	int expected_count;
 
 	if (!page_has_buffers(page))
 		return migrate_page(mapping, newpage, page, mode);
 
-	head = page_buffers(page);
+	/* Check whether page does not have extra refs before we do more work */
+	expected_count = expected_page_refs(page);
+	if (page_count(page) != expected_count)
+		return -EAGAIN;
 
-	rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);
+	head = page_buffers(page);
+	if (!buffer_migrate_lock_buffers(head, mode))
+		return -EAGAIN;
 
+	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
 	if (rc != MIGRATEPAGE_SUCCESS)
-		return rc;
-
-	/*
-	 * In the async case, migrate_page_move_mapping locked the buffers
-	 * with an IRQ-safe spinlock held. In the sync case, the buffers
-	 * need to be locked now
-	 */
-	if (mode != MIGRATE_ASYNC)
-		BUG_ON(!buffer_migrate_lock_buffers(head, mode));
+		goto unlock_buffers;
 
 	ClearPagePrivate(page);
 	set_page_private(newpage, page_private(page));
@@ -814,6 +799,8 @@ int buffer_migrate_page(struct address_space *mapping,
 	else
 		migrate_page_states(newpage, page);
 
+	rc = MIGRATEPAGE_SUCCESS;
+unlock_buffers:
 	bh = head;
 	do {
 		unlock_buffer(bh);
@@ -822,7 +809,7 @@ int buffer_migrate_page(struct address_space *mapping,
 
 	} while (bh != head);
 
-	return MIGRATEPAGE_SUCCESS;
+	return rc;
 }
 EXPORT_SYMBOL(buffer_migrate_page);
 #endif