Diffstat:
-rw-r--r--  fs/btrfs/disk-io.c       |  5
-rw-r--r--  fs/hugetlbfs/inode.c     |  2
-rw-r--r--  fs/nfs/internal.h        |  2
-rw-r--r--  fs/nfs/write.c           |  4
-rw-r--r--  include/linux/fs.h       |  6
-rw-r--r--  include/linux/migrate.h  | 23
-rw-r--r--  mm/compaction.c          |  2
-rw-r--r--  mm/memory-failure.c      |  2
-rw-r--r--  mm/memory_hotplug.c      |  2
-rw-r--r--  mm/mempolicy.c           |  2
-rw-r--r--  mm/migrate.c             | 78
11 files changed, 76 insertions, 52 deletions
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 1375494c8cb6..d8525662ca7a 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -872,7 +872,8 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
 
 #ifdef CONFIG_MIGRATION
 static int btree_migratepage(struct address_space *mapping,
-			struct page *newpage, struct page *page, bool sync)
+			struct page *newpage, struct page *page,
+			enum migrate_mode mode)
 {
 	/*
 	 * we can't safely write a btree page from here,
@@ -887,7 +888,7 @@ static int btree_migratepage(struct address_space *mapping,
 	if (page_has_private(page) &&
 	    !try_to_release_page(page, GFP_KERNEL))
 		return -EAGAIN;
-	return migrate_page(mapping, newpage, page, sync);
+	return migrate_page(mapping, newpage, page, mode);
 }
 #endif
 
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 06fd4608a990..1e85a7ac0217 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -584,7 +584,7 @@ static int hugetlbfs_set_page_dirty(struct page *page)
 
 static int hugetlbfs_migrate_page(struct address_space *mapping,
 				struct page *newpage, struct page *page,
-				bool sync)
+				enum migrate_mode mode)
 {
 	int rc;
 
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 114398a15830..8102db9b926c 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -332,7 +332,7 @@ void nfs_commit_release_pages(struct nfs_write_data *data);
 
 #ifdef CONFIG_MIGRATION
 extern int nfs_migrate_page(struct address_space *,
-		struct page *, struct page *, bool);
+		struct page *, struct page *, enum migrate_mode);
 #else
 #define nfs_migrate_page NULL
 #endif
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 889e98bc5a21..834f0fe96f89 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1688,7 +1688,7 @@ out_error:
 
 #ifdef CONFIG_MIGRATION
 int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
-		struct page *page, bool sync)
+		struct page *page, enum migrate_mode mode)
 {
 	/*
 	 * If PagePrivate is set, then the page is currently associated with
@@ -1703,7 +1703,7 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
 
 	nfs_fscache_release_page(page, GFP_KERNEL);
 
-	return migrate_page(mapping, newpage, page, sync);
+	return migrate_page(mapping, newpage, page, mode);
 }
 #endif
 
diff --git a/include/linux/fs.h b/include/linux/fs.h
index b92b73d0b2b9..e694bd4434a4 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -525,6 +525,7 @@ enum positive_aop_returns {
 struct page;
 struct address_space;
 struct writeback_control;
+enum migrate_mode;
 
 struct iov_iter {
 	const struct iovec *iov;
@@ -614,7 +615,7 @@
 	 * is false, it must not block.
 	 */
 	int (*migratepage) (struct address_space *,
-			struct page *, struct page *, bool);
+			struct page *, struct page *, enum migrate_mode);
 	int (*launder_page) (struct page *);
 	int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
 					unsigned long);
@@ -2540,7 +2541,8 @@ extern int generic_check_addressable(unsigned, u64);
 
 #ifdef CONFIG_MIGRATION
 extern int buffer_migrate_page(struct address_space *,
-				struct page *, struct page *, bool);
+				struct page *, struct page *,
+				enum migrate_mode);
 #else
 #define buffer_migrate_page NULL
 #endif
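The ->migratepage operation is the contract this patch threads the new enum through. As a minimal sketch of what a filesystem callback looks like under the new signature (example_migratepage is a hypothetical name, not part of the patch; migrate_page() and page_has_private() are the real kernel helpers):

#include <linux/fs.h>
#include <linux/migrate.h>
#include <linux/mm.h>

/* Hypothetical ->migratepage callback under the new signature. */
static int example_migratepage(struct address_space *mapping,
				struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	/*
	 * A MIGRATE_ASYNC caller must never be blocked; back off with
	 * -EAGAIN if private state would force us to sleep.
	 */
	if (mode == MIGRATE_ASYNC && page_has_private(page))
		return -EAGAIN;

	/* Delegate the actual copy to the generic helper. */
	return migrate_page(mapping, newpage, page, mode);
}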
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 14e6d2a88475..eaf867412f7a 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -6,18 +6,31 @@
 
 typedef struct page *new_page_t(struct page *, unsigned long private, int **);
 
+/*
+ * MIGRATE_ASYNC means never block
+ * MIGRATE_SYNC_LIGHT in the current implementation means to allow blocking
+ *	on most operations but not ->writepage as the potential stall time
+ *	is too significant
+ * MIGRATE_SYNC will block when migrating pages
+ */
+enum migrate_mode {
+	MIGRATE_ASYNC,
+	MIGRATE_SYNC_LIGHT,
+	MIGRATE_SYNC,
+};
+
 #ifdef CONFIG_MIGRATION
 #define PAGE_MIGRATION 1
 
 extern void putback_lru_pages(struct list_head *l);
 extern int migrate_page(struct address_space *,
-			struct page *, struct page *, bool);
+			struct page *, struct page *, enum migrate_mode);
 extern int migrate_pages(struct list_head *l, new_page_t x,
 			unsigned long private, bool offlining,
-			bool sync);
+			enum migrate_mode mode);
 extern int migrate_huge_pages(struct list_head *l, new_page_t x,
 			unsigned long private, bool offlining,
-			bool sync);
+			enum migrate_mode mode);
 
 extern int fail_migrate_page(struct address_space *,
 			struct page *, struct page *);
@@ -36,10 +49,10 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
 static inline void putback_lru_pages(struct list_head *l) {}
 static inline int migrate_pages(struct list_head *l, new_page_t x,
 		unsigned long private, bool offlining,
-		bool sync) { return -ENOSYS; }
+		enum migrate_mode mode) { return -ENOSYS; }
 static inline int migrate_huge_pages(struct list_head *l, new_page_t x,
 		unsigned long private, bool offlining,
-		bool sync) { return -ENOSYS; }
+		enum migrate_mode mode) { return -ENOSYS; }
 
 static inline int migrate_prep(void) { return -ENOSYS; }
 static inline int migrate_prep_local(void) { return -ENOSYS; }
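Two tests recur throughout the conversion below: mode != MIGRATE_ASYNC ("either sync flavour, blocking is allowed") and mode == MIGRATE_SYNC ("full sync, stalls on writeback/->writepage are allowed"). A standalone illustration of the two predicates, with hypothetical helper names:

#include <stdbool.h>

enum migrate_mode { MIGRATE_ASYNC, MIGRATE_SYNC_LIGHT, MIGRATE_SYNC };

/* May the migration block at all? True for both sync variants. */
static bool may_block(enum migrate_mode mode)
{
	return mode != MIGRATE_ASYNC;
}

/* May it stall on writeback or ->writepage? Only in full sync mode. */
static bool may_stall_on_writeback(enum migrate_mode mode)
{
	return mode == MIGRATE_SYNC;
}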
diff --git a/mm/compaction.c b/mm/compaction.c
index fb291585e1bf..71a58f67f481 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -557,7 +557,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 		nr_migrate = cc->nr_migratepages;
 		err = migrate_pages(&cc->migratepages, compaction_alloc,
 				(unsigned long)cc, false,
-				cc->sync);
+				cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
 		update_nr_listpages(cc);
 		nr_remaining = cc->nr_migratepages;
 
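This hunk is the behavioural core of the patch: where cc->sync previously requested fully synchronous migration, compaction now asks for sync-light, which may still block on locks but no longer stalls in ->writepage. The callers converted further down (soft offline, memory hot-remove, mempolicy migration, move_pages) stay on full MIGRATE_SYNC, since they migrate for correctness rather than latency. A sketch of the mapping, with a hypothetical helper name:

#include <linux/types.h>
#include <linux/migrate.h>

/* Hypothetical helper: how compaction's old bool maps onto the enum. */
static enum migrate_mode compact_mode(bool sync)
{
	return sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC;
}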
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 06d3479513aa..56080ea36140 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1557,7 +1557,7 @@ int soft_offline_page(struct page *page, int flags)
 					page_is_file_cache(page));
 		list_add(&page->lru, &pagelist);
 		ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
-							0, true);
+							0, MIGRATE_SYNC);
 		if (ret) {
 			putback_lru_pages(&pagelist);
 			pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 2168489c0bc9..6629fafd6ce4 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -809,7 +809,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 		}
 		/* this function returns # of failed pages */
 		ret = migrate_pages(&source, hotremove_migrate_alloc, 0,
-								true, true);
+							true, MIGRATE_SYNC);
 		if (ret)
 			putback_lru_pages(&source);
 	}
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index e3d58f088466..06b145fb64ab 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -942,7 +942,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
 
 	if (!list_empty(&pagelist)) {
 		err = migrate_pages(&pagelist, new_node_page, dest,
-							false, true);
+							false, MIGRATE_SYNC);
 		if (err)
 			putback_lru_pages(&pagelist);
 	}
diff --git a/mm/migrate.c b/mm/migrate.c
index 4e86f3bacb85..9871a56d82c3 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -218,12 +218,13 @@ out:
 
 #ifdef CONFIG_BLOCK
 /* Returns true if all buffers are successfully locked */
-static bool buffer_migrate_lock_buffers(struct buffer_head *head, bool sync)
+static bool buffer_migrate_lock_buffers(struct buffer_head *head,
+						enum migrate_mode mode)
 {
 	struct buffer_head *bh = head;
 
 	/* Simple case, sync compaction */
-	if (sync) {
+	if (mode != MIGRATE_ASYNC) {
 		do {
 			get_bh(bh);
 			lock_buffer(bh);
@@ -259,7 +260,7 @@ static bool buffer_migrate_lock_buffers(struct buffer_head *head, bool sync)
 }
 #else
 static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
-							bool sync)
+						enum migrate_mode mode)
 {
 	return true;
 }
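The MIGRATE_ASYNC side of this function is untouched context the diff does not show: it trylocks each buffer and unwinds on contention instead of sleeping. A hedged sketch of that pattern, under a hypothetical name (details are illustrative, not quoted from the tree):

#include <linux/types.h>
#include <linux/buffer_head.h>

/* Illustrative trylock pass: lock every buffer on the page without
 * sleeping, releasing all locks taken so far if any buffer is busy. */
static bool example_trylock_buffers(struct buffer_head *head)
{
	struct buffer_head *bh = head, *failed_bh;

	do {
		get_bh(bh);
		if (!trylock_buffer(bh)) {
			failed_bh = bh;
			put_bh(failed_bh);

			/* Unwind everything locked before the failure. */
			bh = head;
			while (bh != failed_bh) {
				unlock_buffer(bh);
				put_bh(bh);
				bh = bh->b_this_page;
			}
			return false;	/* caller then returns -EAGAIN */
		}
		bh = bh->b_this_page;
	} while (bh != head);
	return true;
}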
@@ -275,7 +276,7 @@ static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
  */
 static int migrate_page_move_mapping(struct address_space *mapping,
 		struct page *newpage, struct page *page,
-		struct buffer_head *head, bool sync)
+		struct buffer_head *head, enum migrate_mode mode)
 {
 	int expected_count;
 	void **pslot;
@@ -311,7 +312,8 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 	 * the mapping back due to an elevated page count, we would have to
	 * block waiting on other references to be dropped.
 	 */
-	if (!sync && head && !buffer_migrate_lock_buffers(head, sync)) {
+	if (mode == MIGRATE_ASYNC && head &&
+			!buffer_migrate_lock_buffers(head, mode)) {
 		page_unfreeze_refs(page, expected_count);
 		spin_unlock_irq(&mapping->tree_lock);
 		return -EAGAIN;
@@ -472,13 +474,14 @@ EXPORT_SYMBOL(fail_migrate_page);
  * Pages are locked upon entry and exit.
  */
 int migrate_page(struct address_space *mapping,
-		struct page *newpage, struct page *page, bool sync)
+		struct page *newpage, struct page *page,
+		enum migrate_mode mode)
 {
 	int rc;
 
 	BUG_ON(PageWriteback(page));	/* Writeback must be complete */
 
-	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, sync);
+	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode);
 
 	if (rc)
 		return rc;
@@ -495,17 +498,17 @@ EXPORT_SYMBOL(migrate_page);
  * exist.
  */
 int buffer_migrate_page(struct address_space *mapping,
-		struct page *newpage, struct page *page, bool sync)
+		struct page *newpage, struct page *page, enum migrate_mode mode)
 {
 	struct buffer_head *bh, *head;
 	int rc;
 
 	if (!page_has_buffers(page))
-		return migrate_page(mapping, newpage, page, sync);
+		return migrate_page(mapping, newpage, page, mode);
 
 	head = page_buffers(page);
 
-	rc = migrate_page_move_mapping(mapping, newpage, page, head, sync);
+	rc = migrate_page_move_mapping(mapping, newpage, page, head, mode);
 
 	if (rc)
 		return rc;
@@ -515,8 +518,8 @@ int buffer_migrate_page(struct address_space *mapping,
 	 * with an IRQ-safe spinlock held. In the sync case, the buffers
 	 * need to be locked now
 	 */
-	if (sync)
-		BUG_ON(!buffer_migrate_lock_buffers(head, sync));
+	if (mode != MIGRATE_ASYNC)
+		BUG_ON(!buffer_migrate_lock_buffers(head, mode));
 
 	ClearPagePrivate(page);
 	set_page_private(newpage, page_private(page));
@@ -593,10 +596,11 @@ static int writeout(struct address_space *mapping, struct page *page)
  * Default handling if a filesystem does not provide a migration function.
  */
 static int fallback_migrate_page(struct address_space *mapping,
-	struct page *newpage, struct page *page, bool sync)
+	struct page *newpage, struct page *page, enum migrate_mode mode)
 {
 	if (PageDirty(page)) {
-		if (!sync)
+		/* Only writeback pages in full synchronous migration */
+		if (mode != MIGRATE_SYNC)
 			return -EBUSY;
 		return writeout(mapping, page);
 	}
@@ -609,7 +613,7 @@ static int fallback_migrate_page(struct address_space *mapping,
 	    !try_to_release_page(page, GFP_KERNEL))
 		return -EAGAIN;
 
-	return migrate_page(mapping, newpage, page, sync);
+	return migrate_page(mapping, newpage, page, mode);
 }
 
 /*
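Taken together, the two fallback hunks encode a simple policy for dirty pages on filesystems that provide no ->migratepage: only full sync migration may pay for writeout(). A compact restatement of the same logic, as a hypothetical helper:

#include <linux/errno.h>
#include <linux/migrate.h>

/* Hypothetical restatement of the fallback policy for a dirty page. */
static int example_dirty_page_policy(enum migrate_mode mode)
{
	switch (mode) {
	case MIGRATE_ASYNC:		/* must never block */
	case MIGRATE_SYNC_LIGHT:	/* may block, but not in ->writepage */
		return -EBUSY;		/* skip the page this pass */
	case MIGRATE_SYNC:
		return 0;		/* proceed to writeout() */
	}
	return -EBUSY;
}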
@@ -624,7 +628,7 @@ static int fallback_migrate_page(struct address_space *mapping,
  *   == 0 - success
  */
 static int move_to_new_page(struct page *newpage, struct page *page,
-					int remap_swapcache, bool sync)
+				int remap_swapcache, enum migrate_mode mode)
 {
 	struct address_space *mapping;
 	int rc;
@@ -645,7 +649,7 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 
 	mapping = page_mapping(page);
 	if (!mapping)
-		rc = migrate_page(mapping, newpage, page, sync);
+		rc = migrate_page(mapping, newpage, page, mode);
 	else if (mapping->a_ops->migratepage)
 		/*
 		 * Most pages have a mapping and most filesystems provide a
@@ -654,9 +658,9 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 		 * is the most common path for page migration.
 		 */
 		rc = mapping->a_ops->migratepage(mapping,
-						newpage, page, sync);
+						newpage, page, mode);
 	else
-		rc = fallback_migrate_page(mapping, newpage, page, sync);
+		rc = fallback_migrate_page(mapping, newpage, page, mode);
 
 	if (rc) {
 		newpage->mapping = NULL;
@@ -671,7 +675,7 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 }
 
 static int __unmap_and_move(struct page *page, struct page *newpage,
-			int force, bool offlining, bool sync)
+			int force, bool offlining, enum migrate_mode mode)
 {
 	int rc = -EAGAIN;
 	int remap_swapcache = 1;
@@ -680,7 +684,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 	struct anon_vma *anon_vma = NULL;
 
 	if (!trylock_page(page)) {
-		if (!force || !sync)
+		if (!force || mode == MIGRATE_ASYNC)
 			goto out;
 
 		/*
@@ -726,10 +730,12 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 
 	if (PageWriteback(page)) {
 		/*
-		 * For !sync, there is no point retrying as the retry loop
-		 * is expected to be too short for PageWriteback to be cleared
+		 * Only in the case of a full synchronous migration is it
+		 * necessary to wait for PageWriteback. In the async case,
+		 * the retry loop is too short and in the sync-light case,
+		 * the overhead of stalling is too much
 		 */
-		if (!sync) {
+		if (mode != MIGRATE_SYNC) {
 			rc = -EBUSY;
 			goto uncharge;
 		}
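For completeness: the unchanged lines directly after this hunk are not shown by the diff. In the tree this patch applies to they continue approximately as below (a sketch reconstructed from the surrounding code, not quoted from the patch), so only a forced, fully synchronous migration actually waits:

	if (PageWriteback(page)) {
		/* ... comment from the hunk above ... */
		if (mode != MIGRATE_SYNC) {
			rc = -EBUSY;
			goto uncharge;
		}
		if (!force)
			goto uncharge;
		wait_on_page_writeback(page);
	}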
@@ -800,7 +806,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 
 skip_unmap:
 	if (!page_mapped(page))
-		rc = move_to_new_page(newpage, page, remap_swapcache, sync);
+		rc = move_to_new_page(newpage, page, remap_swapcache, mode);
 
 	if (rc && remap_swapcache)
 		remove_migration_ptes(page, page);
@@ -823,7 +829,8 @@ out:
  * to the newly allocated page in newpage.
  */
 static int unmap_and_move(new_page_t get_new_page, unsigned long private,
-			struct page *page, int force, bool offlining, bool sync)
+			struct page *page, int force, bool offlining,
+			enum migrate_mode mode)
 {
 	int rc = 0;
 	int *result = NULL;
@@ -843,7 +850,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	if (unlikely(split_huge_page(page)))
 		goto out;
 
-	rc = __unmap_and_move(page, newpage, force, offlining, sync);
+	rc = __unmap_and_move(page, newpage, force, offlining, mode);
 out:
 	if (rc != -EAGAIN) {
 		/*
@@ -891,7 +898,8 @@ out:
  */
 static int unmap_and_move_huge_page(new_page_t get_new_page,
 				unsigned long private, struct page *hpage,
-				int force, bool offlining, bool sync)
+				int force, bool offlining,
+				enum migrate_mode mode)
 {
 	int rc = 0;
 	int *result = NULL;
@@ -904,7 +912,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	rc = -EAGAIN;
 
 	if (!trylock_page(hpage)) {
-		if (!force || !sync)
+		if (!force || mode != MIGRATE_SYNC)
 			goto out;
 		lock_page(hpage);
 	}
@@ -915,7 +923,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
 
 	if (!page_mapped(hpage))
-		rc = move_to_new_page(new_hpage, hpage, 1, sync);
+		rc = move_to_new_page(new_hpage, hpage, 1, mode);
 
 	if (rc)
 		remove_migration_ptes(hpage, hpage);
@@ -958,7 +966,7 @@ out:
  */
 int migrate_pages(struct list_head *from,
 		new_page_t get_new_page, unsigned long private, bool offlining,
-		bool sync)
+		enum migrate_mode mode)
 {
 	int retry = 1;
 	int nr_failed = 0;
@@ -979,7 +987,7 @@ int migrate_pages(struct list_head *from,
 
 			rc = unmap_and_move(get_new_page, private,
 					page, pass > 2, offlining,
-					sync);
+					mode);
 
 			switch(rc) {
 			case -ENOMEM:
@@ -1009,7 +1017,7 @@ out:
 
 int migrate_huge_pages(struct list_head *from,
 		new_page_t get_new_page, unsigned long private, bool offlining,
-		bool sync)
+		enum migrate_mode mode)
 {
 	int retry = 1;
 	int nr_failed = 0;
@@ -1026,7 +1034,7 @@ int migrate_huge_pages(struct list_head *from,
 
 			rc = unmap_and_move_huge_page(get_new_page,
 					private, page, pass > 2, offlining,
-					sync);
+					mode);
 
 			switch(rc) {
 			case -ENOMEM:
@@ -1155,7 +1163,7 @@ set_status:
 	err = 0;
 	if (!list_empty(&pagelist)) {
 		err = migrate_pages(&pagelist, new_page_node,
-				(unsigned long)pm, 0, true);
+				(unsigned long)pm, 0, MIGRATE_SYNC);
 		if (err)
 			putback_lru_pages(&pagelist);
 	}