 mm/compaction.c | 21 +++++++++++++++++++--
 mm/migrate.c    | 34 ++++++++++++++++++++++++++++++++--
 2 files changed, 51 insertions(+), 4 deletions(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index 694eaabaaebd..470474c03b61 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -14,6 +14,7 @@
 #include <linux/backing-dev.h>
 #include <linux/sysctl.h>
 #include <linux/sysfs.h>
+#include <linux/balloon_compaction.h>
 #include "internal.h"
 
 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
@@ -565,9 +566,24 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                        goto next_pageblock;
                }
 
-               /* Check may be lockless but that's ok as we recheck later */
-               if (!PageLRU(page))
+               /*
+                * Check may be lockless but that's ok as we recheck later.
+                * It's possible to migrate LRU pages and balloon pages
+                * Skip any other type of page
+                */
+               if (!PageLRU(page)) {
+                       if (unlikely(balloon_page_movable(page))) {
+                               if (locked && balloon_page_isolate(page)) {
+                                       /* Successfully isolated */
+                                       cc->finished_update_migrate = true;
+                                       list_add(&page->lru, migratelist);
+                                       cc->nr_migratepages++;
+                                       nr_isolated++;
+                                       goto check_compact_cluster;
+                               }
+                       }
                        continue;
+               }
 
                /*
                 * PageLRU is set. lru_lock normally excludes isolation
@@ -621,6 +637,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                cc->nr_migratepages++;
                nr_isolated++;
 
+check_compact_cluster:
                /* Avoid isolating too much */
                if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
                        ++low_pfn;
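
For orientation, the sketch below models the page-classification flow the compaction.c hunks above introduce: LRU pages keep the existing isolation path, non-LRU pages that balloon_page_movable() reports as movable are isolated onto the migrate list (only while the lock is held), and every other page type is skipped. This is a standalone userspace sketch with hypothetical stand-in types and helpers (fake_page, classify), not kernel code.

#include <stdbool.h>
#include <stdio.h>

struct fake_page {
        bool lru;              /* models PageLRU(page) */
        bool balloon_movable;  /* models balloon_page_movable(page) */
};

enum isolate_action { SKIP_PAGE, ISOLATE_LRU, ISOLATE_BALLOON };

static enum isolate_action classify(const struct fake_page *page, bool locked)
{
        if (!page->lru) {
                /*
                 * Non-LRU page: only a movable balloon page can be taken,
                 * and only when the lock is already held.
                 */
                if (page->balloon_movable && locked)
                        return ISOLATE_BALLOON;
                return SKIP_PAGE;
        }
        /* PageLRU is set: fall through to the normal LRU isolation path. */
        return ISOLATE_LRU;
}

int main(void)
{
        struct fake_page lru_page = { .lru = true };
        struct fake_page balloon_page = { .balloon_movable = true };
        struct fake_page other_page = { 0 };

        printf("lru page     -> %d\n", classify(&lru_page, true));     /* 1: ISOLATE_LRU */
        printf("balloon page -> %d\n", classify(&balloon_page, true)); /* 2: ISOLATE_BALLOON */
        printf("other page   -> %d\n", classify(&other_page, true));   /* 0: SKIP_PAGE */
        return 0;
}
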
diff --git a/mm/migrate.c b/mm/migrate.c
index 33f5f82a6006..427343c0c296 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -35,6 +35,7 @@
 #include <linux/hugetlb.h>
 #include <linux/hugetlb_cgroup.h>
 #include <linux/gfp.h>
+#include <linux/balloon_compaction.h>
 
 #include <asm/tlbflush.h>
 
@@ -79,7 +80,10 @@ void putback_lru_pages(struct list_head *l)
                list_del(&page->lru);
                dec_zone_page_state(page, NR_ISOLATED_ANON +
                                page_is_file_cache(page));
-               putback_lru_page(page);
+               if (unlikely(balloon_page_movable(page)))
+                       balloon_page_putback(page);
+               else
+                       putback_lru_page(page);
        }
 }
 
@@ -768,6 +772,18 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
                }
        }
 
+       if (unlikely(balloon_page_movable(page))) {
+               /*
+                * A ballooned page does not need any special attention from
+                * physical to virtual reverse mapping procedures.
+                * Skip any attempt to unmap PTEs or to remap swap cache,
+                * in order to avoid burning cycles at rmap level, and perform
+                * the page migration right away (protected by page lock).
+                */
+               rc = balloon_page_migrate(newpage, page, mode);
+               goto uncharge;
+       }
+
        /*
         * Corner case handling:
         * 1. When a new swap-cache page is read into, it is added to the LRU
@@ -804,7 +820,9 @@ skip_unmap:
                put_anon_vma(anon_vma);
 
 uncharge:
-       mem_cgroup_end_migration(mem, page, newpage, rc == MIGRATEPAGE_SUCCESS);
+       mem_cgroup_end_migration(mem, page, newpage,
+                                (rc == MIGRATEPAGE_SUCCESS ||
+                                 rc == MIGRATEPAGE_BALLOON_SUCCESS));
 unlock:
        unlock_page(page);
 out:
@@ -836,6 +854,18 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
                goto out;
 
        rc = __unmap_and_move(page, newpage, force, offlining, mode);
+
+       if (unlikely(rc == MIGRATEPAGE_BALLOON_SUCCESS)) {
+               /*
+                * A ballooned page has been migrated already.
+                * Now, it's time to wrap up counters,
+                * hand the page back to Buddy and return.
+                */
+               dec_zone_page_state(page, NR_ISOLATED_ANON +
+                                   page_is_file_cache(page));
+               balloon_page_free(page);
+               return MIGRATEPAGE_SUCCESS;
+       }
 out:
        if (rc != -EAGAIN) {
                /*
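
As a rough illustration of the new wrap-up path in unmap_and_move(): when __unmap_and_move() returns MIGRATEPAGE_BALLOON_SUCCESS, the balloon page has already been migrated, so the caller only drops the isolation accounting, frees the old page via balloon_page_free(), and reports plain MIGRATEPAGE_SUCCESS to its callers. The sketch below is a standalone userspace model with stand-in helpers; only the two return-code names come from the patch, and the enum values are illustrative.

#include <stdio.h>

/* Return-code names from the patch; the values here are illustrative. */
enum { MIGRATEPAGE_SUCCESS = 0, MIGRATEPAGE_BALLOON_SUCCESS = 1 };

/* Hypothetical stand-ins for dec_zone_page_state() and balloon_page_free(). */
static void drop_isolated_count(void) { puts("NR_ISOLATED_* decremented"); }
static void free_balloon_page(void)   { puts("old balloon page handed back to buddy"); }

static int finish_migration(int rc)
{
        if (rc == MIGRATEPAGE_BALLOON_SUCCESS) {
                /*
                 * Page was already migrated inside __unmap_and_move():
                 * just wrap up the accounting and free the source page.
                 */
                drop_isolated_count();
                free_balloon_page();
                return MIGRATEPAGE_SUCCESS;  /* callers never see the balloon code */
        }
        return rc;  /* LRU pages keep the existing success/error handling */
}

int main(void)
{
        printf("balloon result -> %d\n", finish_migration(MIGRATEPAGE_BALLOON_SUCCESS));
        printf("lru result     -> %d\n", finish_migration(MIGRATEPAGE_SUCCESS));
        return 0;
}
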