Diffstat (limited to 'mm/migrate.c'):
 mm/migrate.c | 81
 1 file changed, 28 insertions(+), 53 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index be26d5cbe56..77ed2d77370 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -33,6 +33,7 @@
 #include <linux/memcontrol.h>
 #include <linux/syscalls.h>
 #include <linux/hugetlb.h>
+#include <linux/hugetlb_cgroup.h>
 #include <linux/gfp.h>
 
 #include <asm/tlbflush.h>
@@ -682,7 +683,6 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 {
 	int rc = -EAGAIN;
 	int remap_swapcache = 1;
-	int charge = 0;
 	struct mem_cgroup *mem;
 	struct anon_vma *anon_vma = NULL;
 
@@ -724,12 +724,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 	}
 
 	/* charge against new page */
-	charge = mem_cgroup_prepare_migration(page, newpage, &mem, GFP_KERNEL);
-	if (charge == -ENOMEM) {
-		rc = -ENOMEM;
-		goto unlock;
-	}
-	BUG_ON(charge);
+	mem_cgroup_prepare_migration(page, newpage, &mem);
 
 	if (PageWriteback(page)) {
 		/*
@@ -819,8 +814,7 @@ skip_unmap:
 		put_anon_vma(anon_vma);
 
 uncharge:
-	if (!charge)
-		mem_cgroup_end_migration(mem, page, newpage, rc == 0);
+	mem_cgroup_end_migration(mem, page, newpage, rc == 0);
 unlock:
 	unlock_page(page);
 out:
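
With the two hunks above, __unmap_and_move() no longer handles a failure from the memcg migration charge: mem_cgroup_prepare_migration() is called without a gfp_t and without checking a return value, and mem_cgroup_end_migration() now runs unconditionally on the uncharge path. The declarations below are only a sketch of the prototypes this implies, inferred from the call sites in this diff; they are not part of the patch itself:

	/* Assumed prototypes, inferred from the call sites above (not in this diff). */
	void mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
					  struct mem_cgroup **memcgp);
	void mem_cgroup_end_migration(struct mem_cgroup *memcg,
				      struct page *oldpage, struct page *newpage,
				      bool migration_ok);
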
@@ -931,16 +925,13 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 
 	if (anon_vma)
 		put_anon_vma(anon_vma);
-	unlock_page(hpage);
 
-out:
-	if (rc != -EAGAIN) {
-		list_del(&hpage->lru);
-		put_page(hpage);
-	}
+	if (!rc)
+		hugetlb_cgroup_migrate(hpage, new_hpage);
 
+	unlock_page(hpage);
+out:
 	put_page(new_hpage);
-
 	if (result) {
 		if (rc)
 			*result = rc;
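
In unmap_and_move_huge_page(), the hugetlb cgroup charge is now moved to the new huge page when migration succeeded (rc == 0), and this happens before unlock_page(hpage), so the page lock is still held; the old out-path list_del()/put_page() of the source page goes away along with the switch to the single-page interface in the next hunk. The bare hugetlb_cgroup_migrate() call needs no #ifdef in migrate.c if <linux/hugetlb_cgroup.h> (added in the first hunk) follows the usual pattern of a static inline no-op stub when the controller is not built in. A sketch of that assumed pattern, with the Kconfig symbol named only as an assumption:

	/* Assumed stub pattern in <linux/hugetlb_cgroup.h> (not part of this diff). */
	#ifdef CONFIG_CGROUP_HUGETLB
	extern void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage);
	#else
	static inline void hugetlb_cgroup_migrate(struct page *oldhpage,
						  struct page *newhpage)
	{
	}
	#endif
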
@@ -1016,48 +1007,32 @@ out:
 	return nr_failed + retry;
 }
 
-int migrate_huge_pages(struct list_head *from,
-		new_page_t get_new_page, unsigned long private, bool offlining,
-		enum migrate_mode mode)
+int migrate_huge_page(struct page *hpage, new_page_t get_new_page,
+		      unsigned long private, bool offlining,
+		      enum migrate_mode mode)
 {
-	int retry = 1;
-	int nr_failed = 0;
-	int pass = 0;
-	struct page *page;
-	struct page *page2;
-	int rc;
-
-	for (pass = 0; pass < 10 && retry; pass++) {
-		retry = 0;
-
-		list_for_each_entry_safe(page, page2, from, lru) {
+	int pass, rc;
+
+	for (pass = 0; pass < 10; pass++) {
+		rc = unmap_and_move_huge_page(get_new_page,
+					      private, hpage, pass > 2, offlining,
+					      mode);
+		switch (rc) {
+		case -ENOMEM:
+			goto out;
+		case -EAGAIN:
+			/* try again */
 			cond_resched();
-
-			rc = unmap_and_move_huge_page(get_new_page,
-					private, page, pass > 2, offlining,
-					mode);
-
-			switch(rc) {
-			case -ENOMEM:
-				goto out;
-			case -EAGAIN:
-				retry++;
-				break;
-			case 0:
-				break;
-			default:
-				/* Permanent failure */
-				nr_failed++;
-				break;
-			}
+			break;
+		case 0:
+			goto out;
+		default:
+			rc = -EIO;
+			goto out;
 		}
 	}
-	rc = 0;
 out:
-	if (rc)
-		return rc;
-
-	return nr_failed + retry;
+	return rc;
 }
 
 #ifdef CONFIG_NUMA
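
The list-based migrate_huge_pages() is replaced by migrate_huge_page(), which migrates exactly one huge page: it retries on -EAGAIN for up to ten passes, passes -ENOMEM through, and folds any other error into -EIO. A minimal, hypothetical caller is sketched below; alloc_target_huge_page() is an illustrative allocator (its exact new_page_t callback signature is assumed) and the MIGRATE_SYNC mode is likewise an assumption, neither is taken from this diff:

	/* Hypothetical allocator matching the assumed new_page_t callback shape. */
	static struct page *alloc_target_huge_page(struct page *hpage,
						   unsigned long private, int **result)
	{
		/* Allocate a huge page of the same size on the current node. */
		return alloc_huge_page_node(page_hstate(hpage), numa_node_id());
	}

	static int try_migrate_one_huge_page(struct page *hpage)
	{
		/* One page per call; no list setup or per-page cleanup here. */
		return migrate_huge_page(hpage, alloc_target_huge_page, 0,
					 false, MIGRATE_SYNC);
	}
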