diff options
author | Namhoon Kim <namhoonk@cs.unc.edu> | 2015-03-25 11:03:29 -0400 |
---|---|---|
committer | Namhoon Kim <namhoonk@cs.unc.edu> | 2015-03-25 11:03:29 -0400 |
commit | 09471d13bd498bdc9d6f0874c0e00eba574f5558 (patch) | |
tree | fff09b3133b16a943b4e7602bfaf2c29883bbf9c | |
parent | 0041e83c7994510cebe9f335eb30b6049d8b4c1f (diff) |
litmus_migrate_pages
-rw-r--r-- | include/linux/balloon_compaction.h | 14 | ||||
-rw-r--r-- | include/linux/migrate.h | 2 | ||||
-rw-r--r-- | litmus/litmus.c | 2 | ||||
-rw-r--r-- | mm/migrate.c | 113 |
4 files changed, 130 insertions, 1 deletion
diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h index 089743ade734..1dbef0b18d21 100644 --- a/include/linux/balloon_compaction.h +++ b/include/linux/balloon_compaction.h | |||
@@ -93,6 +93,20 @@ static inline void balloon_page_free(struct page *page) | |||
93 | __free_page(page); | 93 | __free_page(page); |
94 | } | 94 | } |
95 | 95 | ||
/*
 * Release an isolated balloon page back to the buddy allocator.
 *
 * Balloon pages always carry one extra reference, taken before they are
 * isolated/dequeued to sort out fortuitous collisions between a thread
 * trying to isolate a balloon page and another thread trying to release
 * that very same page.  Drop that extra reference first, then hand the
 * page back to the buddy allocator.
 */
static inline void litmus_balloon_page_free(struct page *page)
{
	put_page(page);		/* drop the isolation refcount */
	__free_page(page);
}
109 | |||
96 | #ifdef CONFIG_BALLOON_COMPACTION | 110 | #ifdef CONFIG_BALLOON_COMPACTION |
97 | extern bool balloon_page_isolate(struct page *page); | 111 | extern bool balloon_page_isolate(struct page *page); |
98 | extern void balloon_page_putback(struct page *page); | 112 | extern void balloon_page_putback(struct page *page); |
diff --git a/include/linux/migrate.h b/include/linux/migrate.h index a405d3dc0f61..a2a7e25ec716 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h | |||
@@ -41,6 +41,8 @@ extern int migrate_page(struct address_space *, | |||
41 | struct page *, struct page *, enum migrate_mode); | 41 | struct page *, struct page *, enum migrate_mode); |
42 | extern int migrate_pages(struct list_head *l, new_page_t x, | 42 | extern int migrate_pages(struct list_head *l, new_page_t x, |
43 | unsigned long private, enum migrate_mode mode, int reason); | 43 | unsigned long private, enum migrate_mode mode, int reason); |
44 | extern int litmus_migrate_pages(struct list_head *l, new_page_t x, | ||
45 | unsigned long private, enum migrate_mode mode, int reason); | ||
44 | extern int migrate_huge_page(struct page *, new_page_t x, | 46 | extern int migrate_huge_page(struct page *, new_page_t x, |
45 | unsigned long private, enum migrate_mode mode); | 47 | unsigned long private, enum migrate_mode mode); |
46 | 48 | ||
diff --git a/litmus/litmus.c b/litmus/litmus.c index 04c5017aa0dd..c8ed597f212e 100644 --- a/litmus/litmus.c +++ b/litmus/litmus.c | |||
@@ -499,7 +499,7 @@ asmlinkage long sys_set_page_color(int cpu) | |||
499 | 499 | ||
500 | //node= 0; | 500 | //node= 0; |
501 | if (!list_empty(&pagelist)) { | 501 | if (!list_empty(&pagelist)) { |
502 | ret = migrate_pages(&pagelist, new_alloc_page, node, MIGRATE_ASYNC, MR_SYSCALL); | 502 | ret = litmus_migrate_pages(&pagelist, new_alloc_page, node, MIGRATE_ASYNC, MR_SYSCALL); |
503 | TRACE_TASK(current, "%ld pages not migrated.\n", ret); | 503 | TRACE_TASK(current, "%ld pages not migrated.\n", ret); |
504 | if (ret) { | 504 | if (ret) { |
505 | putback_lru_pages(&pagelist); | 505 | putback_lru_pages(&pagelist); |
diff --git a/mm/migrate.c b/mm/migrate.c index a88c12f2235d..eab459afebf0 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
@@ -918,6 +918,65 @@ out: | |||
918 | return rc; | 918 | return rc; |
919 | } | 919 | } |
920 | 920 | ||
921 | static int litmus_unmap_and_move(new_page_t get_new_page, unsigned long private, | ||
922 | struct page *page, int force, enum migrate_mode mode) | ||
923 | { | ||
924 | int rc = 0; | ||
925 | int *result = NULL; | ||
926 | struct page *newpage = get_new_page(page, private, &result); | ||
927 | |||
928 | if (!newpage) | ||
929 | return -ENOMEM; | ||
930 | |||
931 | if (page_count(page) == 1) { | ||
932 | /* page was freed from under us. So we are done. */ | ||
933 | goto out; | ||
934 | } | ||
935 | |||
936 | if (unlikely(PageTransHuge(page))) | ||
937 | if (unlikely(split_huge_page(page))) | ||
938 | goto out; | ||
939 | |||
940 | rc = __unmap_and_move(page, newpage, force, mode); | ||
941 | |||
942 | if (unlikely(rc == MIGRATEPAGE_BALLOON_SUCCESS)) { | ||
943 | /* | ||
944 | * A ballooned page has been migrated already. | ||
945 | * Now, it's the time to wrap-up counters, | ||
946 | * handle the page back to Buddy and return. | ||
947 | */ | ||
948 | dec_zone_page_state(page, NR_ISOLATED_ANON + | ||
949 | page_is_file_cache(page)); | ||
950 | litmus_balloon_page_free(page); | ||
951 | return MIGRATEPAGE_SUCCESS; | ||
952 | } | ||
953 | out: | ||
954 | if (rc != -EAGAIN) { | ||
955 | /* | ||
956 | * A page that has been migrated has all references | ||
957 | * removed and will be freed. A page that has not been | ||
958 | * migrated will have kepts its references and be | ||
959 | * restored. | ||
960 | */ | ||
961 | list_del(&page->lru); | ||
962 | dec_zone_page_state(page, NR_ISOLATED_ANON + | ||
963 | page_is_file_cache(page)); | ||
964 | putback_lru_page(page); | ||
965 | } | ||
966 | /* | ||
967 | * Move the new page to the LRU. If migration was not successful | ||
968 | * then this will free the page. | ||
969 | */ | ||
970 | putback_lru_page(newpage); | ||
971 | if (result) { | ||
972 | if (rc) | ||
973 | *result = rc; | ||
974 | else | ||
975 | *result = page_to_nid(newpage); | ||
976 | } | ||
977 | return rc; | ||
978 | } | ||
979 | |||
921 | /* | 980 | /* |
922 | * Counterpart of unmap_and_move_page() for hugepage migration. | 981 | * Counterpart of unmap_and_move_page() for hugepage migration. |
923 | * | 982 | * |
@@ -1058,6 +1117,60 @@ out: | |||
1058 | return rc; | 1117 | return rc; |
1059 | } | 1118 | } |
1060 | 1119 | ||
1120 | int litmus_migrate_pages(struct list_head *from, new_page_t get_new_page, | ||
1121 | unsigned long private, enum migrate_mode mode, int reason) | ||
1122 | { | ||
1123 | int retry = 1; | ||
1124 | int nr_failed = 0; | ||
1125 | int nr_succeeded = 0; | ||
1126 | int pass = 0; | ||
1127 | struct page *page; | ||
1128 | struct page *page2; | ||
1129 | int swapwrite = current->flags & PF_SWAPWRITE; | ||
1130 | int rc; | ||
1131 | |||
1132 | if (!swapwrite) | ||
1133 | current->flags |= PF_SWAPWRITE; | ||
1134 | |||
1135 | for(pass = 0; pass < 10 && retry; pass++) { | ||
1136 | retry = 0; | ||
1137 | |||
1138 | list_for_each_entry_safe(page, page2, from, lru) { | ||
1139 | cond_resched(); | ||
1140 | |||
1141 | rc = litmus_unmap_and_move(get_new_page, private, | ||
1142 | page, pass > 2, mode); | ||
1143 | |||
1144 | switch(rc) { | ||
1145 | case -ENOMEM: | ||
1146 | goto out; | ||
1147 | case -EAGAIN: | ||
1148 | retry++; | ||
1149 | break; | ||
1150 | case MIGRATEPAGE_SUCCESS: | ||
1151 | nr_succeeded++; | ||
1152 | break; | ||
1153 | default: | ||
1154 | /* Permanent failure */ | ||
1155 | nr_failed++; | ||
1156 | break; | ||
1157 | } | ||
1158 | } | ||
1159 | } | ||
1160 | rc = nr_failed + retry; | ||
1161 | out: | ||
1162 | if (nr_succeeded) | ||
1163 | count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded); | ||
1164 | if (nr_failed) | ||
1165 | count_vm_events(PGMIGRATE_FAIL, nr_failed); | ||
1166 | trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason); | ||
1167 | |||
1168 | if (!swapwrite) | ||
1169 | current->flags &= ~PF_SWAPWRITE; | ||
1170 | |||
1171 | return rc; | ||
1172 | } | ||
1173 | |||
1061 | int migrate_huge_page(struct page *hpage, new_page_t get_new_page, | 1174 | int migrate_huge_page(struct page *hpage, new_page_t get_new_page, |
1062 | unsigned long private, enum migrate_mode mode) | 1175 | unsigned long private, enum migrate_mode mode) |
1063 | { | 1176 | { |