Diffstat (limited to 'mm/migrate.c')
 -rw-r--r--  mm/migrate.c | 127
 1 file changed, 4 insertions(+), 123 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index d25cc2c2736d..a2e9cad083d5 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -405,7 +405,6 @@ int replicate_page_move_mapping(struct address_space *mapping,
 		struct buffer_head *head, enum migrate_mode mode,
 		int extra_count)
 {
-	int expected_count = 1 + extra_count;
 	int prev_count = page_count(page);
 	void **pslot;
 
@@ -415,38 +414,6 @@ int replicate_page_move_mapping(struct address_space *mapping,
 
 	pslot = radix_tree_lookup_slot(&mapping->page_tree, page_index(page));
 
-	expected_count += 1 + page_has_private(page);
-
-	TRACE_TASK(current, "page_count(page) = %d, expected_count = %d, page_has_private? %d\n", page_count(page), expected_count, page_has_private(page));
-/*
-	if (page_count(page) != expected_count ||
-		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
-		spin_unlock_irq(&mapping->tree_lock);
-		TRACE_TASK(current, "1\n");
-		return -EAGAIN;
-	}
-
-	if (!page_freeze_refs(page, expected_count)) { // if page_count(page) == expected_count, then set page_count = 0
-		spin_unlock_irq(&mapping->tree_lock);
-		TRACE_TASK(current, "2\n");
-		return -EAGAIN;
-	}
-*/
-	/*
-	 * In the async migration case of moving a page with buffers, lock the
-	 * buffers using trylock before the mapping is moved. If the mapping
-	 * was moved, we later failed to lock the buffers and could not move
-	 * the mapping back due to an elevated page count, we would have to
-	 * block waiting on other references to be dropped.
-	 */
-/*	if (mode == MIGRATE_ASYNC && head &&
-		!buffer_migrate_lock_buffers(head, mode)) {
-		page_unfreeze_refs(page, expected_count);
-		spin_unlock_irq(&mapping->tree_lock);
-		TRACE_TASK(current, "3\n");
-		return -EAGAIN;
-	}
-*/
 	/*
 	 * Now we know that no one else is looking at the page.
 	 */
@@ -456,15 +423,11 @@ int replicate_page_move_mapping(struct address_space *mapping,
 		set_page_private(newpage, page_private(page));
 	}
 
-	//radix_tree_replace_slot(pslot, newpage);
-	//radix_tree_replace_slot(pslot, page);
-
 	/*
 	 * Drop cache reference from old page by unfreezing
-	 * to one less reference.
+	 * to the previous reference.
 	 * We know this isn't the last reference.
 	 */
-	//page_unfreeze_refs(page, expected_count - 1);
 	page_unfreeze_refs(page, prev_count);
 
 	/*
@@ -702,7 +665,6 @@ void replicate_page_copy(struct page *newpage, struct page *page)
 	 */
 	if (PageWriteback(newpage))
 		end_page_writeback(newpage);
-	TRACE_TASK(current, "replicate_page_copy done!\n");
 }
 
 /************************************************************
@@ -742,7 +704,6 @@ int replicate_page(struct address_space *mapping,
 	BUG_ON(PageWriteback(page));	/* Writeback must be complete */
 
 	rc = replicate_page_move_mapping(mapping, newpage, page, NULL, mode, extra_count);
-	TRACE_TASK(current, "replicate_page_move_mapping returned %d\n", rc);
 	if (rc != MIGRATEPAGE_SUCCESS)
 		return rc;
 
@@ -975,19 +936,9 @@ static int copy_to_new_page(struct page *newpage, struct page *page,
 		rc = migrate_page(mapping, newpage, page, mode);
 	}
 	else if (mapping->a_ops->migratepage) {
-		TRACE_TASK(current, "ops migration callback\n");
-		/*
-		 * Most pages have a mapping and most filesystems provide a
-		 * migratepage callback. Anonymous pages are part of swap
-		 * space which also has its own migratepage callback. This
-		 * is the most common path for page migration.
-		 */
-		//rc = mapping->a_ops->migratepage(mapping,
-		//			newpage, page, mode);
 		rc = replicate_page(mapping, newpage, page, mode, has_replica);
 	}
 	else {
-		TRACE_TASK(current, "fallback function\n");
 		rc = fallback_migrate_page(mapping, newpage, page, mode);
 	}
 
@@ -995,10 +946,8 @@ static int copy_to_new_page(struct page *newpage, struct page *page,
 		newpage->mapping = NULL;
 	} else {
 		if (page_was_mapped) {
-			TRACE_TASK(current, "PAGE_WAS_MAPPED = 1\n");
 			remove_migration_ptes(page, newpage);
 		}
-		//page->mapping = NULL;
 	}
 
 	unlock_page(newpage);
@@ -1178,76 +1127,18 @@ static int __unmap_and_copy(struct page *page, struct page *newpage,
 
 	if (PageWriteback(page)) {
 		/*
-		 * Only in the case of a full synchronous migration is it
-		 * necessary to wait for PageWriteback. In the async case,
-		 * the retry loop is too short and in the sync-light case,
-		 * the overhead of stalling is too much
+		 * The code of shared library cannot be written.
 		 */
 		BUG();
-		/*
-		if (mode != MIGRATE_SYNC) {
-			rc = -EBUSY;
-			goto out_unlock;
-		}
-		if (!force)
-			goto out_unlock;
-		wait_on_page_writeback(page);
-		*/
 	}
-	/*
-	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
-	 * we cannot notice that anon_vma is freed while we migrates a page.
-	 * This get_anon_vma() delays freeing anon_vma pointer until the end
-	 * of migration. File cache pages are no problem because of page_lock()
-	 * File Caches may use write_page() or lock_page() in migration, then,
-	 * just care Anon page here.
-	 */
+
 	if (PageAnon(page) && !PageKsm(page)) {
-		printk(KERN_INFO "ANON but not KSM\n");
+		/* The shared library pages must be backed by a file. */
 		BUG();
-		/*
-		 * Only page_lock_anon_vma_read() understands the subtleties of
-		 * getting a hold on an anon_vma from outside one of its mms.
-		 */
-/*
-		anon_vma = page_get_anon_vma(page);
-		if (anon_vma) {
-*/
-			/*
-			 * Anon page
-			 */
-/*
-		} else if (PageSwapCache(page)) {
-*/
-			/*
-			 * We cannot be sure that the anon_vma of an unmapped
-			 * swapcache page is safe to use because we don't
-			 * know in advance if the VMA that this page belonged
-			 * to still exists. If the VMA and others sharing the
-			 * data have been freed, then the anon_vma could
-			 * already be invalid.
-			 *
-			 * To avoid this possibility, swapcache pages get
-			 * migrated but are not remapped when migration
-			 * completes
-			 */
-/*		} else {
-			goto out_unlock;
-		}
-*/
 	}
 
 	if (unlikely(isolated_balloon_page(page))) {
 		BUG();
-		/*
-		 * A ballooned page does not need any special attention from
-		 * physical to virtual reverse mapping procedures.
-		 * Skip any attempt to unmap PTEs or to remap swap cache,
-		 * in order to avoid burning cycles at rmap level, and perform
-		 * the page migration right away (proteced by page lock).
-		 */
-		rc = balloon_page_migrate(newpage, page, mode);
-		goto out_unlock;
 	}
 
 	/*
@@ -1273,22 +1164,17 @@ static int __unmap_and_copy(struct page *page, struct page *newpage,
 
 	/* Establish migration ptes or remove ptes */
 	if (page_mapped(page)) {
-		// ttu_ret = try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
 		struct rmap_walk_control rwc = {
 			.rmap_one = try_to_unmap_one_only,
 			.arg = (void *)(TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS),
 		};
-
 		ttu_ret = rmap_walk(page, &rwc);
 
 		page_was_mapped = 1;
-		TRACE_TASK(current, "Page %d unmapped from all PTEs\n", page_to_pfn(page));
 	}
 
 skip_unmap:
-	//if (!page_mapped(page)) {
 	if (ttu_ret == SWAP_SUCCESS) {
-		TRACE_TASK(current, "Call copy_to_new_page\n");
 		rc = copy_to_new_page(newpage, page, page_was_mapped, mode, has_replica);
 	} else if (ttu_ret == SWAP_AGAIN)
 		printk(KERN_ERR "rmap_walk returned SWAP_AGAIN\n");
@@ -1418,16 +1304,13 @@ static ICE_noinline int unmap_and_copy(new_page_t get_new_page,
 		newpage = get_new_page(page, private, &result);
 		if (!newpage)
 			return -ENOMEM;
-		//printk(KERN_ERR "Page %lx allocated\n", page_to_pfn(newpage));
 	} else {
 		newpage = lib_page->r_page[cpu];
 		has_replica = 1;
-		//printk(KERN_ERR "Page %lx found\n", page_to_pfn(newpage));
 	}
 
 	if (page_count(page) == 1) {
 		/* page was freed from under us. So we are done. */
-		TRACE_TASK(current, "page %x _count == 1\n", page_to_pfn(page));
 		goto out;
 	}
 
@@ -1443,7 +1326,6 @@ static ICE_noinline int unmap_and_copy(new_page_t get_new_page,
 	}
 
 out:
-TRACE_TASK(current, "__unmap_and_copy returned %s\n", rc==MIGRATEPAGE_SUCCESS?"SUCCESS":"FAIL");
 	if (rc != -EAGAIN) {
 		/*
 		 * A page that has been migrated has all references
@@ -1457,7 +1339,6 @@ TRACE_TASK(current, "__unmap_and_copy returned %s\n", rc==MIGRATEPAGE_SUCCESS?"S
 		putback_lru_page(page);
 	}
 
-//TRACE_TASK(current, "old page freed\n");
 	/*
 	 * If migration was not successful and there's a freeing callback, use
 	 * it. Otherwise, putback_lru_page() will drop the reference grabbed