author     Namhoon Kim <namhoonk@cs.unc.edu>  2016-09-21 07:42:49 -0400
committer  Namhoon Kim <namhoonk@cs.unc.edu>  2016-09-21 07:42:49 -0400
commit     2b1bccf411c97a933796526b0427785a2dafde1d (patch)
tree       52d7cde691596ea8ef6cf842db774bcc4840a1ab
parent     805c547ee3cdc2ef6a5f7556fdf449ced2e48680 (diff)
9/21/2016 (wip-percore-lib)
-rw-r--r--  include/litmus/replicate_lib.h |  4
-rw-r--r--  litmus/litmus.c                | 27
-rw-r--r--  mm/filemap.c                   |  6
-rw-r--r--  mm/migrate.c                   | 71
4 files changed, 69 insertions(+), 39 deletions(-)
diff --git a/include/litmus/replicate_lib.h b/include/litmus/replicate_lib.h
index 480ce4631529..98bfb9707144 100644
--- a/include/litmus/replicate_lib.h
+++ b/include/litmus/replicate_lib.h
@@ -6,9 +6,9 @@
 #include <linux/mm_inline.h>
 
 struct shared_lib_page {
-        struct page *p_page;
+        struct page *master_page;
         struct page *r_page;
-        unsigned long p_pfn;
+        unsigned long master_pfn;
         unsigned long r_pfn;
         struct list_head list;
 };
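The hunk above only renames the fields that describe the original ("master") copy of a shared library page; the replica fields (r_page, r_pfn) are unchanged. As a minimal sketch of how the renamed fields are meant to be consumed, assuming the shared_lib_pages list head and the locking convention used in sys_set_page_color() below (the helper name is hypothetical and not part of this commit):

    #include <linux/list.h>
    #include <litmus/replicate_lib.h>

    extern struct list_head shared_lib_pages;

    /* Hypothetical lookup helper, not in this patch: find the descriptor
     * whose master page has the given PFN. Caller holds rcu_read_lock(),
     * matching the traversal in sys_set_page_color(). */
    static struct shared_lib_page *find_lib_page_by_master_pfn(unsigned long pfn)
    {
            struct shared_lib_page *lp;

            list_for_each_entry(lp, &shared_lib_pages, list) {
                    if (lp->master_pfn == pfn)
                            return lp;
            }
            return NULL;
    }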
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 8e7f5e2e68df..ad3d50c78bb1 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -419,7 +419,7 @@ asmlinkage long sys_set_page_color(int cpu)
         rcu_read_lock();
         list_for_each_entry(lib_page, &shared_lib_pages, list)
         {
-                if (page_to_pfn(old_page) == lib_page->p_pfn) {
+                if (page_to_pfn(old_page) == lib_page->master_pfn) {
                         is_exist = 1;
                         break;
                 }
@@ -428,15 +428,15 @@ asmlinkage long sys_set_page_color(int cpu)
 
         if (is_exist == 0) {
                 lib_page = kmalloc(sizeof(struct shared_lib_page), GFP_KERNEL);
-                lib_page->p_page = old_page;
+                lib_page->master_page = old_page;
                 lib_page->r_page = NULL;
-                lib_page->p_pfn = page_to_pfn(old_page);
+                lib_page->master_pfn = page_to_pfn(old_page);
                 lib_page->r_pfn = INVALID_PFN;
                 list_add_tail(&lib_page->list, &shared_lib_pages);
-                TRACE_TASK(current, "NEW PAGE %ld ADDED.\n", lib_page->p_pfn);
+                TRACE_TASK(current, "NEW PAGE %ld ADDED.\n", lib_page->master_pfn);
         }
         else {
-                TRACE_TASK(current, "FOUND PAGE %ld in the list.\n", lib_page->p_pfn);
+                TRACE_TASK(current, "FOUND PAGE %ld in the list.\n", lib_page->master_pfn);
         }
 
         /* add to task_shared_pagelist */
@@ -490,6 +490,21 @@ asmlinkage long sys_set_page_color(int cpu)
                 }
         }
 
+        {
+                struct list_head *pos, *q;
+                list_for_each_safe(pos, q, &task_shared_pagelist) {
+                        struct page *p_entry = NULL;
+                        struct shared_lib_page *lib_desc = NULL;
+
+                        p_entry = list_entry(pos, struct page, lru);
+                        list_for_each_entry(lib_desc, &shared_lib_pages, list) {
+                                if (p_entry == lib_desc->r_page) {
+                                        list_del(pos);
+                                }
+                        }
+                }
+        }
+
         if (!list_empty(&task_shared_pagelist)) {
                 ret = replicate_pages(&task_shared_pagelist, new_alloc_page, NULL, node, MIGRATE_SYNC, MR_SYSCALL);
                 TRACE_TASK(current, "%ld shared pages not migrated.\n", ret);
@@ -525,7 +540,7 @@ asmlinkage long sys_set_page_color(int cpu)
         rcu_read_lock();
         list_for_each_entry(lpage, &shared_lib_pages, list)
         {
-                TRACE_TASK(current, "p_PFN = %ld r_PFN = %ld\n", lpage->p_pfn, lpage->r_pfn);
+                TRACE_TASK(current, "master_PFN = %ld r_PFN = %ld PageSwapCache=%d\n", lpage->master_pfn, lpage->r_pfn, PageSwapCache(lpage->master_page));
         }
         rcu_read_unlock();
 }
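The block added before the replicate_pages() call walks task_shared_pagelist and drops every page that is already registered as a replica (lib_desc->r_page), so a replica page is never handed back to the migration path as if it were a master page. Deleting entries while walking a list requires the _safe iterator; a stand-alone sketch of that pattern, with a hypothetical predicate standing in for the r_page comparison:

    #include <linux/types.h>
    #include <linux/list.h>
    #include <linux/mm_types.h>

    /* Hypothetical filter (not in this patch): remove pages matching pred()
     * from a private list chained through page->lru. list_for_each_safe()
     * keeps a lookahead pointer, so list_del(pos) is safe mid-iteration. */
    static void prune_page_list(struct list_head *pages, bool (*pred)(struct page *))
    {
            struct list_head *pos, *q;

            list_for_each_safe(pos, q, pages) {
                    struct page *p = list_entry(pos, struct page, lru);

                    if (pred(p))
                            list_del(pos);
            }
    }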
diff --git a/mm/filemap.c b/mm/filemap.c
index 6bf5e42d560a..8f378ac675d7 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -970,7 +970,11 @@ repeat:
         page = NULL;
         pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
         if (pagep) {
-                page = radix_tree_deref_slot(pagep);
+                void *pdesc;
+                pdesc = radix_tree_deref_slot(pagep);
+                if (pdesc)
+                        page = (struct page*)pdesc;
+                //page = radix_tree_deref_slot(pagep);
                 if (unlikely(!page))
                         goto out;
                 if (radix_tree_exception(page)) {
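This hunk only introduces a temporary for the slot contents before the existing NULL and exception checks in find_get_entry(); behavior is unchanged when the slot holds an ordinary page pointer. For context, a condensed sketch of the RCU page-cache lookup idiom the hunk sits in, under the pre-xarray radix-tree API used by this tree (illustrative, not the patched function):

    #include <linux/radix-tree.h>
    #include <linux/pagemap.h>

    /* Sketch: dereference a page-cache slot under rcu_read_lock() and keep
     * only ordinary page pointers; exceptional entries (shadow/swap) are
     * handled separately, as in the code following this hunk. */
    static struct page *deref_cache_slot(struct address_space *mapping, pgoff_t offset)
    {
            void **pagep, *entry;

            pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
            if (!pagep)
                    return NULL;
            entry = radix_tree_deref_slot(pagep);
            if (!entry || radix_tree_exception(entry))
                    return NULL;
            return (struct page *)entry;
    }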
diff --git a/mm/migrate.c b/mm/migrate.c
index c88f881f2daa..a5ff157cfe00 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -408,23 +408,17 @@ int replicate_page_move_mapping(struct address_space *mapping,
         int expected_count = 1 + extra_count;
         void **pslot;
 
-//      if (!mapping) {
-        /* Anonymous page without mapping */
-//              if (page_count(page) != expected_count)
-//                      return -EAGAIN;
-//              return MIGRATEPAGE_SUCCESS;
-//      }
-
+        BUG_ON(!mapping);
         TRACE_TASK(current, "page has mapping.\n");
         spin_lock_irq(&mapping->tree_lock);
 
-        pslot = radix_tree_lookup_slot(&mapping->page_tree,
-                        page_index(page));
+        pslot = radix_tree_lookup_slot(&mapping->page_tree, page_index(page));
 
         expected_count += 1 + page_has_private(page);
 
         TRACE_TASK(current, "page_count(page) = %d, expected_count = %d, page_has_private? %d\n", page_count(page), expected_count, page_has_private(page));
 
+        expected_count++;
         if (page_count(page) != expected_count ||
                 radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
                 spin_unlock_irq(&mapping->tree_lock);
@@ -463,13 +457,15 @@ int replicate_page_move_mapping(struct address_space *mapping,
         }
 
         radix_tree_replace_slot(pslot, newpage);
+        //radix_tree_replace_slot(pslot, page);
 
         /*
          * Drop cache reference from old page by unfreezing
          * to one less reference.
          * We know this isn't the last reference.
          */
-        page_unfreeze_refs(page, expected_count - 1);
+        //page_unfreeze_refs(page, expected_count - 1);
+        page_unfreeze_refs(page, expected_count - 2);
 
         /*
          * If moved to a different zone then also account
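The accounting change is the subtle part of this hunk pair: the replication path now expects one more reference on the master page than stock migration, and releases two references when unfreezing instead of one. For a plain page-cache page with no buffers and extra_count == 0 the arithmetic works out as below; reading the extra reference as one held by the replication caller is an assumption, since the patch does not document it.

    /* Reference accounting in replicate_page_move_mapping(),
     * assuming page_has_private(page) == 0 and extra_count == 0. */
    expected_count = 1 + 0;      /* isolation reference held by the caller   */
    expected_count += 1 + 0;     /* page-cache reference, no private data    */
    expected_count++;            /* additional reference, presumably held by */
                                 /* the replication caller (assumption)      */
    /* page_freeze_refs() therefore requires page_count(page) == 3, and
     * page_unfreeze_refs(page, expected_count - 2) leaves the master page
     * with a single reference once the radix-tree slot points at the replica. */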
@@ -682,7 +678,7 @@ EXPORT_SYMBOL(migrate_page);
 
 int replicate_page(struct address_space *mapping,
                 struct page *newpage, struct page *page,
-                enum migrate_mode mode)
+                enum migrate_mode mode, int has_replica)
 {
         int rc, extra_count = 0;
 
@@ -693,7 +689,8 @@ int replicate_page(struct address_space *mapping,
         if (rc != MIGRATEPAGE_SUCCESS)
                 return rc;
 
-        migrate_page_copy(newpage, page);
+        if (has_replica == 0)
+                migrate_page_copy(newpage, page);
         return MIGRATEPAGE_SUCCESS;
 }
 
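The has_replica flag added to replicate_page() (and to replicate_buffer_page() and copy_to_new_page() below) gates the data copy: when the replica page already holds valid contents from an earlier replication, only the mapping is moved and migrate_page_copy() is skipped so the replica is not clobbered. A condensed sketch of the intended flow; the body is illustrative rather than the patched implementation, and the NULL buffer-head argument is an assumption:

    /* Sketch: move the mapping, then copy data only on the first replication. */
    static int copy_once_sketch(struct address_space *mapping, struct page *newpage,
                                struct page *page, enum migrate_mode mode,
                                int has_replica)
    {
            int rc = replicate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);

            if (rc != MIGRATEPAGE_SUCCESS)
                    return rc;
            if (has_replica == 0)           /* first replication: populate the replica */
                    migrate_page_copy(newpage, page);
            /* has_replica != 0: the replica already holds the data */
            return MIGRATEPAGE_SUCCESS;
    }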
@@ -757,20 +754,23 @@ int buffer_migrate_page(struct address_space *mapping,
 EXPORT_SYMBOL(buffer_migrate_page);
 #endif
 
+extern struct list_head shared_lib_pages;
+
 int replicate_buffer_page(struct address_space *mapping,
-                struct page *newpage, struct page *page, enum migrate_mode mode)
+                struct page *newpage, struct page *page, enum migrate_mode mode,
+                int has_replica)
 {
         struct buffer_head *bh, *head;
         int rc;
 
         if (!page_has_buffers(page)) {
                 TRACE_TASK(current, "page does not have buffers\n");
-                return replicate_page(mapping, newpage, page, mode);
+                return replicate_page(mapping, newpage, page, mode, has_replica);
         }
 
         head = page_buffers(page);
 
-        rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);
+        rc = replicate_page_move_mapping(mapping, newpage, page, head, mode, 0);
 
         if (rc != MIGRATEPAGE_SUCCESS)
                 return rc;
@@ -798,7 +798,8 @@ int replicate_buffer_page(struct address_space *mapping,
 
         SetPagePrivate(newpage);
 
-        migrate_page_copy(newpage, page);
+        if (has_replica == 0)
+                migrate_page_copy(newpage, page);
 
         bh = head;
         do {
@@ -982,7 +983,7 @@ static int copy_to_new_page(struct page *newpage, struct page *page,
          */
         //rc = mapping->a_ops->migratepage(mapping,
         //                              newpage, page, mode);
-        rc = replicate_buffer_page(mapping, newpage, page, mode);
+        rc = replicate_buffer_page(mapping, newpage, page, mode, has_replica);
         }
         else {
                 TRACE_TASK(current, "fallback function\n");
@@ -992,9 +993,13 @@ static int copy_to_new_page(struct page *newpage, struct page *page,
         if (rc != MIGRATEPAGE_SUCCESS) {
                 newpage->mapping = NULL;
         } else {
+                if (mem_cgroup_disabled())
+                        TRACE_TASK(current, "mem_cgroup_disabled()\n");
                 mem_cgroup_migrate(page, newpage, false);
-                if (page_was_mapped)
+                if (page_was_mapped) {
+                        TRACE_TASK(current, "PAGE_WAS_MAPPED = 1\n");
                         remove_migration_ptes(page, newpage);
+                }
                 page->mapping = NULL;
         }
 
@@ -1378,7 +1383,7 @@ static ICE_noinline int unmap_and_copy(new_page_t get_new_page,
         rcu_read_lock();
         list_for_each_entry(lib_page, &shared_lib_pages, list)
         {
-                if (page_to_pfn(page) == lib_page->p_pfn) {
+                if (page_to_pfn(page) == lib_page->master_pfn) {
                         is_exist_in_psl = 1;
                         break;
                 }
@@ -1386,14 +1391,13 @@ static ICE_noinline int unmap_and_copy(new_page_t get_new_page,
         rcu_read_unlock();
 
         if (is_exist_in_psl)
-                TRACE_TASK(current, "Page %ld exists in PSL list\n", lib_page->p_pfn);
+                TRACE_TASK(current, "Page %ld exists in PSL list\n", lib_page->master_pfn);
 
         if (lib_page->r_page == NULL) {
                 newpage = get_new_page(page, private, &result);
                 if (!newpage)
                         return -ENOMEM;
-        }
-        else {
+        } else {
                 newpage = lib_page->r_page;
                 has_replica = 1;
         }
@@ -1409,21 +1413,28 @@ static ICE_noinline int unmap_and_copy(new_page_t get_new_page,
                 goto out;
 
         rc = __unmap_and_copy(page, newpage, force, mode, has_replica);
 
+        if (has_replica == 0) {
+                lib_page->r_page = newpage;
+                lib_page->r_pfn = page_to_pfn(newpage);
+        }
+
 out:
-        if (rc != -EAGAIN) {
+        TRACE_TASK(current, "__unmap_and_copy returned %d\n", rc);
+//      if (rc != -EAGAIN) {
                 /*
                  * A page that has been migrated has all references
                  * removed and will be freed. A page that has not been
                  * migrated will have kepts its references and be
                  * restored.
                  */
-                list_del(&page->lru);
-                dec_zone_page_state(page, NR_ISOLATED_ANON +
-                                page_is_file_cache(page));
-                putback_lru_page(page);
-        }
+//              list_del(&page->lru);
+//              dec_zone_page_state(page, NR_ISOLATED_ANON +
+//                              page_is_file_cache(page));
+//              putback_lru_page(page);
+//      }
 
+        TRACE_TASK(current, "old page freed\n");
         /*
          * If migration was not successful and there's a freeing callback, use
          * it. Otherwise, putback_lru_page() will drop the reference grabbed
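Taken with the earlier hunk in unmap_and_copy(), the control flow is now: allocate a fresh replica only when the descriptor has none, record it after the first copy, and reuse it on every later call; the stock list_del/putback of the old page is disabled in this work-in-progress commit. A condensed consolidation of the two hunks, with error handling and locking elided (illustrative, not new code in the patch):

    /* Sketch: one replica per master page, created on first use. */
    if (lib_page->r_page == NULL) {
            newpage = get_new_page(page, private, &result);  /* first replication */
            if (!newpage)
                    return -ENOMEM;
    } else {
            newpage = lib_page->r_page;                      /* reuse existing replica */
            has_replica = 1;
    }

    rc = __unmap_and_copy(page, newpage, force, mode, has_replica);

    if (has_replica == 0) {                                  /* remember the new replica */
            lib_page->r_page = newpage;
            lib_page->r_pfn = page_to_pfn(newpage);
    }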