 include/linux/migrate.h        |   8
 include/litmus/replicate_lib.h |   6
 kernel/sched/litmus.c          |   5
 litmus/litmus.c                |  60
 litmus/replicate_lib.c         |   2
 mm/migrate.c                   | 552
 6 files changed, 618 insertions(+), 15 deletions(-)
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index cac1c0904d5f..b16047b82472 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -33,6 +33,8 @@ extern int migrate_page(struct address_space *,
 			struct page *, struct page *, enum migrate_mode);
 extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
 		unsigned long private, enum migrate_mode mode, int reason);
+extern int replicate_pages(struct list_head *l, new_page_t new, free_page_t free,
+		unsigned long private, enum migrate_mode mode, int reason);
 
 extern int migrate_prep(void);
 extern int migrate_prep_local(void);
@@ -50,7 +52,11 @@ static inline int migrate_pages(struct list_head *l, new_page_t new,
 		free_page_t free, unsigned long private, enum migrate_mode mode,
 		int reason)
 	{ return -ENOSYS; }
+static inline int replicate_pages(struct list_head *l, new_page_t new,
+		free_page_t free, unsigned long private, enum migrate_mode mode,
+		int reason)
+	{ return -ENOSYS; }
 
 static inline int migrate_prep(void) { return -ENOSYS; }
 static inline int migrate_prep_local(void) { return -ENOSYS; }
 
diff --git a/include/litmus/replicate_lib.h b/include/litmus/replicate_lib.h
index af2af36b6b79..480ce4631529 100644
--- a/include/litmus/replicate_lib.h
+++ b/include/litmus/replicate_lib.h
@@ -7,8 +7,12 @@
 
 struct shared_lib_page {
 	struct page *p_page;
-	unsigned long pfn;
+	struct page *r_page;
+	unsigned long p_pfn;
+	unsigned long r_pfn;
 	struct list_head list;
 };
 
+extern struct list_head shared_lib_pages;
+
 #endif
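
Both litmus/litmus.c and mm/migrate.c below repeat the same lookup against the global PSL list declared here: walk shared_lib_pages under rcu_read_lock() and compare against the original page's PFN. A minimal sketch of that pattern, with a hypothetical helper name (each call site in the patch open-codes the loop):

	/* Hypothetical helper, not part of the patch. */
	static struct shared_lib_page *psl_lookup(unsigned long pfn)
	{
		struct shared_lib_page *lib_page;

		rcu_read_lock();
		list_for_each_entry(lib_page, &shared_lib_pages, list) {
			if (lib_page->p_pfn == pfn) {
				rcu_read_unlock();
				return lib_page;
			}
		}
		rcu_read_unlock();
		return NULL;
	}

r_page and r_pfn start out as NULL and INVALID_PFN when an entry is created, so a hit with r_page == NULL means "known shared page, no replica allocated yet".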
diff --git a/kernel/sched/litmus.c b/kernel/sched/litmus.c
index 9d58690cf51a..cd36358cee75 100644
--- a/kernel/sched/litmus.c
+++ b/kernel/sched/litmus.c
@@ -20,8 +20,9 @@ static void update_time_litmus(struct rq *rq, struct task_struct *p)
 	/* task counter */
 	p->se.sum_exec_runtime += delta;
 	if (delta) {
-		TRACE_TASK(p, "charged %llu exec time (total:%llu, rem:%llu)\n",
-			delta, p->rt_param.job_params.exec_time, budget_remaining(p));
+		//TRACE_TASK(p, "charged %llu exec time (total:%llu, rem:%llu)\n",
+			//delta, p->rt_param.job_params.exec_time, budget_remaining(p));
+		;
 	}
 	/* sched_clock() */
 	p->se.exec_start = rq->clock;
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 402c495f62c6..8e7f5e2e68df 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -353,6 +353,10 @@ extern void putback_movable_page(struct page *page);
 extern struct page *new_alloc_page(struct page *page, unsigned long node, int **x);
 
 DECLARE_PER_CPU(struct list_head, shared_lib_page_list);
+#define INVALID_PFN (0xffffffff)
+LIST_HEAD(shared_lib_pages);
+//struct list_head shared_lib_pages = LIST_HEAD_INIT(shared_lib_pages);
+EXPORT_SYMBOL(shared_lib_pages);
 
 asmlinkage long sys_set_page_color(int cpu)
 {
@@ -366,8 +370,8 @@ asmlinkage long sys_set_page_color(int cpu)
 	//struct list_head *shared_pagelist = this_cpu_ptr(&shared_lib_page_list);
 
 	LIST_HEAD(pagelist);
-	LIST_HEAD(shared_pagelist);
+	LIST_HEAD(task_shared_pagelist);
 
 	migrate_prep();
 
 	rcu_read_lock();
@@ -408,10 +412,36 @@ asmlinkage long sys_set_page_color(int cpu)
 
 		if (page_count(old_page) > 2 && vma_itr->vm_file != NULL && !(vma_itr->vm_flags&VM_WRITE)) {
 			struct shared_lib_page *lib_page;
-			lib_page = kmalloc(sizeof(struct shared_lib_page), GFP_KERNEL);
-			lib_page->p_page = old_page;
-			lib_page->pfn = page_to_pfn(old_page);
-			list_add_tail(&lib_page->list, &shared_pagelist);
+			int is_exist = 0;
+
+			/* update PSL list */
+			/* check if this page is in the PSL list */
+			rcu_read_lock();
+			list_for_each_entry(lib_page, &shared_lib_pages, list)
+			{
+				if (page_to_pfn(old_page) == lib_page->p_pfn) {
+					is_exist = 1;
+					break;
+				}
+			}
+			rcu_read_unlock();
+
+			if (is_exist == 0) {
+				lib_page = kmalloc(sizeof(struct shared_lib_page), GFP_KERNEL);
+				lib_page->p_page = old_page;
+				lib_page->r_page = NULL;
+				lib_page->p_pfn = page_to_pfn(old_page);
+				lib_page->r_pfn = INVALID_PFN;
+				list_add_tail(&lib_page->list, &shared_lib_pages);
+				TRACE_TASK(current, "NEW PAGE %ld ADDED.\n", lib_page->p_pfn);
+			}
+			else {
+				TRACE_TASK(current, "FOUND PAGE %ld in the list.\n", lib_page->p_pfn);
+			}
+
+			/* add to task_shared_pagelist */
+			list_add_tail(&old_page->lru, &task_shared_pagelist);
+
 			nr_shared_pages++;
 			TRACE_TASK(current, "SHARED\n");
 		}
@@ -428,7 +458,7 @@ asmlinkage long sys_set_page_color(int cpu)
 			}
 			//printk(KERN_INFO "PRIVATE _mapcount = %d, _count = %d\n", page_mapcount(old_page), page_count(old_page));
 			put_page(old_page);
-			TRACE_TASK(current, "PRIVATE\n");
+			//TRACE_TASK(current, "PRIVATE\n");
 		}
 	}
 	TRACE_TASK(current, "PAGES_IN_VMA = %d size = %d KB\n", pages_in_vma, pages_in_vma*4);
@@ -454,13 +484,21 @@ asmlinkage long sys_set_page_color(int cpu)
 	if (!list_empty(&pagelist)) {
 		ret = migrate_pages(&pagelist, new_alloc_page, NULL, node, MIGRATE_SYNC, MR_SYSCALL);
 		TRACE_TASK(current, "%ld pages not migrated.\n", ret);
-		printk(KERN_INFO "%ld pages not migrated.\n", ret);
 		nr_not_migrated = ret;
 		if (ret) {
 			putback_movable_pages(&pagelist);
 		}
 	}
 
+	if (!list_empty(&task_shared_pagelist)) {
+		ret = replicate_pages(&task_shared_pagelist, new_alloc_page, NULL, node, MIGRATE_SYNC, MR_SYSCALL);
+		TRACE_TASK(current, "%ld shared pages not migrated.\n", ret);
+		nr_not_migrated += ret;
+		if (ret) {
+			putback_movable_pages(&task_shared_pagelist);
+		}
+	}
+
 	/* handle sigpage and litmus ctrl_page */
 /*	vma_itr = current->mm->mmap;
 	while (vma_itr != NULL) {
@@ -480,14 +518,14 @@ asmlinkage long sys_set_page_color(int cpu)
 
 	flush_cache(1);
 /* for debug START */
-	TRACE_TASK(current, "SHARED PAGES\n");
+	TRACE_TASK(current, "PSL PAGES\n");
 	{
 		struct shared_lib_page *lpage;
 
 		rcu_read_lock();
-		list_for_each_entry(lpage, &shared_pagelist, list)
+		list_for_each_entry(lpage, &shared_lib_pages, list)
 		{
-			TRACE_TASK(current, "PFN = %ld\n", lpage->pfn);
+			TRACE_TASK(current, "p_PFN = %ld r_PFN = %ld\n", lpage->p_pfn, lpage->r_pfn);
 		}
 		rcu_read_unlock();
 	}
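
sys_set_page_color() above classifies a mapped page as a shared-library candidate when it is file-backed, mapped read-only, and has more than two references. A compact restatement of that test, using a hypothetical helper name (the syscall open-codes the check):

	/* Hypothetical helper, not part of the patch. */
	static bool is_shared_lib_candidate(struct page *page, struct vm_area_struct *vma)
	{
		return page_count(page) > 2 &&
		       vma->vm_file != NULL &&
		       !(vma->vm_flags & VM_WRITE);
	}

Pages that pass are queued on task_shared_pagelist and handed to replicate_pages(); everything else stays on pagelist and goes through the ordinary migrate_pages() path.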
diff --git a/litmus/replicate_lib.c b/litmus/replicate_lib.c
index 7aa240058ef5..cfc525809412 100644
--- a/litmus/replicate_lib.c
+++ b/litmus/replicate_lib.c
@@ -25,6 +25,8 @@ DEFINE_PER_CPU(struct list_head, shared_lib_page_list);
 #define shared_lib_pages_for(cpu_id)	(&per_cpu(shared_lib_page_list, cpu_id))
 #define local_shared_lib_pages()	(this_cpu_ptr(&shared_lib_page_list))
 
+#define INVALID_PFN (0xffffffff)
+
 static int __init litmus_replicate_lib_init(void)
 {
 	int cpu, ret = 0;
diff --git a/mm/migrate.c b/mm/migrate.c
index f53838fe3dfe..c88f881f2daa 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -38,6 +38,8 @@
 #include <linux/balloon_compaction.h>
 #include <linux/mmu_notifier.h>
 
+#include <litmus/litmus.h> // for TRACE_TASK
+
 #include <asm/tlbflush.h>
 
 #define CREATE_TRACE_POINTS
@@ -391,6 +393,106 @@ int migrate_page_move_mapping(struct address_space *mapping,
 }
 
 /*
+ * Replace the page in the mapping.
+ *
+ * The number of remaining references must be:
+ *  1 for anonymous pages without a mapping
+ *  2 for pages with a mapping
+ *  3 for pages with a mapping and PagePrivate/PagePrivate2 set.
+ */
+int replicate_page_move_mapping(struct address_space *mapping,
+		struct page *newpage, struct page *page,
+		struct buffer_head *head, enum migrate_mode mode,
+		int extra_count)
+{
+	int expected_count = 1 + extra_count;
+	void **pslot;
+
+//	if (!mapping) {
+		/* Anonymous page without mapping */
+//		if (page_count(page) != expected_count)
+//			return -EAGAIN;
+//		return MIGRATEPAGE_SUCCESS;
+//	}
+
+	TRACE_TASK(current, "page has mapping.\n");
+	spin_lock_irq(&mapping->tree_lock);
+
+	pslot = radix_tree_lookup_slot(&mapping->page_tree,
+					page_index(page));
+
+	expected_count += 1 + page_has_private(page);
+
+	TRACE_TASK(current, "page_count(page) = %d, expected_count = %d, page_has_private? %d\n", page_count(page), expected_count, page_has_private(page));
+
+	if (page_count(page) != expected_count ||
+		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
+		spin_unlock_irq(&mapping->tree_lock);
+		TRACE_TASK(current, "1\n");
+		return -EAGAIN;
+	}
+
+	if (!page_freeze_refs(page, expected_count)) {
+		spin_unlock_irq(&mapping->tree_lock);
+		TRACE_TASK(current, "2\n");
+		return -EAGAIN;
+	}
+
+	/*
+	 * In the async migration case of moving a page with buffers, lock the
+	 * buffers using trylock before the mapping is moved. If the mapping
+	 * was moved, we later failed to lock the buffers and could not move
+	 * the mapping back due to an elevated page count, we would have to
+	 * block waiting on other references to be dropped.
+	 */
+/*	if (mode == MIGRATE_ASYNC && head &&
+			!buffer_migrate_lock_buffers(head, mode)) {
+		page_unfreeze_refs(page, expected_count);
+		spin_unlock_irq(&mapping->tree_lock);
+		TRACE_TASK(current, "3\n");
+		return -EAGAIN;
+	}
+*/
+	/*
+	 * Now we know that no one else is looking at the page.
+	 */
+	get_page(newpage);	/* add cache reference */
+	if (PageSwapCache(page)) {
+		SetPageSwapCache(newpage);
+		set_page_private(newpage, page_private(page));
+	}
+
+	radix_tree_replace_slot(pslot, newpage);
+
+	/*
+	 * Drop cache reference from old page by unfreezing
+	 * to one less reference.
+	 * We know this isn't the last reference.
+	 */
+	page_unfreeze_refs(page, expected_count - 1);
+
+	/*
+	 * If moved to a different zone then also account
+	 * the page for that zone. Other VM counters will be
+	 * taken care of when we establish references to the
+	 * new page and drop references to the old page.
+	 *
+	 * Note that anonymous pages are accounted for
+	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
+	 * are mapped to swap space.
+	 */
+	__dec_zone_page_state(page, NR_FILE_PAGES);
+	__inc_zone_page_state(newpage, NR_FILE_PAGES);
+	if (!PageSwapCache(page) && PageSwapBacked(page)) {
+		__dec_zone_page_state(page, NR_SHMEM);
+		__inc_zone_page_state(newpage, NR_SHMEM);
+	}
+	spin_unlock_irq(&mapping->tree_lock);
+
+	return MIGRATEPAGE_SUCCESS;
+}
+
+/*
  * The expected number of remaining references is the same as that
  * of migrate_page_move_mapping().
  */
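
As a concrete instance of the reference check above, assume a file-backed page-cache page with PagePrivate set and extra_count == 0 (the way replicate_page() below calls this function):

	/* Worked example, not part of the patch:
	 *   expected_count  = 1 + extra_count;             -> 1
	 *   expected_count += 1 + page_has_private(page);  -> 3
	 * The function returns -EAGAIN unless page_count(page) == 3 and the
	 * radix-tree slot still points at the old page; otherwise the count
	 * is frozen, the slot is switched to newpage, and the old page is
	 * unfrozen at expected_count - 1, dropping its cache reference.
	 */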
@@ -578,6 +680,23 @@ int migrate_page(struct address_space *mapping,
 }
 EXPORT_SYMBOL(migrate_page);
 
+int replicate_page(struct address_space *mapping,
+		struct page *newpage, struct page *page,
+		enum migrate_mode mode)
+{
+	int rc, extra_count = 0;
+
+	BUG_ON(PageWriteback(page));	/* Writeback must be complete */
+	//extra_count = page_count(page) - 2;
+	rc = replicate_page_move_mapping(mapping, newpage, page, NULL, mode, extra_count);
+	TRACE_TASK(current, "replicate_page_move_mapping returned %d\n", rc);
+	if (rc != MIGRATEPAGE_SUCCESS)
+		return rc;
+
+	migrate_page_copy(newpage, page);
+	return MIGRATEPAGE_SUCCESS;
+}
+
 #ifdef CONFIG_BLOCK
 /*
  * Migration function for pages with buffers. This function can only be used
@@ -638,6 +757,60 @@ int buffer_migrate_page(struct address_space *mapping,
 EXPORT_SYMBOL(buffer_migrate_page);
 #endif
 
+int replicate_buffer_page(struct address_space *mapping,
+		struct page *newpage, struct page *page, enum migrate_mode mode)
+{
+	struct buffer_head *bh, *head;
+	int rc;
+
+	if (!page_has_buffers(page)) {
+		TRACE_TASK(current, "page does not have buffers\n");
+		return replicate_page(mapping, newpage, page, mode);
+	}
+
+	head = page_buffers(page);
+
+	rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);
+
+	if (rc != MIGRATEPAGE_SUCCESS)
+		return rc;
+
+	/*
+	 * In the async case, migrate_page_move_mapping locked the buffers
+	 * with an IRQ-safe spinlock held. In the sync case, the buffers
+	 * need to be locked now
+	 */
+	if (mode != MIGRATE_ASYNC)
+		BUG_ON(!buffer_migrate_lock_buffers(head, mode));
+
+	ClearPagePrivate(page);
+	set_page_private(newpage, page_private(page));
+	set_page_private(page, 0);
+	put_page(page);
+	get_page(newpage);
+
+	bh = head;
+	do {
+		set_bh_page(bh, newpage, bh_offset(bh));
+		bh = bh->b_this_page;
+
+	} while (bh != head);
+
+	SetPagePrivate(newpage);
+
+	migrate_page_copy(newpage, page);
+
+	bh = head;
+	do {
+		unlock_buffer(bh);
+		put_bh(bh);
+		bh = bh->b_this_page;
+
+	} while (bh != head);
+
+	return MIGRATEPAGE_SUCCESS;
+}
+
 /*
  * Writeback a page to clean the dirty state
  */
@@ -763,6 +936,74 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 	return rc;
 }
 
+/*
+ * Copy a page to a newly allocated page
+ * The page is locked and all ptes have been successfully removed.
+ *
+ * The new page will have replaced the old page if this function
+ * is successful.
+ *
+ * Return value:
+ *   < 0 - error code
+ *  MIGRATEPAGE_SUCCESS - success
+ */
+static int copy_to_new_page(struct page *newpage, struct page *page,
+				int page_was_mapped, enum migrate_mode mode,
+				int has_replica)
+{
+	struct address_space *mapping;
+	int rc;
+
+	/*
+	 * Block others from accessing the page when we get around to
+	 * establishing additional references. We are the only one
+	 * holding a reference to the new page at this point.
+	 */
+	if (!trylock_page(newpage))
+		BUG();
+
+	/* Prepare mapping for the new page.*/
+	newpage->index = page->index;
+	newpage->mapping = page->mapping;
+	if (PageSwapBacked(page))
+		SetPageSwapBacked(newpage);
+
+	mapping = page_mapping(page);
+	if (!mapping) {
+		rc = migrate_page(mapping, newpage, page, mode);
+	}
+	else if (mapping->a_ops->migratepage) {
+		TRACE_TASK(current, "ops migration callback\n");
+		/*
+		 * Most pages have a mapping and most filesystems provide a
+		 * migratepage callback. Anonymous pages are part of swap
+		 * space which also has its own migratepage callback. This
+		 * is the most common path for page migration.
+		 */
+		//rc = mapping->a_ops->migratepage(mapping,
+		//				newpage, page, mode);
+		rc = replicate_buffer_page(mapping, newpage, page, mode);
+	}
+	else {
+		TRACE_TASK(current, "fallback function\n");
+		rc = fallback_migrate_page(mapping, newpage, page, mode);
+	}
+
+	if (rc != MIGRATEPAGE_SUCCESS) {
+		newpage->mapping = NULL;
+	} else {
+		mem_cgroup_migrate(page, newpage, false);
+		if (page_was_mapped)
+			remove_migration_ptes(page, newpage);
+		page->mapping = NULL;
+	}
+
+	unlock_page(newpage);
+
+	return rc;
+}
+
+
 static int __unmap_and_move(struct page *page, struct page *newpage,
 				int force, enum migrate_mode mode)
 {
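
copy_to_new_page() is a modified clone of move_to_new_page(); its dispatch can be summarized as follows (a restatement of the branches above, not new behavior):

	/* no mapping                     -> migrate_page()          (anonymous page)
	 * mapping with ->migratepage     -> replicate_buffer_page() (the patch bypasses
	 *                                   the filesystem callback)
	 * mapping without ->migratepage  -> fallback_migrate_page()
	 */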
@@ -901,6 +1142,146 @@ out:
 	return rc;
 }
 
+static int __unmap_and_copy(struct page *page, struct page *newpage,
+				int force, enum migrate_mode mode, int has_replica)
+{
+	int rc = -EAGAIN;
+	int page_was_mapped = 0;
+	struct anon_vma *anon_vma = NULL;
+
+	if (!trylock_page(page)) {
+		if (!force || mode == MIGRATE_ASYNC)
+			goto out;
+
+		/*
+		 * It's not safe for direct compaction to call lock_page.
+		 * For example, during page readahead pages are added locked
+		 * to the LRU. Later, when the IO completes the pages are
+		 * marked uptodate and unlocked. However, the queueing
+		 * could be merging multiple pages for one bio (e.g.
+		 * mpage_readpages). If an allocation happens for the
+		 * second or third page, the process can end up locking
+		 * the same page twice and deadlocking. Rather than
+		 * trying to be clever about what pages can be locked,
+		 * avoid the use of lock_page for direct compaction
+		 * altogether.
+		 */
+		if (current->flags & PF_MEMALLOC)
+			goto out;
+
+		lock_page(page);
+	}
+
+	if (PageWriteback(page)) {
+		/*
+		 * Only in the case of a full synchronous migration is it
+		 * necessary to wait for PageWriteback. In the async case,
+		 * the retry loop is too short and in the sync-light case,
+		 * the overhead of stalling is too much
+		 */
+		if (mode != MIGRATE_SYNC) {
+			rc = -EBUSY;
+			goto out_unlock;
+		}
+		if (!force)
+			goto out_unlock;
+		wait_on_page_writeback(page);
+	}
+	/*
+	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
+	 * we cannot notice that anon_vma is freed while we migrates a page.
+	 * This get_anon_vma() delays freeing anon_vma pointer until the end
+	 * of migration. File cache pages are no problem because of page_lock()
+	 * File Caches may use write_page() or lock_page() in migration, then,
+	 * just care Anon page here.
+	 */
+	if (PageAnon(page) && !PageKsm(page)) {
+		/*
+		 * Only page_lock_anon_vma_read() understands the subtleties of
+		 * getting a hold on an anon_vma from outside one of its mms.
+		 */
+		anon_vma = page_get_anon_vma(page);
+		if (anon_vma) {
+			/*
+			 * Anon page
+			 */
+		} else if (PageSwapCache(page)) {
+			/*
+			 * We cannot be sure that the anon_vma of an unmapped
+			 * swapcache page is safe to use because we don't
+			 * know in advance if the VMA that this page belonged
+			 * to still exists. If the VMA and others sharing the
+			 * data have been freed, then the anon_vma could
+			 * already be invalid.
+			 *
+			 * To avoid this possibility, swapcache pages get
+			 * migrated but are not remapped when migration
+			 * completes
+			 */
+		} else {
+			goto out_unlock;
+		}
+	}
+
+	if (unlikely(isolated_balloon_page(page))) {
+		/*
+		 * A ballooned page does not need any special attention from
+		 * physical to virtual reverse mapping procedures.
+		 * Skip any attempt to unmap PTEs or to remap swap cache,
+		 * in order to avoid burning cycles at rmap level, and perform
+		 * the page migration right away (proteced by page lock).
+		 */
+		rc = balloon_page_migrate(newpage, page, mode);
+		goto out_unlock;
+	}
+
+	/*
+	 * Corner case handling:
+	 * 1. When a new swap-cache page is read into, it is added to the LRU
+	 * and treated as swapcache but it has no rmap yet.
+	 * Calling try_to_unmap() against a page->mapping==NULL page will
+	 * trigger a BUG. So handle it here.
+	 * 2. An orphaned page (see truncate_complete_page) might have
+	 * fs-private metadata. The page can be picked up due to memory
+	 * offlining. Everywhere else except page reclaim, the page is
+	 * invisible to the vm, so the page can not be migrated. So try to
+	 * free the metadata, so the page can be freed.
+	 */
+	if (!page->mapping) {
+		VM_BUG_ON_PAGE(PageAnon(page), page);
+		if (page_has_private(page)) {
+			try_to_free_buffers(page);
+			goto out_unlock;
+		}
+		goto skip_unmap;
+	}
+
+	/* Establish migration ptes or remove ptes */
+	if (page_mapped(page)) {
+		try_to_unmap(page,
+			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+		page_was_mapped = 1;
+	}
+
+skip_unmap:
+	if (!page_mapped(page)) {
+		TRACE_TASK(current, "Call copy_to_new_page\n");
+		rc = copy_to_new_page(newpage, page, page_was_mapped, mode, has_replica);
+	}
+
+	if (rc && page_was_mapped)
+		remove_migration_ptes(page, page);
+
+	/* Drop an anon_vma reference if we took one */
+	if (anon_vma)
+		put_anon_vma(anon_vma);
+
+out_unlock:
+	unlock_page(page);
+out:
+	return rc;
+}
+
 /*
  * gcc 4.7 and 4.8 on arm get an ICEs when inlining unmap_and_move(). Work
  * around it.
@@ -976,6 +1357,97 @@ out:
 }
 
 /*
+ * Obtain the lock on page, remove all ptes.
+ * 1) If r_pfn == INVALID_PFN, then copy the page to the newly allocated page in newpage.
+ * 2) If r_pfn != INVALID_PFN, then unmap and modify ptes.
+ */
+#include <litmus/replicate_lib.h>
+
+static ICE_noinline int unmap_and_copy(new_page_t get_new_page,
+				   free_page_t put_new_page,
+				   unsigned long private, struct page *page,
+				   int force, enum migrate_mode mode)
+{
+	int rc = 0;
+	int *result = NULL;
+	struct page *newpage;
+	struct shared_lib_page *lib_page;
+	int is_exist_in_psl = 0, has_replica = 0;
+
+	/* check if this page is in the PSL list */
+	rcu_read_lock();
+	list_for_each_entry(lib_page, &shared_lib_pages, list)
+	{
+		if (page_to_pfn(page) == lib_page->p_pfn) {
+			is_exist_in_psl = 1;
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	if (is_exist_in_psl)
+		TRACE_TASK(current, "Page %ld exists in PSL list\n", lib_page->p_pfn);
+
+	if (lib_page->r_page == NULL) {
+		newpage = get_new_page(page, private, &result);
+		if (!newpage)
+			return -ENOMEM;
+	}
+	else {
+		newpage = lib_page->r_page;
+		has_replica = 1;
+	}
+
+	if (page_count(page) == 1) {
+		/* page was freed from under us. So we are done. */
+		TRACE_TASK(current, "page %ld _count == 1\n", page_to_pfn(page));
+		goto out;
+	}
+
+	if (unlikely(PageTransHuge(page)))
+		if (unlikely(split_huge_page(page)))
+			goto out;
+
+	rc = __unmap_and_copy(page, newpage, force, mode, has_replica);
+
+out:
+	if (rc != -EAGAIN) {
+		/*
+		 * A page that has been migrated has all references
+		 * removed and will be freed. A page that has not been
+		 * migrated will have kepts its references and be
+		 * restored.
+		 */
+		list_del(&page->lru);
+		dec_zone_page_state(page, NR_ISOLATED_ANON +
+				page_is_file_cache(page));
+		putback_lru_page(page);
+	}
+
+	/*
+	 * If migration was not successful and there's a freeing callback, use
+	 * it. Otherwise, putback_lru_page() will drop the reference grabbed
+	 * during isolation.
+	 */
+	if (rc != MIGRATEPAGE_SUCCESS && put_new_page) {
+		ClearPageSwapBacked(newpage);
+		put_new_page(newpage, private);
+	} else if (unlikely(__is_movable_balloon_page(newpage))) {
+		/* drop our reference, page already in the balloon */
+		put_page(newpage);
+	} else
+		putback_lru_page(newpage);
+
+	if (result) {
+		if (rc)
+			*result = rc;
+		else
+			*result = page_to_nid(newpage);
+	}
+	return rc;
+}
+
+/*
  * Counterpart of unmap_and_move_page() for hugepage migration.
  *
  * This function doesn't wait the completion of hugepage I/O
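
The header comment above keys the two cases on r_pfn, while the code itself tests lib_page->r_page. The hunk allocates or reuses a replica but does not itself store a newly allocated replica back into the PSL entry; a hedged sketch of how a caller might record it after a successful copy (hypothetical follow-up, not part of this patch):

	if (rc == MIGRATEPAGE_SUCCESS && lib_page->r_page == NULL) {
		lib_page->r_page = newpage;
		lib_page->r_pfn = page_to_pfn(newpage);
	}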
@@ -1159,6 +1631,86 @@ out:
 	return rc;
 }
 
+/*
+ * replicate_pages - replicate the pages specified in a list
+ *
+ * @from:		The list of pages to be migrated.
+ * @get_new_page:	The function used to allocate free pages to be used
+ *			if there is no replicated page.
+ * @put_new_page:	The function used to free target pages if migration
+ *			fails, or NULL if no special handling is necessary.
+ * @private:		Private data to be passed on to get_new_page()
+ * @mode:		The migration mode that specifies the constraints for
+ *			page migration, if any.
+ * @reason:		The reason for page migration.
+ *
+ * The function returns after 10 attempts or if no pages are movable any more
+ * because the list has become empty or no retryable pages exist any more.
+ * The caller should call putback_lru_pages() to return pages to the LRU
+ * or free list only if ret != 0.
+ *
+ * Returns the number of pages that were not migrated, or an error code.
+ */
+int replicate_pages(struct list_head *from, new_page_t get_new_page,
+		free_page_t put_new_page, unsigned long private,
+		enum migrate_mode mode, int reason)
+{
+	int retry = 1;
+	int nr_failed = 0;
+	int nr_succeeded = 0;
+	int pass = 0;
+	struct page *page;
+	struct page *page2;
+	int swapwrite = current->flags & PF_SWAPWRITE;
+	int rc;
+
+	if (!swapwrite)
+		current->flags |= PF_SWAPWRITE;
+
+	for(pass = 0; pass < 10 && retry; pass++) {
+		retry = 0;
+
+		list_for_each_entry_safe(page, page2, from, lru) {
+			cond_resched();
+
+			rc = unmap_and_copy(get_new_page, put_new_page, private, page, pass > 2, mode);
+			TRACE_TASK(current, "rc = %d\n", rc);
+
+			switch(rc) {
+			case -ENOMEM:
+				goto out;
+			case -EAGAIN:
+				retry++;
+				break;
+			case MIGRATEPAGE_SUCCESS:
+				nr_succeeded++;
+				break;
+			default:
+				/*
+				 * Permanent failure (-EBUSY, -ENOSYS, etc.):
+				 * unlike -EAGAIN case, the failed page is
+				 * removed from migration page list and not
+				 * retried in the next outer loop.
+				 */
+				nr_failed++;
+				break;
+			}
+		}
+	}
+	rc = nr_failed + retry;
+out:
+	if (nr_succeeded)
+		count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
+	if (nr_failed)
+		count_vm_events(PGMIGRATE_FAIL, nr_failed);
+	trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);
+
+	if (!swapwrite)
+		current->flags &= ~PF_SWAPWRITE;
+
+	return rc;
+}
+
 #ifdef CONFIG_NUMA
 /*
  * Move a list of individual pages
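
replicate_pages() mirrors the retry loop of migrate_pages(), swapping unmap_and_move() for unmap_and_copy(). A minimal usage sketch matching the call site added in litmus/litmus.c above (new_alloc_page and node come from that file):

	LIST_HEAD(task_shared_pagelist);

	/* ... isolate read-only, file-backed library pages onto the list ... */

	ret = replicate_pages(&task_shared_pagelist, new_alloc_page, NULL,
			      node, MIGRATE_SYNC, MR_SYSCALL);
	if (ret)
		/* some pages were not replicated; return them to the LRU */
		putback_movable_pages(&task_shared_pagelist);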