author    Namhoon Kim <namhoonk@cs.unc.edu>  2016-09-25 16:31:19 -0400
committer Namhoon Kim <namhoonk@cs.unc.edu>  2016-09-25 16:31:19 -0400
commit    cbe4c29edb5e783a5839d72705c18e5a795c4e05 (patch)
tree      5f1b36d0a82f82351fdfa3243ea769e8c83d65a5
parent    ba1e5e20e2a3481b973244e84597ed246a04fed0 (diff)
Fix isolate_lru_page for shared pages
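
Shared pages discovered via follow_page(FOLL_GET) were added to
task_shared_pagelist without first being isolated from the LRU, and no
put_page() balanced the follow_page() reference on that path. Isolate
each shared page before queueing it, account it under NR_ISOLATED_ANON /
NR_ISOLATED_FILE as the migration code expects, and drop the
follow_page() reference once the page is queued. Also use the standard
expected_count in replicate_page_move_mapping() instead of bumping it by
one and unfreezing to expected_count - 2, make sys_test_call() dump
per-page state through TRACE_TASK() instead of printk(), and add a debug
printk to filemap_fault() for real-time tasks.

A minimal sketch of the isolation pattern now used on the shared-page
path (locking and the surrounding loop in sys_set_page_color() are
elided):

	/* old_page was obtained with follow_page(..., FOLL_GET) */
	ret = isolate_lru_page(old_page);	/* returns 0 on success */
	if (!ret) {
		list_add_tail(&old_page->lru, &task_shared_pagelist);
		/* keep the isolated-page accounting that putback relies on */
		inc_zone_page_state(old_page,
				    NR_ISOLATED_ANON + !PageSwapBacked(old_page));
	}
	/* drop the reference taken by follow_page() */
	put_page(old_page);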
-rw-r--r--  litmus/litmus.c  54
-rw-r--r--  mm/filemap.c      5
-rw-r--r--  mm/migrate.c     12
3 files changed, 50 insertions(+), 21 deletions(-)
diff --git a/litmus/litmus.c b/litmus/litmus.c
index ad3d50c78bb1..f88cd16ab86d 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -440,10 +440,16 @@ asmlinkage long sys_set_page_color(int cpu)
 		}
 
 		/* add to task_shared_pagelist */
-		list_add_tail(&old_page->lru, &task_shared_pagelist);
-
-		nr_shared_pages++;
-		TRACE_TASK(current, "SHARED\n");
+		ret = isolate_lru_page(old_page);
+		if (!ret) {
+			list_add_tail(&old_page->lru, &task_shared_pagelist);
+			inc_zone_page_state(old_page, NR_ISOLATED_ANON + !PageSwapBacked(old_page));
+			nr_shared_pages++;
+			TRACE_TASK(current, "SHARED isolate_lru_page success\n");
+		} else {
+			TRACE_TASK(current, "SHARED isolate_lru_page failed\n");
+		}
+		put_page(old_page);
 	}
 	else {
 		ret = isolate_lru_page(old_page);
@@ -489,7 +495,7 @@ asmlinkage long sys_set_page_color(int cpu)
 			putback_movable_pages(&pagelist);
 		}
 	}
-
+/*
 	{
 		struct list_head *pos, *q;
 		list_for_each_safe(pos, q, &task_shared_pagelist) {
@@ -504,7 +510,7 @@ asmlinkage long sys_set_page_color(int cpu)
 			}
 		}
 	}
-
+*/
 	if (!list_empty(&task_shared_pagelist)) {
 		ret = replicate_pages(&task_shared_pagelist, new_alloc_page, NULL, node, MIGRATE_SYNC, MR_SYSCALL);
 		TRACE_TASK(current, "%ld shared pages not migrated.\n", ret);
@@ -590,12 +596,14 @@ asmlinkage long sys_test_call(unsigned int param)
 		down_read(&current->mm->mmap_sem);
 		vma_itr = current->mm->mmap;
 		while (vma_itr != NULL) {
-			printk(KERN_INFO "--------------------------------------------\n");
-			printk(KERN_INFO "vm_start : %lx\n", vma_itr->vm_start);
-			printk(KERN_INFO "vm_end : %lx\n", vma_itr->vm_end);
-			printk(KERN_INFO "vm_flags : %lx\n", vma_itr->vm_flags);
-			printk(KERN_INFO "vm_prot : %x\n", pgprot_val(vma_itr->vm_page_prot));
-			printk(KERN_INFO "VM_SHARED? %ld\n", vma_itr->vm_flags & VM_SHARED);
+			int i, num_pages;
+			struct page* old_page;
+			TRACE_TASK(current, "--------------------------------------------\n");
+			TRACE_TASK(current, "vm_start : %lx\n", vma_itr->vm_start);
+			TRACE_TASK(current, "vm_end : %lx\n", vma_itr->vm_end);
+			TRACE_TASK(current, "vm_flags : %lx\n", vma_itr->vm_flags);
+			TRACE_TASK(current, "vm_prot : %x\n", pgprot_val(vma_itr->vm_page_prot));
+			TRACE_TASK(current, "VM_SHARED? %ld\n", vma_itr->vm_flags & VM_SHARED);
 /*			if (vma_itr->vm_file) {
 				struct file *fp = vma_itr->vm_file;
 				unsigned long fcount = atomic_long_read(&(fp->f_count));
@@ -606,14 +614,28 @@ asmlinkage long sys_test_call(unsigned int param)
 			}
 			printk(KERN_INFO "vm_prot2 : %x\n", pgprot_val(vma_itr->vm_page_prot));
 */
+			num_pages = (vma_itr->vm_end - vma_itr->vm_start) / PAGE_SIZE;
+			for (i = 0; i < num_pages; i++) {
+				old_page = follow_page(vma_itr, vma_itr->vm_start + PAGE_SIZE*i, FOLL_GET|FOLL_SPLIT);
+
+				if (IS_ERR(old_page))
+					continue;
+				if (!old_page)
+					continue;
+
+				if (PageReserved(old_page)) {
+					TRACE("Reserved Page!\n");
+					put_page(old_page);
+					continue;
+				}
+
+				TRACE_TASK(current, "addr: %08x, pfn: %ld, _mapcount: %d, _count: %d flags: %s%s%s\n", vma_itr->vm_start + PAGE_SIZE*i, page_to_pfn(old_page), page_mapcount(old_page), page_count(old_page), vma_itr->vm_flags&VM_READ?"r":"-", vma_itr->vm_flags&VM_WRITE?"w":"-", vma_itr->vm_flags&VM_EXEC?"x":"-");
+				put_page(old_page);
+			}
 			vma_itr = vma_itr->vm_next;
 		}
 		printk(KERN_INFO "--------------------------------------------\n");
 		up_read(&current->mm->mmap_sem);
-
-		local_irq_save(flags);
-		l2c310_flush_all();
-		local_irq_restore(flags);
 	}
 	else if (param == 1) {
 		int i;
diff --git a/mm/filemap.c b/mm/filemap.c
index 8f378ac675d7..8ea609718839 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -36,6 +36,9 @@
 #include <linux/rmap.h>
 #include "internal.h"
 
+#include <litmus/litmus.h>
+#include <litmus/mc2_common.h>
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/filemap.h>
 
@@ -1885,6 +1888,8 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	/*
 	 * Do we have something in the page cache already?
 	 */
+	if (is_realtime(current))
+		printk("FILEMAP_FAULT %ld\n", vma->vm_start);
 	page = find_get_page(mapping, offset);
 	if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
 		/*
diff --git a/mm/migrate.c b/mm/migrate.c
index 69687ab4277f..ee6732e1d590 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -418,7 +418,6 @@ int replicate_page_move_mapping(struct address_space *mapping,
 
 	TRACE_TASK(current, "page_count(page) = %d, expected_count = %d, page_has_private? %d\n", page_count(page), expected_count, page_has_private(page));
 
-	expected_count++;
 	if (page_count(page) != expected_count ||
 		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
 		spin_unlock_irq(&mapping->tree_lock);
@@ -464,9 +463,8 @@ int replicate_page_move_mapping(struct address_space *mapping,
 	 * to one less reference.
 	 * We know this isn't the last reference.
 	 */
-	//page_unfreeze_refs(page, expected_count - 1);
-	page_unfreeze_refs(page, expected_count - 2);
+	page_unfreeze_refs(page, expected_count - 1);
 
 	/*
 	 * If moved to a different zone then also account
 	 * the page for that zone. Other VM counters will be
@@ -1184,6 +1182,8 @@ static int __unmap_and_copy(struct page *page, struct page *newpage,
 		 * the retry loop is too short and in the sync-light case,
 		 * the overhead of stalling is too much
 		 */
+		BUG();
+		/*
 		if (mode != MIGRATE_SYNC) {
 			rc = -EBUSY;
 			goto out_unlock;
@@ -1191,6 +1191,7 @@ static int __unmap_and_copy(struct page *page, struct page *newpage,
 		if (!force)
 			goto out_unlock;
 		wait_on_page_writeback(page);
+		*/
 	}
 	/*
 	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
@@ -1683,7 +1684,8 @@ int replicate_pages(struct list_head *from, new_page_t get_new_page,
 
 	list_for_each_entry_safe(page, page2, from, lru) {
 		cond_resched();
 
+		TRACE_TASK(current, "PageAnon=%d\n", PageAnon(page));
 		rc = unmap_and_copy(get_new_page, put_new_page, private, page, pass > 2, mode);
 		TRACE_TASK(current, "rc = %d\n", rc);
 