From cbe4c29edb5e783a5839d72705c18e5a795c4e05 Mon Sep 17 00:00:00 2001 From: Namhoon Kim Date: Sun, 25 Sep 2016 16:31:19 -0400 Subject: Fix isolate_lru_page for shared pages --- litmus/litmus.c | 54 ++++++++++++++++++++++++++++++++++++++---------------- mm/filemap.c | 5 +++++ mm/migrate.c | 12 +++++++----- 3 files changed, 50 insertions(+), 21 deletions(-) diff --git a/litmus/litmus.c b/litmus/litmus.c index ad3d50c78bb1..f88cd16ab86d 100644 --- a/litmus/litmus.c +++ b/litmus/litmus.c @@ -440,10 +440,16 @@ asmlinkage long sys_set_page_color(int cpu) } /* add to task_shared_pagelist */ - list_add_tail(&old_page->lru, &task_shared_pagelist); - - nr_shared_pages++; - TRACE_TASK(current, "SHARED\n"); + ret = isolate_lru_page(old_page); + if (!ret) { + list_add_tail(&old_page->lru, &task_shared_pagelist); + inc_zone_page_state(old_page, NR_ISOLATED_ANON + !PageSwapBacked(old_page)); + nr_shared_pages++; + TRACE_TASK(current, "SHARED isolate_lur_page success\n"); + } else { + TRACE_TASK(current, "SHARED isolate_lru_page failed\n"); + } + put_page(old_page); } else { ret = isolate_lru_page(old_page); @@ -489,7 +495,7 @@ asmlinkage long sys_set_page_color(int cpu) putback_movable_pages(&pagelist); } } - +/* { struct list_head *pos, *q; list_for_each_safe(pos, q, &task_shared_pagelist) { @@ -504,7 +510,7 @@ asmlinkage long sys_set_page_color(int cpu) } } } - +*/ if (!list_empty(&task_shared_pagelist)) { ret = replicate_pages(&task_shared_pagelist, new_alloc_page, NULL, node, MIGRATE_SYNC, MR_SYSCALL); TRACE_TASK(current, "%ld shared pages not migrated.\n", ret); @@ -590,12 +596,14 @@ asmlinkage long sys_test_call(unsigned int param) down_read(&current->mm->mmap_sem); vma_itr = current->mm->mmap; while (vma_itr != NULL) { - printk(KERN_INFO "--------------------------------------------\n"); - printk(KERN_INFO "vm_start : %lx\n", vma_itr->vm_start); - printk(KERN_INFO "vm_end : %lx\n", vma_itr->vm_end); - printk(KERN_INFO "vm_flags : %lx\n", vma_itr->vm_flags); - printk(KERN_INFO 
"vm_prot : %x\n", pgprot_val(vma_itr->vm_page_prot)); - printk(KERN_INFO "VM_SHARED? %ld\n", vma_itr->vm_flags & VM_SHARED); + int i, num_pages; + struct page* old_page; + TRACE_TASK(current, "--------------------------------------------\n"); + TRACE_TASK(current, "vm_start : %lx\n", vma_itr->vm_start); + TRACE_TASK(current, "vm_end : %lx\n", vma_itr->vm_end); + TRACE_TASK(current, "vm_flags : %lx\n", vma_itr->vm_flags); + TRACE_TASK(current, "vm_prot : %x\n", pgprot_val(vma_itr->vm_page_prot)); + TRACE_TASK(current, "VM_SHARED? %ld\n", vma_itr->vm_flags & VM_SHARED); /* if (vma_itr->vm_file) { struct file *fp = vma_itr->vm_file; unsigned long fcount = atomic_long_read(&(fp->f_count)); @@ -606,14 +614,28 @@ asmlinkage long sys_test_call(unsigned int param) } printk(KERN_INFO "vm_prot2 : %x\n", pgprot_val(vma_itr->vm_page_prot)); */ + num_pages = (vma_itr->vm_end - vma_itr->vm_start) / PAGE_SIZE; + for (i = 0; i < num_pages; i++) { + old_page = follow_page(vma_itr, vma_itr->vm_start + PAGE_SIZE*i, FOLL_GET|FOLL_SPLIT); + + if (IS_ERR(old_page)) + continue; + if (!old_page) + continue; + + if (PageReserved(old_page)) { + TRACE("Reserved Page!\n"); + put_page(old_page); + continue; + } + + TRACE_TASK(current, "addr: %08x, pfn: %ld, _mapcount: %d, _count: %d flags: %s%s%s\n", vma_itr->vm_start + PAGE_SIZE*i, page_to_pfn(old_page), page_mapcount(old_page), page_count(old_page), vma_itr->vm_flags&VM_READ?"r":"-", vma_itr->vm_flags&VM_WRITE?"w":"-", vma_itr->vm_flags&VM_EXEC?"x":"-"); + put_page(old_page); + } vma_itr = vma_itr->vm_next; } printk(KERN_INFO "--------------------------------------------\n"); up_read(&current->mm->mmap_sem); - - local_irq_save(flags); - l2c310_flush_all(); - local_irq_restore(flags); } else if (param == 1) { int i; diff --git a/mm/filemap.c b/mm/filemap.c index 8f378ac675d7..8ea609718839 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -36,6 +36,9 @@ #include #include "internal.h" +#include +#include + #define CREATE_TRACE_POINTS #include @@ 
-1885,6 +1888,8 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) /* * Do we have something in the page cache already? */ +if (is_realtime(current)) + printk("FILEMAP_FAULT %ld\n", vma->vm_start); page = find_get_page(mapping, offset); if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) { /* diff --git a/mm/migrate.c b/mm/migrate.c index 69687ab4277f..ee6732e1d590 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -418,7 +418,6 @@ int replicate_page_move_mapping(struct address_space *mapping, TRACE_TASK(current, "page_count(page) = %d, expected_count = %d, page_has_private? %d\n", page_count(page), expected_count, page_has_private(page)); - expected_count++; if (page_count(page) != expected_count || radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) { spin_unlock_irq(&mapping->tree_lock); @@ -464,9 +463,8 @@ int replicate_page_move_mapping(struct address_space *mapping, * to one less reference. * We know this isn't the last reference. */ - //page_unfreeze_refs(page, expected_count - 1); - page_unfreeze_refs(page, expected_count - 2); - + page_unfreeze_refs(page, expected_count - 1); + /* * If moved to a different zone then also account * the page for that zone. Other VM counters will be @@ -1184,6 +1182,8 @@ static int __unmap_and_copy(struct page *page, struct page *newpage, * the retry loop is too short and in the sync-light case, * the overhead of stalling is too much */ + BUG(); + /* if (mode != MIGRATE_SYNC) { rc = -EBUSY; goto out_unlock; @@ -1191,6 +1191,7 @@ static int __unmap_and_copy(struct page *page, struct page *newpage, if (!force) goto out_unlock; wait_on_page_writeback(page); + */ } /* * By try_to_unmap(), page->mapcount goes down to 0 here. 
In this case, @@ -1683,7 +1684,8 @@ int replicate_pages(struct list_head *from, new_page_t get_new_page, list_for_each_entry_safe(page, page2, from, lru) { cond_resched(); - + + TRACE_TASK(current, "PageAnon=%d\n", PageAnon(page)); rc = unmap_and_copy(get_new_page, put_new_page, private, page, pass > 2, mode); TRACE_TASK(current, "rc = %d\n", rc); -- cgit v1.2.2