From bf554059414a34dd17cd08a9c6bc6cfafa9ac717 Mon Sep 17 00:00:00 2001 From: Christopher Kenna Date: Wed, 2 May 2012 18:48:03 -0400 Subject: use remap_pfn_range and automatic reclaiming --- include/litmus/color.h | 5 ++- litmus/color.c | 111 ++++++++++++++++++++++++++----------------------- litmus/color_dev.c | 23 ++++++---- litmus/color_proc.c | 10 ----- 4 files changed, 79 insertions(+), 70 deletions(-) diff --git a/include/litmus/color.h b/include/litmus/color.h index 9c641c884ba2..998af33cd3ea 100644 --- a/include/litmus/color.h +++ b/include/litmus/color.h @@ -11,9 +11,10 @@ struct color_cache_info { unsigned long sets; }; -void add_page_to_color_list(struct page*); -void add_page_to_alloced_list(struct page*); struct page* get_colored_page(unsigned long); +void add_page_to_color_list(struct page*); +void add_page_to_alloced_list(struct page*, struct vm_area_struct*); +void reclaim_pages(struct vm_area_struct*); int color_add_pages_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); diff --git a/litmus/color.c b/litmus/color.c index ae6c3f6b8206..93d12a718543 100644 --- a/litmus/color.c +++ b/litmus/color.c @@ -27,11 +27,6 @@ struct color_group { atomic_t nr_pages; }; -static struct alloced_pages { - spinlock_t lock; - struct list_head list; -} alloced_pages; - static unsigned long color_mask; static struct color_group *color_groups; @@ -42,40 +37,31 @@ unsigned long nr_colors; struct color_cache_info color_cache_info; #endif int color_sysctl_add_pages_data; -int color_sysctl_reclaim_pages_data; static inline unsigned long page_color(struct page *page) { return ((page_to_phys(page) & color_mask) >> PAGE_SHIFT); } +/* + * Page's count should be one; it should not be on any LRU list. 
+ */ void add_page_to_color_list(struct page *page) { const unsigned long color = page_color(page); struct color_group *cgroup = &color_groups[color]; BUG_ON(in_list(&page->lru) || PageLRU(page)); - BUG_ON(page_mapped(page) || page_count(page) > 1); + BUG_ON(page_count(page) > 1); spin_lock(&cgroup->lock); list_add_tail(&page->lru, &cgroup->list); atomic_inc(&cgroup->nr_pages); SetPageLRU(page); - SetPageReserved(page); spin_unlock(&cgroup->lock); } -void add_page_to_alloced_list(struct page *page) -{ - BUG_ON(in_list(&page->lru) || PageLRU(page)); - //BUG_ON(!page_mapped(page) || page_count(page) < 2); - TRACE_CUR("pfn:%d page_mapped:%d page_count:%d\n", - page_to_pfn(page), page_mapped(page), - page_count(page)); - spin_lock(&alloced_pages.lock); - list_add_tail(&page->lru, &alloced_pages.list); - SetPageLRU(page); - spin_unlock(&alloced_pages.lock); -} - +/* + * Increases the page's count to two. + */ struct page* get_colored_page(unsigned long color) { struct color_group *cgroup; @@ -92,7 +78,8 @@ struct page* get_colored_page(unsigned long color) goto out_unlock; } page = list_first_entry(&cgroup->list, struct page, lru); - BUG_ON(page_mapped(page) || page_count(page) > 1); + BUG_ON(page_count(page) > 1); + get_page(page); list_del(&page->lru); atomic_dec(&cgroup->nr_pages); ClearPageLRU(page); @@ -136,9 +123,10 @@ static int do_add_pages(void) goto out; } color = page_color(page); - if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR) + if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR) { + SetPageReserved(page); add_page_to_color_list(page); - else + } else list_add_tail(&page->lru, &free_later); } list_for_each_entry_safe(page, page_tmp, &free_later, lru) { @@ -149,29 +137,63 @@ out: return ret; } -static int do_reclaim_pages(void) +static struct alloced_pages { + spinlock_t lock; + struct list_head list; +} alloced_pages; + +struct alloced_page { + struct page *page; + struct vm_area_struct *vma; + struct list_head list; +}; + 
+static struct alloced_page * new_alloced_page(struct page *page, + struct vm_area_struct *vma) { - struct page *page, *page_tmp; + struct alloced_page *ap = kmalloc(sizeof(*ap), GFP_KERNEL); + INIT_LIST_HEAD(&ap->list); + ap->page = page; + ap->vma = vma; + return ap; +} + +/* + * Page's count should be two or more. It should not be on any LRU list. + */ +void add_page_to_alloced_list(struct page *page, struct vm_area_struct *vma) +{ + struct alloced_page *ap; + + BUG_ON(page_count(page) < 2); + ap = new_alloced_page(page, vma); + spin_lock(&alloced_pages.lock); + list_add_tail(&ap->list, &alloced_pages.list); + spin_unlock(&alloced_pages.lock); +} + +/* + * Reclaim pages. + */ +void reclaim_pages(struct vm_area_struct *vma) +{ + struct alloced_page *ap, *ap_tmp; unsigned long nr_reclaimed = 0; spin_lock(&alloced_pages.lock); - list_for_each_entry_safe(page, page_tmp, &alloced_pages.list, lru) { - TRACE_CUR("pfn:%8lu page_mapped:%d page_count:%d\n", - page_to_pfn(page), page_mapped(page), - page_count(page)); - if (1 == page_count(page) && !page_mapped(page)) { - list_del(&page->lru); - ClearPageLRU(page); - add_page_to_color_list(page); + list_for_each_entry_safe(ap, ap_tmp, &alloced_pages.list, list) { + if (vma == ap->vma) { + list_del(&ap->list); + put_page(ap->page); + add_page_to_color_list(ap->page); nr_reclaimed++; TRACE_CUR("reclaiming page (pa:0x%10llx, pfn:%8lu, " - "color:%3lu)\n", page_to_phys(page), - page_to_pfn(page), page_color(page)); + "color:%3lu)\n", page_to_phys(ap->page), - page_to_pfn(page), page_color(page)); + page_to_pfn(ap->page), page_color(ap->page)); + kfree(ap); } - } spin_unlock(&alloced_pages.lock); TRACE_CUR("Reclaimed %lu pages.\n", nr_reclaimed); - return 0; } /*********************************************************** @@ -215,19 +237,6 @@ out: return ret; } -int color_reclaim_pages_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos) -{ - int ret = 0; - ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); - if 
(ret) - goto out; - if (write && color_sysctl_reclaim_pages_data) - ret = do_reclaim_pages(); -out: - return ret; -} - /*********************************************************** * Initialization ***********************************************************/ diff --git a/litmus/color_dev.c b/litmus/color_dev.c index c6e500722819..d681f57be01f 100644 --- a/litmus/color_dev.c +++ b/litmus/color_dev.c @@ -54,9 +54,12 @@ static int mmap_common_checks(struct vm_area_struct *vma) if (vma->vm_pgoff != 0) return -EINVAL; +#if 0 /* you can't share it with anyone */ + /* well, maybe you can... */ if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED)) return -EINVAL; +#endif return 0; } @@ -92,9 +95,9 @@ static int map_color_ctrl_page(struct vm_area_struct *vma) pfn = page_to_pfn(color_ctrl); TRACE_CUR(CTRL_NAME - ": mapping %p (pfn:%lx, %lx) to 0x%lx (prot:%lx)\n", + ": mapping %p (pfn:%lx, %lx) to 0x%lx (flags:%lx prot:%lx)\n", tsk_rt(t)->color_ctrl_page, pfn, page_to_pfn(color_ctrl), - vma->vm_start, vma->vm_page_prot); + vma->vm_start, vma->vm_flags, pgprot_val(vma->vm_page_prot)); /* Map it into the vma. Make sure to use PAGE_SHARED, otherwise * userspace actually gets a copy-on-write page. 
*/ @@ -155,7 +158,8 @@ static int do_map_colored_pages(struct vm_area_struct *vma) color_t *cur_color; int err; - TRACE_CUR(ALLOC_NAME ": allocating %lu pages\n", nr_pages); + TRACE_CUR(ALLOC_NAME ": allocating %lu pages (flags:%lx prot:%lx)\n", + nr_pages, vma->vm_flags, pgprot_val(vma->vm_page_prot)); for ( nr_mapped = 0, cur_color = tsk_rt(current)->color_ctrl_page->colors; @@ -177,19 +181,23 @@ static int do_map_colored_pages(struct vm_area_struct *vma) clear_user_highpage(page, addr); #endif TRACE_CUR(ALLOC_NAME ": insert page (pa:0x%10llx, pfn:%8lu, " - "color:%3lu) at 0x%lx\n", page_to_phys(page), - page_to_pfn(page), this_color, addr); + "color:%3lu) at 0x%lx (flags:%lx prot:%lx\n", + page_to_phys(page), + page_to_pfn(page), this_color, addr, + vma->vm_flags, pgprot_val(vma->vm_page_prot)); //err = vm_insert_page(vma, addr, page); err = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE, PAGE_SHARED); if (err) { TRACE_CUR(ALLOC_NAME ": remap_pfn_range() failed " - "(%d)\n", err); + "(%d) (flags:%lx prot:%lx)\n", err, + vma->vm_flags, + pgprot_val(vma->vm_page_prot)); /* TODO unmap mapped pages */ err = -EINVAL; break; } - add_page_to_alloced_list(page); + add_page_to_alloced_list(page, vma); } return err; } @@ -223,6 +231,7 @@ static void litmus_color_alloc_vm_close(struct vm_area_struct *vma) TRACE_CUR(ALLOC_NAME ": %p:%p vma:%p vma->vm_private_data:%p closed.\n", (void*) vma->vm_start, (void*) vma->vm_end, vma, vma->vm_private_data); + reclaim_pages(vma); } static int litmus_color_alloc_vm_fault(struct vm_area_struct *vma, diff --git a/litmus/color_proc.c b/litmus/color_proc.c index cac336ac1731..4cb6c9ac89bb 100644 --- a/litmus/color_proc.c +++ b/litmus/color_proc.c @@ -5,7 +5,6 @@ #include extern int color_sysctl_add_pages_data; /* litmus/color.c */ -extern int color_sysctl_reclaim_pages_data; /* litmus/color.c */ static int zero = 0; static int one = 1; @@ -30,15 +29,6 @@ static struct ctl_table color_table[] = .extra1 = &zero, .extra2 = &one, }, 
- { - .procname = "reclaim_pages", - .data = &color_sysctl_reclaim_pages_data, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = color_reclaim_pages_handler, - .extra1 = &zero, - .extra2 = &one, - }, { } }; -- cgit v1.2.2