From 5188ab60a757c92d140d3872e8fba95ededbafc1 Mon Sep 17 00:00:00 2001
From: Christopher Kenna
Date: Tue, 2 Oct 2012 22:44:45 -0400
Subject: Clean up the LITMUS color device.

Factor the common mmap() checks and vma flag setup into shared helpers,
map pages with vm_insert_page() instead of remap_pfn_range(), split the
inner per-color loop of do_map_colored_pages() into a new helper,
do_map_colored_page_set(), and replace the printk() calls in the fault
handlers with WARN(), since those paths should never run.
---
 litmus/color_dev.c | 229 +++++++++++++++++++++++++++++------------------------
 1 file changed, 125 insertions(+), 104 deletions(-)

diff --git a/litmus/color_dev.c b/litmus/color_dev.c
index 51760328418e..677905a2a6b1 100644
--- a/litmus/color_dev.c
+++ b/litmus/color_dev.c
@@ -15,16 +15,49 @@
 
 extern unsigned long nr_colors;
 
+/************************************************************
+ * Common functions.
+************************************************************/
+
+static int mmap_common_checks(struct vm_area_struct *vma)
+{
+	/* you can only map the "first" page */
+	if (vma->vm_pgoff != 0)
+		return -EINVAL;
+
+	/* you can't share it with anyone */
+	if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED)) {
+		TRACE_CUR("error: page had MAYSHARE or SHARED set\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void mmap_common_vma_flags(struct vm_area_struct *vma)
+{
+	/* This mapping should not be kept across forks,
+	 * cannot be expanded, and is not a "normal" page. */
+	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_IO;
+
+	/* We don't want the first write access to trigger a "minor" page fault
+	 * to mark the page as dirty. This is transient, private memory; we
+	 * don't care whether it was touched or not. __S011 means RW access,
+	 * but not execute, and avoids copy-on-write behavior.
+	 * See protection_map in mmap.c. */
+	vma->vm_page_prot = __S011;
+}
+
 /***********************************************************
  * Control device
 ***********************************************************/
 
 static void litmus_color_ctrl_vm_close(struct vm_area_struct *vma)
 {
-	TRACE_CUR("%s flags=0x%lx prot=0x%lx\n", __FUNCTION__,
+	TRACE_CUR("flags=0x%lx prot=0x%lx\n",
 			vma->vm_flags, pgprot_val(vma->vm_page_prot));
 
-	TRACE_CUR(CTRL_NAME ": %p:%p vma:%p vma->vm_private_data:%p closed.\n",
+	TRACE_CUR("%p:%p vma:%p vma->vm_private_data:%p closed.\n",
 			(void*) vma->vm_start, (void*) vma->vm_end, vma,
 			vma->vm_private_data);
 }
@@ -35,11 +68,10 @@ static int litmus_color_ctrl_vm_fault(struct vm_area_struct *vma,
 	/* This function should never be called, since
 	 * all pages should have been mapped by mmap()
 	 * already. */
-	TRACE_CUR("%s flags=0x%lx\n", __FUNCTION__, vma->vm_flags);
-	printk(KERN_WARNING "fault: %s flags=0x%lx\n", __FUNCTION__,
-			vma->vm_flags);
+	TRACE_CUR("flags=0x%lx (off:%ld)\n", vma->vm_flags, vmf->pgoff);
+
+	WARN(1, "Page fault in color ctrl page! flags=0x%lx\n", vma->vm_flags);
 
-	/* nope, you only get one page */
 	return VM_FAULT_SIGBUS;
 }
 
@@ -48,63 +80,41 @@ static struct vm_operations_struct litmus_color_ctrl_vm_ops = {
 	.fault = litmus_color_ctrl_vm_fault,
 };
 
-static int mmap_common_checks(struct vm_area_struct *vma)
-{
-	/* you can only map the "first" page */
-	if (vma->vm_pgoff != 0)
-		return -EINVAL;
-
-#if 0
-	/* you can't share it with anyone */
-	/* well, maybe you can... */
-	if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
-		return -EINVAL;
-#endif
-
-	return 0;
-}
-
 static int alloc_color_ctrl_page(void)
 {
-	struct task_struct *t;
+	struct task_struct *t = current;
 	int err = 0;
 
-	t = current;
 	/* only allocate if the task doesn't have one yet */
 	if (!tsk_rt(t)->color_ctrl_page) {
 		tsk_rt(t)->color_ctrl_page = (void*) get_zeroed_page(GFP_KERNEL);
-		if (!tsk_rt(t)->color_ctrl_page)
+		if (!tsk_rt(t)->color_ctrl_page) {
+			TRACE_TASK(t, "could not allocate control page\n");
 			err = -ENOMEM;
+			goto out;
+		}
 		/* will get de-allocated in task teardown */
-		TRACE_TASK(t, "%s color_ctrl_page = %p\n", __FUNCTION__,
+		TRACE_TASK(t, "color_ctrl_page = %p\n",
 				tsk_rt(t)->color_ctrl_page);
 	}
+out:
 	return err;
 }
 
 static int map_color_ctrl_page(struct vm_area_struct *vma)
 {
-	int err;
-	unsigned long pfn;
+	int err = 0;
 	struct task_struct *t = current;
 	struct page *color_ctrl = virt_to_page(tsk_rt(t)->color_ctrl_page);
 
-	t = current;
-	/* Increase ref count. Is decreased when vma is destroyed. */
-	get_page(color_ctrl);
-	pfn = page_to_pfn(color_ctrl);
-
-	TRACE_CUR(CTRL_NAME
-		": mapping %p (pfn:%lx, %lx) to 0x%lx (flags:%lx prot:%lx)\n",
-		tsk_rt(t)->color_ctrl_page, pfn, page_to_pfn(color_ctrl),
+	TRACE_CUR("mapping %p (pfn:%lx) to 0x%lx (flags:%lx prot:%lx)\n",
+		tsk_rt(t)->color_ctrl_page, page_to_pfn(color_ctrl),
 		vma->vm_start, vma->vm_flags, pgprot_val(vma->vm_page_prot));
 
-	/* Map it into the vma. Make sure to use PAGE_SHARED, otherwise
-	 * userspace actually gets a copy-on-write page. */
-	err = remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE, PAGE_SHARED);
-
+	/* Map it into the vma; vm_insert_page() takes its own reference. */
+	err = vm_insert_page(vma, vma->vm_start, color_ctrl);
 	if (err)
-		TRACE_CUR(CTRL_NAME ": remap_pfn_range() failed (%d)\n", err);
+		TRACE_CUR("vm_insert_page() failed (%d)\n", err);
 
 	return err;
 }
@@ -115,27 +125,25 @@ static int litmus_color_ctrl_mmap(struct file *filp, struct vm_area_struct *vma
 
 	/* you can only get one page */
 	if (vma->vm_end - vma->vm_start != PAGE_SIZE) {
-		TRACE_CUR(CTRL_NAME ": must allocate a multiple of PAGE_SIZE\n");
+		TRACE_CUR("must allocate exactly PAGE_SIZE\n");
 		err = -EINVAL;
 		goto out;
 	}
 
 	err = mmap_common_checks(vma);
 	if (err) {
-		TRACE_CUR(CTRL_NAME ": failed common mmap checks.\n");
+		TRACE_CUR("failed common mmap checks.\n");
 		goto out;
 	}
 
 	vma->vm_ops = &litmus_color_ctrl_vm_ops;
-	/* this mapping should not be kept across forks,
-	 * and cannot be expanded */
-	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+	mmap_common_vma_flags(vma);
 
 	err = alloc_color_ctrl_page();
 	if (!err)
 		err = map_color_ctrl_page(vma);
 
-	TRACE_CUR("%s flags=0x%lx prot=0x%lx\n", __FUNCTION__, vma->vm_flags,
+	TRACE_CUR("flags=0x%lx prot=0x%lx\n", vma->vm_flags,
 			pgprot_val(vma->vm_page_prot));
 out:
 	return err;
@@ -149,6 +157,51 @@ out:
 #define vma_nr_pages(vma) \
 	({unsigned long v = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); v;})
 
+static int do_map_colored_page_set(struct vm_area_struct *vma,
+		const unsigned long starting_at,
+		const unsigned long color_no,
+		const int nr_pages)
+{
+	int i, err = 0;
+
+	for (i = 0; i < nr_pages; ++i) {
+		const unsigned long addr = starting_at + (i << PAGE_SHIFT);
+		struct page *page = get_colored_page(color_no);
+
+		if (!page) {
+			TRACE_CUR("Could not get page with color %lu.\n",
+					color_no);
+			/* TODO unmap mapped pages? */
+			err = -ENOMEM;
+			goto out;
+		}
+
+#ifdef CONFIG_SPARC
+		clear_user_highpage(page, addr);
+#endif
+
+		TRACE_CUR("insert page (pa:0x%10llx, pfn:%8lu, color:%3lu) "
+				"at 0x%lx vma:(flags:%16lx prot:%16lx)\n",
+				page_to_phys(page),
+				page_to_pfn(page),
+				color_no,
+				addr,
+				vma->vm_flags,
+				pgprot_val(vma->vm_page_prot));
+
+		err = vm_insert_page(vma, addr, page);
+		if (err) {
+			TRACE_CUR("vm_insert_page() failed (%d)\n", err);
+			/* TODO unmap mapped pages? */
+			err = -EINVAL;
+			goto out;
+		}
+		add_page_to_alloced_list(page, vma);
+	}
+out:
+	return err;
+}
+
 static int do_map_colored_pages(struct vm_area_struct *vma)
 {
 	const unsigned long nr_pages = vma_nr_pages(vma);
@@ -156,67 +209,36 @@ static int do_map_colored_pages(struct vm_area_struct *vma)
 	unsigned long nr_mapped;
 	int i, err = 0;
 
-	TRACE_CUR(ALLOC_NAME ": allocating %lu pages (flags:%lx prot:%lx)\n",
+	TRACE_CUR("allocating %lu pages (flags:%lx prot:%lx)\n",
 			nr_pages, vma->vm_flags, pgprot_val(vma->vm_page_prot));
 
 #ifdef CONFIG_PLUGIN_COLOR_UNCACHABLE
+	/* pgprot_noncached() marks the mapping non-cacheable on x86. */
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 #endif
 
 	for (i = 0, nr_mapped = 0; nr_mapped < nr_pages; ++i) {
 		const unsigned long color_no = color_ctrl->colors[i];
-		unsigned int page_no = 0;
+		const unsigned long color_quantity = color_ctrl->pages[i];
+		const unsigned long addr = vma->vm_start +
+				(nr_mapped << PAGE_SHIFT);
 
-		for (; page_no < color_ctrl->pages[i]; ++page_no, ++nr_mapped) {
-			const unsigned long addr = vma->vm_start +
-					(nr_mapped << PAGE_SHIFT);
-			struct page *page = get_colored_page(color_no);
-#ifdef CONFIG_PLUGIN_COLOR_UNCACHABLE
-			const pgprot_t ins_prot = pgprot_noncached(PAGE_SHARED);
-#else
-			const pgprot_t ins_prot = PAGE_SHARED;
-#endif
-
-			if (!page) {
-				TRACE_CUR(ALLOC_NAME ": Could not get page with"
-					" color %lu.\n", color_no);
-				/* TODO unmap mapped pages */
-				err = -ENOMEM;
-				goto out;
-			}
-
-#ifdef CONFIG_SPARC
-			clear_user_highpage(page, addr);
-#endif
-
-			TRACE_CUR(ALLOC_NAME ": insert page (pa:0x%10llx, "
-				"pfn:%8lu, color:%3lu, prot:%lx) at 0x%lx "
-				"vma:(flags:%16lx prot:%16lx)\n",
-				page_to_phys(page),
-				page_to_pfn(page), color_no,
-				pgprot_val(ins_prot), addr,
-				vma->vm_flags,
-				pgprot_val(vma->vm_page_prot));
-
-			err = remap_pfn_range(vma, addr, page_to_pfn(page),
-					PAGE_SIZE, ins_prot);
-			if (err) {
-				TRACE_CUR(ALLOC_NAME ": remap_pfn_range() fail "
-					"(%d)\n", err);
-				/* TODO unmap mapped pages */
-				err = -EINVAL;
-				goto out;
-			}
-			add_page_to_alloced_list(page, vma);
+		if (!color_quantity) {
+			TRACE_CUR("0 pages given for color %lu\n", color_no);
+			err = -EINVAL;
+			goto out;
 		}
 
-		if (!page_no) {
-			TRACE_CUR(ALLOC_NAME ": 0 pages given for color %lu\n",
-					color_no);
+		err = do_map_colored_page_set(vma, addr,
+				color_no, color_quantity);
+		if (err) {
+			TRACE_CUR("Could not map colored page set.\n");
 			err = -EINVAL;
 			goto out;
 		}
+		nr_mapped += color_quantity;
 	}
+	TRACE_CUR("Successfully mapped %lu pages.\n", nr_mapped);
 out:
 	return err;
 }
@@ -244,10 +266,10 @@ out:
 
 static void litmus_color_alloc_vm_close(struct vm_area_struct *vma)
 {
-	TRACE_CUR("%s flags=0x%lx prot=0x%lx\n", __FUNCTION__,
+	TRACE_CUR("flags=0x%lx prot=0x%lx\n",
 			vma->vm_flags, pgprot_val(vma->vm_page_prot));
 
-	TRACE_CUR(ALLOC_NAME ": %p:%p vma:%p vma->vm_private_data:%p closed.\n",
+	TRACE_CUR("%p:%p vma:%p vma->vm_private_data:%p closed.\n",
 			(void*) vma->vm_start, (void*) vma->vm_end, vma,
 			vma->vm_private_data);
 	reclaim_pages(vma);
@@ -259,11 +281,10 @@ static int litmus_color_alloc_vm_fault(struct vm_area_struct *vma,
 	/* This function should never be called, since
 	 * all pages should have been mapped by mmap()
 	 * already. */
-	TRACE_CUR("%s flags=0x%lx\n", __FUNCTION__, vma->vm_flags);
-	printk(KERN_WARNING "fault: %s flags=0x%lx\n", __FUNCTION__,
-			vma->vm_flags);
+	TRACE_CUR("flags=0x%lx (off:%ld)\n", vma->vm_flags, vmf->pgoff);
+
+	WARN(1, "Page fault in color alloc page! flags=0x%lx\n", vma->vm_flags);
 
-	/* nope, you only get one page */
 	return VM_FAULT_SIGBUS;
 }
 
@@ -283,17 +304,17 @@ static int litmus_color_alloc_mmap(struct file *filp, struct vm_area_struct *vma
 	}
 
 	err = mmap_common_checks(vma);
-	if (err)
+	if (err) {
+		TRACE_CUR("failed mmap common checks\n");
 		goto out;
+	}
 
 	vma->vm_ops = &litmus_color_alloc_vm_ops;
-	/* this mapping should not be kept across forks,
-	 * and cannot be expanded */
-	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+	mmap_common_vma_flags(vma);
 
 	err = map_colored_pages(vma);
 
-	TRACE_CUR("%s flags=0x%lx prot=0x%lx\n", __FUNCTION__, vma->vm_flags,
+	TRACE_CUR("flags=0x%lx prot=0x%lx\n", vma->vm_flags,
 			pgprot_val(vma->vm_page_prot));
 out:
 	return err;
--
cgit v1.2.2
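
A note on usage (not part of the commit): the checks consolidated in
mmap_common_checks() and litmus_color_ctrl_mmap() above imply that a
userspace mapping of the control device must use offset 0, must not be
MAP_SHARED, and must be exactly one page long. The following is a minimal
sketch of a conforming mapping; the device path "/dev/litmus/color_ctrl"
is an assumed placeholder, since the actual node name depends on how the
character device (CTRL_NAME) is registered.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	void *ctrl;
	int fd;

	fd = open("/dev/litmus/color_ctrl", O_RDWR); /* assumed path */
	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	/* MAP_SHARED would fail mmap_common_checks() with -EINVAL, as
	 * would a nonzero offset; a length other than one page fails
	 * the size check in litmus_color_ctrl_mmap(). */
	ctrl = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
			MAP_PRIVATE, fd, 0);
	if (ctrl == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return EXIT_FAILURE;
	}

	/* The page is zeroed kernel memory inserted by vm_insert_page();
	 * because vm_page_prot is forced to __S011, writes do not trigger
	 * copy-on-write, and any real fault would hit the WARN()ing
	 * handlers above and raise SIGBUS. */

	munmap(ctrl, page_size);
	close(fd);
	return EXIT_SUCCESS;
}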