#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <asm/io.h> /* page_to_phys on SPARC */
#include <litmus/litmus.h>
#include <litmus/color.h>
#define ALLOC_NAME "litmus/color_alloc"
#define CTRL_NAME "litmus/color_ctrl"
extern unsigned long nr_colors;
/***********************************************************
* Control device
***********************************************************/
static void litmus_color_ctrl_vm_close(struct vm_area_struct *vma)
{
TRACE_CUR("%s flags=0x%lx prot=0x%lx\n", __FUNCTION__,
vma->vm_flags, pgprot_val(vma->vm_page_prot));
TRACE_CUR(CTRL_NAME ": %p:%p vma:%p vma->vm_private_data:%p closed.\n",
(void*) vma->vm_start, (void*) vma->vm_end, vma,
vma->vm_private_data);
}
static int litmus_color_ctrl_vm_fault(struct vm_area_struct *vma,
struct vm_fault *vmf)
{
/* This function should never be called, since
* all pages should have been mapped by mmap()
* already. */
TRACE_CUR("%s flags=0x%lx\n", __FUNCTION__, vma->vm_flags);
printk(KERN_WARNING "fault: %s flags=0x%lx\n", __FUNCTION__,
vma->vm_flags);
/* nope, you only get one page */
return VM_FAULT_SIGBUS;
}
static struct vm_operations_struct litmus_color_ctrl_vm_ops = {
.close = litmus_color_ctrl_vm_close,
.fault = litmus_color_ctrl_vm_fault,
};
static int mmap_common_checks(struct vm_area_struct *vma)
{
/* you can only map the "first" page */
if (vma->vm_pgoff != 0)
return -EINVAL;
#if 0
/* you can't share it with anyone */
/* well, maybe you can... */
if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
return -EINVAL;
#endif
return 0;
}
static int alloc_color_ctrl_page(void)
{
struct task_struct *t;
int err = 0;
t = current;
/* only allocate if the task doesn't have one yet */
if (!tsk_rt(t)->color_ctrl_page) {
tsk_rt(t)->color_ctrl_page = (void*) get_zeroed_page(GFP_KERNEL);
if (!tsk_rt(t)->color_ctrl_page)
err = -ENOMEM;
/* will get de-allocated in task teardown */
TRACE_TASK(t, "%s color_ctrl_page = %p\n", __FUNCTION__,
tsk_rt(t)->color_ctrl_page);
}
return err;
}
static int map_color_ctrl_page(struct vm_area_struct *vma)
{
int err;
unsigned long pfn;
struct task_struct *t = current;
struct page *color_ctrl = virt_to_page(tsk_rt(t)->color_ctrl_page);
/* Increase ref count. Is decreased when vma is destroyed. */
get_page(color_ctrl);
pfn = page_to_pfn(color_ctrl);
TRACE_CUR(CTRL_NAME
": mapping %p (pfn:%lx) to 0x%lx (flags:%lx prot:%lx)\n",
tsk_rt(t)->color_ctrl_page, pfn,
vma->vm_start, vma->vm_flags, pgprot_val(vma->vm_page_prot));
/* Map it into the vma. Make sure to use PAGE_SHARED, otherwise
* userspace actually gets a copy-on-write page. */
err = remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE, PAGE_SHARED);
if (err)
TRACE_CUR(CTRL_NAME ": remap_pfn_range() failed (%d)\n", err);
return err;
}
static int litmus_color_ctrl_mmap(struct file *filp, struct vm_area_struct *vma)
{
int err = 0;
/* you can only get one page */
if (vma->vm_end - vma->vm_start != PAGE_SIZE) {
TRACE_CUR(CTRL_NAME ": must allocate a multiple of PAGE_SIZE\n");
err = -EINVAL;
goto out;
}
err = mmap_common_checks(vma);
if (err) {
TRACE_CUR(CTRL_NAME ": failed common mmap checks.\n");
goto out;
}
vma->vm_ops = &litmus_color_ctrl_vm_ops;
/* this mapping should not be kept across forks,
* and cannot be expanded */
vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
err = alloc_color_ctrl_page();
if (!err)
err = map_color_ctrl_page(vma);
TRACE_CUR("%s flags=0x%lx prot=0x%lx\n", __FUNCTION__, vma->vm_flags,
pgprot_val(vma->vm_page_prot));
out:
return err;
}
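/*
 * Userspace sketch (an assumption, not part of this driver): with a typical
 * devtmpfs setup the misc device is expected to appear as
 * /dev/litmus/color_ctrl; the exact node path depends on how the "litmus/"
 * names are created.  A task would map its control page roughly like this:
 *
 *	int cfd = open("/dev/litmus/color_ctrl", O_RDWR);
 *	struct color_ctrl_page *ctrl = mmap(NULL, PAGE_SIZE,
 *			PROT_READ | PROT_WRITE, MAP_SHARED, cfd, 0);
 *
 * The kernel hands back the task's zeroed color_ctrl_page; the task fills in
 * its color requests there before touching the allocation device below.
 */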
/***********************************************************
* Allocation device
***********************************************************/
#define vma_nr_pages(vma) \
({unsigned long v = (((vma)->vm_end - (vma)->vm_start) >> PAGE_SHIFT); v;})
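/*
 * The control page (struct color_ctrl_page, declared in the litmus headers
 * included above) is treated here as parallel arrays: colors[i] names a
 * cache color and pages[i] says how many pages of that color the task
 * wants.  Only that much of the layout is relied on below.
 */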
static int do_map_colored_pages(struct vm_area_struct *vma)
{
const unsigned long nr_pages = vma_nr_pages(vma);
struct color_ctrl_page *color_ctrl = tsk_rt(current)->color_ctrl_page;
unsigned long nr_mapped;
int i, err = 0;
TRACE_CUR(ALLOC_NAME ": allocating %lu pages (flags:%lx prot:%lx)\n",
nr_pages, vma->vm_flags, pgprot_val(vma->vm_page_prot));
#ifdef CONFIG_PLUGIN_COLOR_UNCACHABLE
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
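/*
 * Walk the (color, count) pairs from the control page.  Every outer
 * iteration either maps at least one page or bails out with an error, so i
 * stays below vma_nr_pages(vma), which the caller already checked against
 * COLORS_PER_CONTROL_PAGE.
 */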
for (i = 0, nr_mapped = 0; nr_mapped < nr_pages; ++i) {
const unsigned long color_no = color_ctrl->colors[i];
unsigned int page_no = 0;
for (; page_no < color_ctrl->pages[i] && nr_mapped < nr_pages;
++page_no, ++nr_mapped) {
const unsigned long addr = vma->vm_start +
(nr_mapped << PAGE_SHIFT);
struct page *page = get_colored_page(color_no);
#ifdef CONFIG_PLUGIN_COLOR_UNCACHABLE
const pgprot_t ins_prot = pgprot_noncached(PAGE_SHARED);
#else
const pgprot_t ins_prot = PAGE_SHARED;
#endif
if (!page) {
TRACE_CUR(ALLOC_NAME ": Could not get page with"
" color %lu.\n", color_no);
/* TODO unmap mapped pages */
err = -ENOMEM;
goto out;
}
#ifdef CONFIG_SPARC
clear_user_highpage(page, addr);
#endif
TRACE_CUR(ALLOC_NAME ": insert page (pa:0x%10llx, "
"pfn:%8lu, color:%3lu, prot:%lx) at 0x%lx "
"vma:(flags:%16lx prot:%16lx)\n",
(unsigned long long) page_to_phys(page),
page_to_pfn(page), color_no,
pgprot_val(ins_prot), addr,
vma->vm_flags,
pgprot_val(vma->vm_page_prot));
err = remap_pfn_range(vma, addr, page_to_pfn(page),
PAGE_SIZE, ins_prot);
if (err) {
TRACE_CUR(ALLOC_NAME ": remap_pfn_range() fail "
"(%d)\n", err);
/* TODO unmap mapped pages */
err = -EINVAL;
goto out;
}
add_page_to_alloced_list(page, vma);
}
if (!page_no) {
TRACE_CUR(ALLOC_NAME ": 0 pages given for color %lu\n",
color_no);
err = -EINVAL;
goto out;
}
}
out:
return err;
}
static int map_colored_pages(struct vm_area_struct *vma)
{
int err = 0;
if (!tsk_rt(current)->color_ctrl_page) {
TRACE_CUR("Process has no color control page.\n");
err = -EINVAL;
goto out;
}
if (COLORS_PER_CONTROL_PAGE < vma_nr_pages(vma)) {
TRACE_CUR("Max page request %lu but want %lu.\n",
COLORS_PER_CONTROL_PAGE, vma_nr_pages(vma));
err = -EINVAL;
goto out;
}
err = do_map_colored_pages(vma);
out:
return err;
}
static void litmus_color_alloc_vm_close(struct vm_area_struct *vma)
{
TRACE_CUR("%s flags=0x%lx prot=0x%lx\n", __FUNCTION__,
vma->vm_flags, pgprot_val(vma->vm_page_prot));
TRACE_CUR(ALLOC_NAME ": %p:%p vma:%p vma->vm_private_data:%p closed.\n",
(void*) vma->vm_start, (void*) vma->vm_end, vma,
vma->vm_private_data);
reclaim_pages(vma);
}
static int litmus_color_alloc_vm_fault(struct vm_area_struct *vma,
struct vm_fault *vmf)
{
/* This function should never be called, since
* all pages should have been mapped by mmap()
* already. */
TRACE_CUR("%s flags=0x%lx\n", __FUNCTION__, vma->vm_flags);
printk(KERN_WARNING "fault: %s flags=0x%lx\n", __FUNCTION__,
vma->vm_flags);
/* all pages were mapped up front in mmap(); faults are unexpected */
return VM_FAULT_SIGBUS;
}
static struct vm_operations_struct litmus_color_alloc_vm_ops = {
.close = litmus_color_alloc_vm_close,
.fault = litmus_color_alloc_vm_fault,
};
static int litmus_color_alloc_mmap(struct file *filp, struct vm_area_struct *vma)
{
int err = 0;
/* you may only request an integer multiple of PAGE_SIZE */
if (offset_in_page(vma->vm_end - vma->vm_start)) {
err = -EINVAL;
goto out;
}
err = mmap_common_checks(vma);
if (err)
goto out;
vma->vm_ops = &litmus_color_alloc_vm_ops;
/* this mapping should not be kept across forks,
* and cannot be expanded */
vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
err = map_colored_pages(vma);
TRACE_CUR("%s flags=0x%lx prot=0x%lx\n", __FUNCTION__, vma->vm_flags,
pgprot_val(vma->vm_page_prot));
out:
return err;
}
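/*
 * Userspace sketch (an assumption, inferred from do_map_colored_pages()
 * rather than a documented ABI): after filling in the control page mapped
 * from CTRL_NAME, a task requests the actual colored pages by mapping the
 * allocation device, e.g. to get four pages of cache color 3:
 *
 *	ctrl->colors[0] = 3;
 *	ctrl->pages[0] = 4;
 *	int afd = open("/dev/litmus/color_alloc", O_RDWR);
 *	void *buf = mmap(NULL, 4 * PAGE_SIZE, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, afd, 0);
 *
 * The mapping length should equal the total number of pages requested
 * across the colors[]/pages[] pairs, and at most COLORS_PER_CONTROL_PAGE
 * pages can be requested per mmap() call.
 */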
/***********************************************************
* Initialization
***********************************************************/
static struct file_operations litmus_color_ctrl_fops = {
.owner = THIS_MODULE,
.mmap = litmus_color_ctrl_mmap,
};
static struct miscdevice litmus_color_ctrl_dev = {
.name = CTRL_NAME,
.minor = MISC_DYNAMIC_MINOR,
.fops = &litmus_color_ctrl_fops,
};
static struct file_operations litmus_color_alloc_fops = {
.owner = THIS_MODULE,
.mmap = litmus_color_alloc_mmap,
};
static struct miscdevice litmus_color_alloc_dev = {
.name = ALLOC_NAME,
.minor = MISC_DYNAMIC_MINOR,
.fops = &litmus_color_alloc_fops,
};
static int __init init_dev(const char* name, struct miscdevice *dev)
{
int err;
err = misc_register(dev);
if (err)
printk(KERN_WARNING "Could not allocate %s device (%d).\n",
name, err);
return err;
}
static int __init init_color_devices(void)
{
int err;
printk("Allocating LITMUS^RT color devices.\n");
err = init_dev(ALLOC_NAME, &litmus_color_alloc_dev);
if (err)
goto out;
err = init_dev(CTRL_NAME, &litmus_color_ctrl_dev);
out:
return err;
}
module_init(init_color_devices);