From e249dc389530a3e709393f735edde22214fdf1d8 Mon Sep 17 00:00:00 2001
From: Namhoon Kim
Date: Tue, 5 Apr 2016 13:44:32 -0400
Subject: Add colored shared memory device

---
 litmus/Makefile        |   1 +
 litmus/color_shm.c     | 316 +++++++++++++++++++++++++++++++++++++++++++++++++
 litmus/litmus.c        |   5 +-
 litmus/sched_psn_edf.c |  10 +-
 4 files changed, 328 insertions(+), 4 deletions(-)
 create mode 100644 litmus/color_shm.c

diff --git a/litmus/Makefile b/litmus/Makefile
index 4a34b4d338a1..1845dda0b905 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -27,6 +27,7 @@ obj-y = sched_plugin.o litmus.o \
 	   sched_pfp.o \
 	   sched_mc2.o \
 	   bank_proc.o \
+	   color_shm.o \
 	   cache_proc.o
 
 obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o

diff --git a/litmus/color_shm.c b/litmus/color_shm.c
new file mode 100644
index 000000000000..1d77957607f0
--- /dev/null
+++ b/litmus/color_shm.c
@@ -0,0 +1,316 @@
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <asm/uaccess.h>
+
+#include <litmus/litmus.h>
+
+#define DEV_NAME	"litmus/color_shm"
+
+/* Major number assigned to our device.
+ * Refer to Documentation/devices.txt */
+#define SHM_MAJOR		240
+#define MAX_COLORED_PAGE	256
+#define NUM_BANKS		8
+#define NUM_COLORS		16
+
+static struct mutex dev_lock;
+
+struct ioctl_cmd {
+	unsigned int color;
+	unsigned int bank;
+};
+
+#define SET_COLOR_SHM_CMD	\
+	_IOW(SHM_MAJOR, 0x1, struct ioctl_cmd)
+
+struct ioctl_cmd color_param;
+
+static int mmap_common_checks(struct vm_area_struct *vma)
+{
+	/* you can only map the "first" page */
+	if (vma->vm_pgoff != 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+static void mmap_common_vma_flags(struct vm_area_struct *vma)
+{
+	/* This mapping should not be kept across forks,
+	 * cannot be expanded, and is not a "normal" page. */
+	//vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_IO | VM_SHARED | VM_MAYSHARE;
+	vma->vm_flags |= VM_SHARED | VM_MAYSHARE | VM_LOCKED;
+
+	/* We don't want the first write access to trigger a "minor" page fault
+	 * to mark the page as dirty. This is transient, private memory, we
+	 * don't care if it was touched or not. __S011 means RW access, but not
+	 * execute, and avoids copy-on-write behavior.
+	 * See protection_map in mmap.c.
+	 */
+	vma->vm_page_prot = PAGE_SHARED;
+}
+
+#define vma_nr_pages(vma)	\
+	({unsigned long v = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); v;})
+
+extern struct page* get_colored_page(unsigned long color);
+
+static int do_map_colored_page(struct vm_area_struct *vma,
+		const unsigned long addr,
+		const unsigned long color_no)
+{
+	int err = 0;
+	struct page *page = get_colored_page(color_no);
+
+	if (!page) {
+		printk(KERN_INFO "Could not get page with color %lu.\n",
+				color_no);
+		err = -ENOMEM;
+		goto out;
+	}
+
+	printk(KERN_INFO "vma: %p addr: 0x%lx color_no: %lu\n",
+			vma, addr, color_no);
+
+	printk(KERN_INFO "inserting page (pa: 0x%lx) at vaddr: 0x%lx "
+			"flags: 0x%lx prot: 0x%lx\n",
+			page_to_phys(page), addr,
+			vma->vm_flags, pgprot_val(vma->vm_page_prot));
+
+	err = vm_insert_page(vma, addr, page);
+	if (err) {
+		printk(KERN_INFO "vm_insert_page() failed (%d)\n", err);
+		err = -EINVAL;
+		goto out;
+	}
+out:
+	return err;
+}
+
+static int do_map_colored_pages(struct vm_area_struct *vma)
+{
+	const unsigned long nr_pages = vma_nr_pages(vma);
+	unsigned long nr_mapped;
+	int i, start_bank = -1, start_color = -1;
+	int cur_bank = -1, cur_color = -1, err = 0;
+	int colors[16] = {0}, banks[8] = {0};
+
+//#ifdef CONFIG_PLUGIN_COLOR_UNCACHABLE
+//	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+//#endif
+
+	for (i = 0; i < NUM_BANKS; i++) {
+		if (((color_param.bank >> i) & 0x1) == 1)
+			banks[i] = 1;
+	}
+
+	for (i = 0; i < NUM_COLORS; i++) {
+		if (((color_param.color >> i) & 0x1) == 1)
+			colors[i] = 1;
+	}
+
+	for (i = 0; i < NUM_BANKS; i++) {
+		if (banks[i] == 1) {
+			start_bank = i;
+			break;
+		}
+	}
+	for (i = 0; i < NUM_COLORS; i++) {
+		if (colors[i] == 1) {
+			start_color = i;
+			break;
+		}
+	}
+
+	cur_bank = start_bank;
+	cur_color = start_color;
+
+	for (i = 0; i < NUM_BANKS; i++) {
+		printk(KERN_INFO "BANK[%d] = %d\n", i, banks[i]);
+	}
+	printk(KERN_INFO "cur_bank = %d\n", cur_bank);
+	for (i = 0; i < NUM_COLORS; i++) {
+		printk(KERN_INFO "COLOR[%d] = %d\n", i, colors[i]);
+	}
+	printk(KERN_INFO "cur_color = %d\n", cur_color);
+
+
+	TRACE_CUR("allocating %lu pages (flags:%lx prot:%lx)\n",
+			nr_pages, vma->vm_flags, pgprot_val(vma->vm_page_prot));
+
+	for (nr_mapped = 0; nr_mapped < nr_pages; nr_mapped++) {
+		const unsigned long addr = vma->vm_start + (nr_mapped << PAGE_SHIFT);
+		const unsigned long color_no = cur_bank*NUM_COLORS + cur_color;
+
+		err = do_map_colored_page(vma, addr, color_no);
+		printk(KERN_INFO "mapped bank[%d], color[%d], color_no = %lu at 0x%lx\n",
+				cur_bank, cur_color, color_no, addr);
+		if (err) {
+			TRACE_CUR("Could not map colored page set.\n");
+			err = -EINVAL;
+			goto out;
+		}
+		/* advance to the next enabled color, then bank (bounded so we
+		 * never read past colors[]/banks[]) */
+		do {
+			cur_color++;
+		} while (cur_color < NUM_COLORS && colors[cur_color] == 0);
+
+		if (cur_color >= NUM_COLORS) {
+			do {
+				cur_bank++;
+			} while (cur_bank < NUM_BANKS && banks[cur_bank] == 0);
+			cur_color = start_color;
+		}
+
+		if (cur_bank >= NUM_BANKS) {
+			cur_bank = start_bank;
+		}
+	}
+	TRACE_CUR("Successfully mapped %lu pages.\n", nr_mapped);
+out:
+	return err;
+}
+
+static int map_colored_pages(struct vm_area_struct *vma)
+{
+	int err = 0;
+
+	printk(KERN_INFO "User requests %lu pages.\n", vma_nr_pages(vma));
+	if (MAX_COLORED_PAGE < vma_nr_pages(vma)) {
+		TRACE_CUR("Max page request %d but want %lu.\n",
+				MAX_COLORED_PAGE, vma_nr_pages(vma));
+		err = -EINVAL;
+		goto out;
+	}
+	err = do_map_colored_pages(vma);
+out:
+	return err;
+}
+
+static void litmus_color_shm_vm_close(struct vm_area_struct *vma)
+{
+
+	TRACE_CUR("flags=0x%lx prot=0x%lx\n",
+			vma->vm_flags, pgprot_val(vma->vm_page_prot));
+
+	TRACE_CUR("%p:%p vma:%p vma->vm_private_data:%p closed.\n",
+			(void*) vma->vm_start, (void*) vma->vm_end, vma,
+			vma->vm_private_data);
+
+}
+
+static int litmus_color_shm_vm_fault(struct vm_area_struct *vma,
+		struct vm_fault *vmf)
+{
+	/* This function should never be called, since
+	 * all pages should have been mapped by mmap()
+	 * already. */
+	TRACE_CUR("flags=0x%lx (off:%ld)\n", vma->vm_flags, vmf->pgoff);
+	printk(KERN_INFO "flags=0x%lx (off:%ld)\n", vma->vm_flags, vmf->pgoff);
+
+	printk(KERN_INFO "Page fault in color ctrl page! prot=0x%lx\n", pgprot_val(vma->vm_page_prot));
+
+	return VM_FAULT_SIGBUS;
+}
+
+static struct vm_operations_struct litmus_color_shm_vm_ops = {
+	.close = litmus_color_shm_vm_close,
+	.fault = litmus_color_shm_vm_fault,
+};
+
+static int litmus_color_shm_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	int err = 0;
+
+	printk(KERN_INFO "mmap called\n");
+
+	if (color_param.color == 0x00000000 || color_param.bank == 0x00000000) {
+		printk(KERN_INFO "color_info not set.\n");
+		return -EINVAL;
+	}
+	err = mmap_common_checks(vma);
+	if (err) {
+		TRACE_CUR("failed mmap common checks\n");
+		goto out;
+	}
+
+	vma->vm_ops = &litmus_color_shm_vm_ops;
+	mmap_common_vma_flags(vma);
+
+	err = map_colored_pages(vma);
+
+	TRACE_CUR("flags=0x%lx prot=0x%lx\n", vma->vm_flags,
+			pgprot_val(vma->vm_page_prot));
+out:
+	return err;
+
+}
+
+static long litmus_color_shm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	long err = -ENOIOCTLCMD;
+	struct ioctl_cmd param;
+
+	printk(KERN_INFO "color_shm ioctl\n");
+
+	if (_IOC_TYPE(cmd) != SHM_MAJOR)
+		return -ENOTTY;
+
+
+	switch (cmd) {
+	case SET_COLOR_SHM_CMD:
+		err = copy_from_user(&param, (void*)arg, sizeof(struct ioctl_cmd));
+		if (err) {
+			err = -EFAULT;
+			break;
+		}
+		color_param.color = param.color;
+		color_param.bank = param.bank;
+		printk(KERN_INFO "COLOR = %x\n", color_param.color);
+		printk(KERN_INFO "BANK = %x\n", color_param.bank);
+		break;
+	default:
+		printk(KERN_INFO "Invalid IOCTL CMD\n");
+		err = -EINVAL;
+	}
+
+	return err;
+}
+
+static struct file_operations litmus_color_shm_fops = {
+	.owner = THIS_MODULE,
+	.mmap = litmus_color_shm_mmap,
+	.unlocked_ioctl = litmus_color_shm_ioctl,
+};
+
+static struct miscdevice litmus_color_shm_dev = {
+	.name = DEV_NAME,
+	.minor = MISC_DYNAMIC_MINOR,
+	.fops = &litmus_color_shm_fops,
+};
+
+static int __init init_color_shm_devices(void)
+{
+	int err;
+
+	printk(KERN_INFO "Registering LITMUS^RT color_shm devices.\n");
+	mutex_init(&dev_lock);
+	color_param.color = 0x0000ffff;
+	color_param.bank = 0x000000ff;
+
+	err = misc_register(&litmus_color_shm_dev);
+
+	return err;
+}
+
+static void __exit exit_color_shm_devices(void)
+{
+	misc_deregister(&litmus_color_shm_dev);
+	printk(KERN_INFO "Unregistering %s device.\n", DEV_NAME);
+}
+
+module_init(init_color_shm_devices);
+module_exit(exit_color_shm_devices);
\ No newline at end of file

diff --git a/litmus/litmus.c b/litmus/litmus.c
index 6d5f317e7537..702999191203 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -472,12 +472,13 @@ asmlinkage long sys_test_call(unsigned int param)
 	down_read(&current->mm->mmap_sem);
 	vma_itr = current->mm->mmap;
 	while (vma_itr != NULL) {
+		printk(KERN_INFO "--------------------------------------------\n");
 		printk(KERN_INFO "vm_start : %lx\n", vma_itr->vm_start);
 		printk(KERN_INFO "vm_end : %lx\n", vma_itr->vm_end);
 		printk(KERN_INFO "vm_flags : %lx\n", vma_itr->vm_flags);
 		printk(KERN_INFO "vm_prot : %x\n", pgprot_val(vma_itr->vm_page_prot));
 		printk(KERN_INFO "VM_SHARED? %ld\n", vma_itr->vm_flags & VM_SHARED);
-		if (vma_itr->vm_file) {
+/*		if (vma_itr->vm_file) {
 			struct file *fp = vma_itr->vm_file;
 			unsigned long fcount = atomic_long_read(&(fp->f_count));
 			printk(KERN_INFO "f_count : %ld\n", fcount);
@@ -486,8 +487,10 @@ asmlinkage long sys_test_call(unsigned int param)
 			}
 		}
 		printk(KERN_INFO "vm_prot2 : %x\n", pgprot_val(vma_itr->vm_page_prot));
+*/
 		vma_itr = vma_itr->vm_next;
 	}
+	printk(KERN_INFO "--------------------------------------------\n");
 	up_read(&current->mm->mmap_sem);
 
 	local_irq_save(flags);

diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index 2549a3fc28b9..76a57af9ae95 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -243,7 +243,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
 	if (next) {
 		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
 	} else {
-		TRACE("becoming idle at %llu\n", litmus_clock());
+		; //TRACE("becoming idle at %llu\n", litmus_clock());
 	}
 
 	pedf->scheduled = next;
@@ -644,10 +644,14 @@ static long psnedf_admit_task(struct task_struct* tsk)
 		    /* don't allow tasks on release master CPU */
 		    && task_cpu(tsk) != remote_edf(task_cpu(tsk))->release_master
 #endif
-		)
+		) {
+		TRACE_TASK(tsk, "admitted\n");
 		return 0;
-	else
+	}
+	else {
+		TRACE_TASK(tsk, "not admitted\n");
 		return -EINVAL;
+	}
 }
 
 /* Plugin object */
-- 
cgit v1.2.2
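
For reference, the intended user-space usage implied by the interface above is: open the misc device, set the color/bank bitmasks with SET_COLOR_SHM_CMD, then mmap() a region whose pages are drawn from the selected cache colors and DRAM banks. The sketch below is not part of the patch; the device node path (/dev/litmus/color_shm), the mask values, and the page count are illustrative assumptions, while struct ioctl_cmd and SET_COLOR_SHM_CMD simply mirror the kernel-side definitions in litmus/color_shm.c.

/* User-space sketch (not part of the patch).  Assumptions: device node
 * path, mask values, and page count are for illustration only. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/ioctl.h>

struct ioctl_cmd {		/* must match the kernel-side layout */
	unsigned int color;
	unsigned int bank;
};

#define SHM_MAJOR		240
#define SET_COLOR_SHM_CMD	_IOW(SHM_MAJOR, 0x1, struct ioctl_cmd)

int main(void)
{
	struct ioctl_cmd cmd = {
		.color = 0x0000000f,	/* cache colors 0-3 */
		.bank  = 0x00000003,	/* DRAM banks 0-1 */
	};
	size_t len = 8 * sysconf(_SC_PAGESIZE);	/* <= MAX_COLORED_PAGE pages */
	char *buf;
	int fd;

	fd = open("/dev/litmus/color_shm", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Select colors/banks before mapping. */
	if (ioctl(fd, SET_COLOR_SHM_CMD, &cmd) < 0) {
		perror("ioctl");
		return 1;
	}
	/* Pages are allocated and inserted eagerly in mmap(); offset must be 0. */
	buf = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(buf, 0, len);	/* touch the colored pages */
	munmap(buf, len);
	close(fd);
	return 0;
}

Note that litmus_color_shm_mmap() rejects a request with -EINVAL if either bitmask is zero (the module init sets defaults that enable all colors and banks), it only accepts mappings at page offset 0, and a mapping may cover at most MAX_COLORED_PAGE (256) pages.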