author     Namhoon Kim <namhoonk@cs.unc.edu>    2016-04-05 13:44:32 -0400
committer  Namhoon Kim <namhoonk@cs.unc.edu>    2016-04-05 13:44:32 -0400
commit     e249dc389530a3e709393f735edde22214fdf1d8
tree       2ede86fa3d3d8e8f1845b0aa5adf7a2cf0ed5aa4
parent     95d1cb634561b6716e6909b956d12177e508c1b0
Add colored shared memory device
-rw-r--r--  litmus/Makefile        |   1 +
-rw-r--r--  litmus/color_shm.c     | 316 ++++++++++++++++++++++++++++++++++++
-rw-r--r--  litmus/litmus.c        |   5 ++++-
-rw-r--r--  litmus/sched_psn_edf.c |  10 +++++++---
4 files changed, 328 insertions(+), 4 deletions(-)
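
The interface this patch adds is ioctl-then-mmap: user space first selects cache colors and DRAM banks through SET_COLOR_SHM_CMD (each set bit in the two masks enables one color or bank), then mmap()s the device to receive pages drawn from the selected (bank, color) pairs. The sketch below is not part of the patch; the device path is an assumption (misc device registered as "litmus/color_shm"), and the ioctl command and argument struct are mirrored by hand because the patch does not install a uapi header.

/* Hypothetical user-space test for the color_shm device -- not part of this
 * patch.  Device path and mirrored ioctl definitions are assumptions. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/ioctl.h>

#define SHM_MAJOR	240	/* must match litmus/color_shm.c */

struct ioctl_cmd {
	unsigned int color;	/* bitmask: bit i selects cache color i (0..15) */
	unsigned int bank;	/* bitmask: bit i selects DRAM bank i (0..7)    */
};

#define SET_COLOR_SHM_CMD	_IOW(SHM_MAJOR, 0x1, struct ioctl_cmd)

int main(void)
{
	struct ioctl_cmd cmd = { .color = 0x0003, .bank = 0x01 }; /* colors 0-1, bank 0 */
	size_t len = 4 * (size_t)sysconf(_SC_PAGESIZE);	/* 4 pages, well under MAX_COLORED_PAGE */
	char *buf;
	int fd;

	fd = open("/dev/litmus/color_shm", O_RDWR);	/* assumed device node path */
	if (fd < 0) { perror("open"); return 1; }

	/* Select banks/colors first; the driver rejects mmap() if either mask is zero. */
	if (ioctl(fd, SET_COLOR_SHM_CMD, &cmd) < 0) { perror("ioctl"); return 1; }

	/* Offset must be 0: the driver only maps from the start of the region. */
	buf = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (buf == MAP_FAILED) { perror("mmap"); return 1; }

	memset(buf, 0xab, len);	/* touch the colored pages */
	munmap(buf, len);
	close(fd);
	return 0;
}

A single mapping is capped at MAX_COLORED_PAGE (256) pages, and any non-zero mmap offset is rejected by mmap_common_checks().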
diff --git a/litmus/Makefile b/litmus/Makefile
index 4a34b4d338a1..1845dda0b905 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -27,6 +27,7 @@ obj-y = sched_plugin.o litmus.o \
 	   sched_pfp.o \
 	   sched_mc2.o \
 	   bank_proc.o \
+	   color_shm.o \
 	   cache_proc.o
 
 obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
diff --git a/litmus/color_shm.c b/litmus/color_shm.c
new file mode 100644
index 000000000000..1d77957607f0
--- /dev/null
+++ b/litmus/color_shm.c
@@ -0,0 +1,316 @@
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <asm/uaccess.h>
+
+#include <litmus/litmus.h>
+
+#define DEV_NAME	"litmus/color_shm"
+
+/* Major number assigned to our device.
+ * Refer Documentation/devices.txt */
+#define SHM_MAJOR		240
+#define MAX_COLORED_PAGE	256
+#define NUM_BANKS		8
+#define NUM_COLORS		16
+
+static struct mutex dev_lock;
+
+struct ioctl_cmd {
+	unsigned int color;
+	unsigned int bank;
+};
+
+#define SET_COLOR_SHM_CMD		\
+	_IOW(SHM_MAJOR, 0x1, struct ioctl_cmd)
+
+struct ioctl_cmd color_param;
+
+static int mmap_common_checks(struct vm_area_struct *vma)
+{
+	/* you can only map the "first" page */
+	if (vma->vm_pgoff != 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+static void mmap_common_vma_flags(struct vm_area_struct *vma)
+{
+	/* This mapping should not be kept across forks,
+	 * cannot be expanded, and is not a "normal" page. */
+	//vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_IO | VM_SHARED | VM_MAYSHARE;
+	vma->vm_flags |= VM_SHARED | VM_MAYSHARE | VM_LOCKED;
+
+	/* We don't want the first write access to trigger a "minor" page fault
+	 * to mark the page as dirty.  This is transient, private memory, we
+	 * don't care if it was touched or not. __S011 means RW access, but not
+	 * execute, and avoids copy-on-write behavior.
+	 * See protection_map in mmap.c. */
+	vma->vm_page_prot = PAGE_SHARED;
+}
+
+#define vma_nr_pages(vma) \
+	({unsigned long v = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); v;})
+
+extern struct page* get_colored_page(unsigned long color);
+
+static int do_map_colored_page(struct vm_area_struct *vma,
+		const unsigned long addr,
+		const unsigned long color_no)
+{
+	int err = 0;
+	struct page *page = get_colored_page(color_no);
+
+	if (!page) {
+		printk(KERN_INFO "Could not get page with color %lu.\n",
+				color_no);
+		err = -ENOMEM;
+		goto out;
+	}
+
+	printk(KERN_INFO "vma: %p  addr: 0x%lx  color_no: %lu\n",
+			vma, addr, color_no);
+
+	printk(KERN_INFO "inserting page (pa: 0x%lx) at vaddr: 0x%lx "
+			"flags: 0x%lx  prot: 0x%lx\n",
+			page_to_phys(page), addr,
+			vma->vm_flags, pgprot_val(vma->vm_page_prot));
+
+	err = vm_insert_page(vma, addr, page);
+	if (err) {
+		printk(KERN_INFO "vm_insert_page() failed (%d)\n", err);
+		err = -EINVAL;
+		goto out;
+	}
+out:
+	return err;
+}
+
+static int do_map_colored_pages(struct vm_area_struct *vma)
+{
+	const unsigned long nr_pages = vma_nr_pages(vma);
+	unsigned long nr_mapped;
+	int i, start_bank = -1, start_color = -1;
+	int cur_bank = -1, cur_color = -1, err = 0;
+	int colors[16] = {0}, banks[8] = {0};
+
+//#ifdef CONFIG_PLUGIN_COLOR_UNCACHABLE
+//	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+//#endif
+
+	for (i = 0; i < NUM_BANKS; i++) {
+		if (((color_param.bank >> i)&0x1) == 1)
+			banks[i] = 1;
+	}
+
+	for (i = 0; i < NUM_COLORS; i++) {
+		if (((color_param.color >> i)&0x1) == 1)
+			colors[i] = 1;
+	}
+
+	for (i = 0; i < NUM_BANKS; i++) {
+		if (banks[i] == 1) {
+			start_bank = i;
+			break;
+		}
+	}
+	for (i = 0; i < NUM_COLORS; i++) {
+		if (colors[i] == 1) {
+			start_color = i;
+			break;
+		}
+	}
+
+	cur_bank = start_bank;
+	cur_color = start_color;
+
+	for (i = 0; i < NUM_BANKS; i++) {
+		printk(KERN_INFO "BANK[%d] = %d\n", i, banks[i]);
+	}
+	printk(KERN_INFO "cur_bank = %d\n", cur_bank);
+	for (i = 0; i < NUM_COLORS; i++) {
+		printk(KERN_INFO "COLOR[%d] = %d\n", i, colors[i]);
+	}
+	printk(KERN_INFO "cur_color = %d\n", cur_color);
+
+
+	TRACE_CUR("allocating %lu pages (flags:%lx prot:%lx)\n",
+			nr_pages, vma->vm_flags, pgprot_val(vma->vm_page_prot));
+
+	for (nr_mapped = 0; nr_mapped < nr_pages; nr_mapped++) {
+		const unsigned long addr = vma->vm_start + (nr_mapped << PAGE_SHIFT);
+		const unsigned long color_no = cur_bank*NUM_COLORS + cur_color;
+
+		err = do_map_colored_page(vma, addr, color_no);
+		printk(KERN_INFO "mapped bank[%d], color[%d], color_no = %lu at 0x%lx\n",
+				cur_bank, cur_color, color_no, addr);
+		if (err) {
+			TRACE_CUR("Could not map colored page set.\n");
+			err = -EINVAL;
+			goto out;
+		}
+		do {
+			cur_color++;
+		} while(colors[cur_color] == 0);
+
+		if (cur_color >= NUM_COLORS) {
+			do {
+				cur_bank++;
+			} while(banks[cur_bank] == 0);
+			cur_color = start_color;
+		}
+
+		if (cur_bank >= NUM_BANKS) {
+			cur_bank = start_bank;
+		}
+	}
+	TRACE_CUR("Successfully mapped %lu pages.\n", nr_mapped);
+out:
+	return err;
+}
+
+static int map_colored_pages(struct vm_area_struct *vma)
+{
+	int err = 0;
+
+	printk(KERN_INFO "User requests %lu pages.\n", vma_nr_pages(vma));
+	if (MAX_COLORED_PAGE < vma_nr_pages(vma)) {
+		TRACE_CUR("Max page request %lu but want %lu.\n",
+				MAX_COLORED_PAGE, vma_nr_pages(vma));
+		err = -EINVAL;
+		goto out;
+	}
+	err = do_map_colored_pages(vma);
+out:
+	return err;
+}
+
+static void litmus_color_shm_vm_close(struct vm_area_struct *vma)
+{
+
+	TRACE_CUR("flags=0x%lx prot=0x%lx\n",
+			vma->vm_flags, pgprot_val(vma->vm_page_prot));
+
+	TRACE_CUR("%p:%p vma:%p vma->vm_private_data:%p closed.\n",
+			(void*) vma->vm_start, (void*) vma->vm_end, vma,
+			vma->vm_private_data);
+
+}
+
+static int litmus_color_shm_vm_fault(struct vm_area_struct *vma,
+		struct vm_fault *vmf)
+{
+	/* This function should never be called, since
+	 * all pages should have been mapped by mmap()
+	 * already. */
+	TRACE_CUR("flags=0x%lx (off:%ld)\n", vma->vm_flags, vmf->pgoff);
+	printk(KERN_INFO "flags=0x%lx (off:%ld)\n", vma->vm_flags, vmf->pgoff);
+
+	printk(KERN_INFO "Page fault in color ctrl page! prot=0x%lx\n", pgprot_val(vma->vm_page_prot));
+
+	return VM_FAULT_SIGBUS;
+}
+
+static struct vm_operations_struct litmus_color_shm_vm_ops = {
+	.close = litmus_color_shm_vm_close,
+	.fault = litmus_color_shm_vm_fault,
+};
+
+static int litmus_color_shm_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	int err = 0;
+
+	printk(KERN_INFO "mmap called\n");
+
+	if (color_param.color == 0x00000000 || color_param.bank == 0x00000000) {
+		printk(KERN_INFO "color_info not set.\n");
+		return -EINVAL;
+	}
+	err = mmap_common_checks(vma);
+	if (err) {
+		TRACE_CUR("failed mmap common checks\n");
+		goto out;
+	}
+
+	vma->vm_ops = &litmus_color_shm_vm_ops;
+	mmap_common_vma_flags(vma);
+
+	err = map_colored_pages(vma);
+
+	TRACE_CUR("flags=0x%lx prot=0x%lx\n", vma->vm_flags,
+			pgprot_val(vma->vm_page_prot));
+out:
+	return err;
+
+}
+
+static long litmus_color_shm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	long err = -ENOIOCTLCMD;
+	struct ioctl_cmd param;
+
+	printk(KERN_INFO "color_shm ioctl\n");
+
+	if (_IOC_TYPE(cmd) != SHM_MAJOR)
+		return -ENOTTY;
+
+
+	switch (cmd) {
+		case SET_COLOR_SHM_CMD:
+			err = copy_from_user(&param, (void*)arg, sizeof(struct ioctl_cmd));
+
+			color_param.color = param.color;
+			color_param.bank = param.bank;
+			printk(KERN_INFO "COLOR = %x\n", color_param.color);
+			printk(KERN_INFO "BANK = %x\n", color_param.bank);
+			err = 0;
+			break;
+		default:
+			printk(KERN_INFO "Invalid IOCTL CMD\n");
+			err = -EINVAL;
+	}
+
+	return err;
+}
+
+static struct file_operations litmus_color_shm_fops = {
+	.owner		= THIS_MODULE,
+	.mmap		= litmus_color_shm_mmap,
+	.unlocked_ioctl	= litmus_color_shm_ioctl,
+};
+
+static struct miscdevice litmus_color_shm_dev = {
+	.name	= DEV_NAME,
+	.minor	= MISC_DYNAMIC_MINOR,
+	.fops	= &litmus_color_shm_fops,
+};
+
+static int __init init_color_shm_devices(void)
+{
+	int err;
+
+	printk(KERN_INFO "Registering LITMUS^RT color_shm devices.\n");
+	mutex_init(&dev_lock);
+	color_param.color = 0x0000ffff;
+	color_param.bank = 0x000000ff;
+
+	err = misc_register(&litmus_color_shm_dev);
+
+	return err;
+}
+
+static void __exit exit_color_shm_devices(void)
+{
+	misc_deregister(&litmus_color_shm_dev);
+	printk(KERN_INFO "Unregistering %s device.\n", DEV_NAME);
+}
+
+module_init(init_color_shm_devices);
+module_exit(exit_color_shm_devices);
\ No newline at end of file
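
One note on the mapping loop in do_map_colored_pages(): the do/while advances cur_color (and, on wrap, cur_bank) before testing the bound, so colors[NUM_COLORS] and banks[NUM_BANKS] can be read one element past the end of the arrays when the highest selected index is in use. A bounds-checked way to step through the selected (bank, color) pairs could look like the sketch below; the helper name and calling convention are hypothetical and not part of the patch.

/* Sketch only, not in the patch: an equivalent but bounds-checked advance
 * through the selected (bank, color) pairs. */
#define NUM_BANKS	8
#define NUM_COLORS	16

void next_bank_color(const int banks[NUM_BANKS], const int colors[NUM_COLORS],
		     int start_bank, int start_color,
		     int *cur_bank, int *cur_color)
{
	int b, c;

	/* Find the next selected color after *cur_color, stopping at the array end. */
	for (c = *cur_color + 1; c < NUM_COLORS && !colors[c]; c++)
		;
	if (c < NUM_COLORS) {
		*cur_color = c;
		return;
	}

	/* Wrapped past the last selected color: restart colors, advance the bank. */
	*cur_color = start_color;
	for (b = *cur_bank + 1; b < NUM_BANKS && !banks[b]; b++)
		;
	*cur_bank = (b < NUM_BANKS) ? b : start_bank;
}

In the same spirit, the ioctl handler assigns the return value of copy_from_user() to err but then unconditionally sets err to 0, so a faulting user pointer is not reported to the caller.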
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 6d5f317e7537..702999191203 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -472,12 +472,13 @@ asmlinkage long sys_test_call(unsigned int param)
 	down_read(&current->mm->mmap_sem);
 	vma_itr = current->mm->mmap;
 	while (vma_itr != NULL) {
+		printk(KERN_INFO "--------------------------------------------\n");
 		printk(KERN_INFO "vm_start : %lx\n", vma_itr->vm_start);
 		printk(KERN_INFO "vm_end   : %lx\n", vma_itr->vm_end);
 		printk(KERN_INFO "vm_flags : %lx\n", vma_itr->vm_flags);
 		printk(KERN_INFO "vm_prot  : %x\n", pgprot_val(vma_itr->vm_page_prot));
 		printk(KERN_INFO "VM_SHARED? %ld\n", vma_itr->vm_flags & VM_SHARED);
-		if (vma_itr->vm_file) {
+/*		if (vma_itr->vm_file) {
 			struct file *fp = vma_itr->vm_file;
 			unsigned long fcount = atomic_long_read(&(fp->f_count));
 			printk(KERN_INFO "f_count : %ld\n", fcount);
@@ -486,8 +487,10 @@ asmlinkage long sys_test_call(unsigned int param)
 			}
 		}
 		printk(KERN_INFO "vm_prot2 : %x\n", pgprot_val(vma_itr->vm_page_prot));
+*/
 		vma_itr = vma_itr->vm_next;
 	}
+	printk(KERN_INFO "--------------------------------------------\n");
 	up_read(&current->mm->mmap_sem);
 
 	local_irq_save(flags);
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index 2549a3fc28b9..76a57af9ae95 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -243,7 +243,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
 	if (next) {
 		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
 	} else {
-		TRACE("becoming idle at %llu\n", litmus_clock());
+		; //TRACE("becoming idle at %llu\n", litmus_clock());
 	}
 
 	pedf->scheduled = next;
@@ -644,10 +644,14 @@ static long psnedf_admit_task(struct task_struct* tsk)
 	    /* don't allow tasks on release master CPU */
 	    && task_cpu(tsk) != remote_edf(task_cpu(tsk))->release_master
 #endif
-		)
+		) {
+		TRACE_TASK(tsk, "admitted\n");
 		return 0;
-	else
+	}
+	else {
+		TRACE_TASK(tsk, "not admitted\n");
 		return -EINVAL;
+	}
 }
 
 /* Plugin object */