author     Bjoern B. Brandenburg <bbb@cs.unc.edu>      2010-02-03 19:35:20 -0500
committer  Andrea Bastoni <bastoni@cs.unc.edu>         2010-05-29 17:25:59 -0400
commit     b973c95c86e6710c913c01a67013605f68a3c2c3 (patch)
tree       73786af2a164c8cb89009cc4069e60b7de04bc4b /litmus
parent     5e987d486c0f89d615d134512938fc1198b3ca67 (diff)
Add virtual LITMUS^RT control device.
This device only supports mmap()'ing a single page. This page is shared RW between the kernel and userspace and is intended to allow near-zero-overhead communication between the two. Its first use will be a proper implementation of user-signaled non-preemptable section support.
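For illustration, a userspace task would obtain the shared page roughly as follows. This is a minimal sketch, not part of the patch: it assumes the misc device appears as /dev/litmus/ctrl and that the layout of the shared data (struct control_page) is defined elsewhere in the LITMUS^RT headers. Note that litmus_ctrl_mmap() below rejects shared and multi-page mappings, so the request must be a single MAP_PRIVATE page at offset 0.

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* Assumed device node for the misc device registered as
	 * CTRL_NAME ("litmus/ctrl"). */
	int fd = open("/dev/litmus/ctrl", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Exactly one page, offset 0, and not MAP_SHARED, as enforced
	 * by litmus_ctrl_mmap(); anything else is rejected with -EINVAL. */
	void *ctrl = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ | PROT_WRITE,
			  MAP_PRIVATE, fd, 0);
	if (ctrl == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	/* The page is shared read/write with the kernel; a real client
	 * would cast ctrl to struct control_page (whose layout is not part
	 * of this patch) and set, e.g., a non-preemptable-section flag. */
	printf("control page mapped at %p\n", ctrl);

	munmap(ctrl, sysconf(_SC_PAGESIZE));
	close(fd);
	return 0;
}

The mapping survives until the task exits or calls execve(); exit_litmus() and litmus_exec() (see the litmus.c changes below) free the kernel-side page in those cases.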
Diffstat (limited to 'litmus')
-rw-r--r--  litmus/Makefile  |   1
-rw-r--r--  litmus/ctrldev.c | 150
-rw-r--r--  litmus/litmus.c  |  35
3 files changed, 179 insertions(+), 7 deletions(-)
diff --git a/litmus/Makefile b/litmus/Makefile
index 3b3f1af8d5b4..ff4eb8a7b6c4 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -11,6 +11,7 @@ obj-y = sched_plugin.o litmus.o \
 	   srp.o \
 	   fmlp.o \
 	   bheap.o \
+	   ctrldev.o \
 	   sched_gsn_edf.o \
 	   sched_psn_edf.o \
 	   sched_pfair.o
diff --git a/litmus/ctrldev.c b/litmus/ctrldev.c
new file mode 100644
index 000000000000..238da453228d
--- /dev/null
+++ b/litmus/ctrldev.c
@@ -0,0 +1,150 @@
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+
+#include <litmus/litmus.h>
+
+/* only one page for now, but we might want to add a RO version at some point */
+#define CTRL_MINOR_COUNT 1
+#define CTRL_NAME "litmus/ctrl"
+
+/* allocate t->rt_param.ctrl_page*/
+static int alloc_ctrl_page(struct task_struct *t)
+{
+	int err = 0;
+
+	/* only allocate if the task doesn't have one yet */
+	if (!tsk_rt(t)->ctrl_page) {
+		tsk_rt(t)->ctrl_page = (void*) get_zeroed_page(GFP_KERNEL);
+		if (!tsk_rt(t)->ctrl_page)
+			err = -ENOMEM;
+		/* will get de-allocated in task teardown */
+		TRACE_TASK(t, "%s ctrl_page = %p\n", __FUNCTION__,
+			   tsk_rt(t)->ctrl_page);
+	}
+	return err;
+}
+
+static int map_ctrl_page(struct task_struct *t, struct vm_area_struct* vma)
+{
+	int err;
+	unsigned long pfn;
+
+	struct page* ctrl = virt_to_page(tsk_rt(t)->ctrl_page);
+
+	/* Increase ref count. Is decreased when vma is destroyed. */
+	get_page(ctrl);
+
+	/* compute page frame number */
+	pfn = page_to_pfn(ctrl);
+
+	TRACE_CUR(CTRL_NAME
+		  ": mapping %p (pfn:%lx, %lx) to 0x%lx (prot:%lx)\n",
+		  tsk_rt(t)->ctrl_page, pfn, page_to_pfn(ctrl), vma->vm_start,
+		  vma->vm_page_prot);
+
+	/* Map it into the vma. Make sure to use PAGE_SHARED, otherwise
+	 * userspace actually gets a copy-on-write page. */
+	err = remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE, PAGE_SHARED);
+
+	if (err)
+		TRACE_CUR(CTRL_NAME ": remap_pfn_range() failed (%d)\n", err);
+
+	return err;
+}
+
+static void litmus_ctrl_vm_close(struct vm_area_struct* vma)
+{
+	TRACE_CUR("%s flags=0x%x prot=0x%x\n", __FUNCTION__,
+		  vma->vm_flags, vma->vm_page_prot);
+
+	TRACE_CUR(CTRL_NAME
+		  ": %p:%p vma:%p vma->vm_private_data:%p closed by %s/%d.\n",
+		  (void*) vma->vm_start, (void*) vma->vm_end, vma,
+		  vma->vm_private_data, current->comm,
+		  current->pid);
+}
+
+static int litmus_ctrl_vm_fault(struct vm_area_struct* vma,
+				struct vm_fault* vmf)
+{
+	/* This function should never be called, since
+	 * all pages should have been mapped by mmap()
+	 * already. */
+	TRACE_CUR("%s flags=0x%x\n", __FUNCTION__, vma->vm_flags);
+
+	/* nope, you only get one page */
+	return VM_FAULT_SIGBUS;
+}
+
+static struct vm_operations_struct litmus_ctrl_vm_ops = {
+	.close = litmus_ctrl_vm_close,
+	.fault = litmus_ctrl_vm_fault,
+};
+
+static int litmus_ctrl_mmap(struct file* filp, struct vm_area_struct* vma)
+{
+	int err = 0;
+
+	/* first make sure mapper knows what he's doing */
+
+	/* you can only get one page */
+	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+		return -EINVAL;
+
+	/* you can only map the "first" page */
+	if (vma->vm_pgoff != 0)
+		return -EINVAL;
+
+	/* you can't share it with anyone */
+	if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
+		return -EINVAL;
+
+	vma->vm_ops = &litmus_ctrl_vm_ops;
+	/* this mapping should not be kept across forks,
+	 * and cannot be expanded */
+	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+
+	err = alloc_ctrl_page(current);
+	if (!err)
+		err = map_ctrl_page(current, vma);
+
+	TRACE_CUR("%s flags=0x%x prot=0x%lx\n",
+		  __FUNCTION__, vma->vm_flags, vma->vm_page_prot);
+
+	return err;
+}
+
+static struct file_operations litmus_ctrl_fops = {
+	.owner = THIS_MODULE,
+	.mmap  = litmus_ctrl_mmap,
+};
+
+static struct miscdevice litmus_ctrl_dev = {
+	.name  = CTRL_NAME,
+	.minor = MISC_DYNAMIC_MINOR,
+	.fops  = &litmus_ctrl_fops,
+};
+
+static int __init init_litmus_ctrl_dev(void)
+{
+	int err;
+
+	BUILD_BUG_ON(sizeof(struct control_page) > PAGE_SIZE);
+
+	printk("Initializing LITMUS^RT control device.\n");
+	err = misc_register(&litmus_ctrl_dev);
+	if (err)
+		printk("Could not allocate %s device (%d).\n", CTRL_NAME, err);
+	return err;
+}
+
+static void __exit exit_litmus_ctrl_dev(void)
+{
+	misc_deregister(&litmus_ctrl_dev);
+}
+
+module_init(init_litmus_ctrl_dev);
+module_exit(exit_litmus_ctrl_dev);
diff --git a/litmus/litmus.c b/litmus/litmus.c
index d31a2ba030e4..589062f8ab87 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -268,12 +268,13 @@ asmlinkage long sys_null_call(cycles_t __user *ts)
 static void reinit_litmus_state(struct task_struct* p, int restore)
 {
 	struct rt_task user_config = {};
-	__user short *np_flag = NULL;
+	void* ctrl_page = NULL;
 
 	if (restore) {
-		/* Safe user-space provided configuration data. */
+		/* Safe user-space provided configuration data.
+		 * and allocated page. */
 		user_config = p->rt_param.task_params;
-		np_flag = p->rt_param.np_flag;
+		ctrl_page = p->rt_param.ctrl_page;
 	}
 
 	/* We probably should not be inheriting any task's priority
@@ -282,7 +283,7 @@ static void reinit_litmus_state(struct task_struct* p, int restore)
 	WARN_ON(p->rt_param.inh_task);
 
 	/* We need to restore the priority of the task. */
-//	__setscheduler(p, p->rt_param.old_policy, p->rt_param.old_prio);
+//	__setscheduler(p, p->rt_param.old_policy, p->rt_param.old_prio); XXX why is this commented?
 
 	/* Cleanup everything else. */
 	memset(&p->rt_param, 0, sizeof(p->rt_param));
@@ -290,7 +291,7 @@ static void reinit_litmus_state(struct task_struct* p, int restore)
 	/* Restore preserved fields. */
 	if (restore) {
 		p->rt_param.task_params = user_config;
-		p->rt_param.np_flag = np_flag;
+		p->rt_param.ctrl_page = ctrl_page;
 	}
 }
 
@@ -412,8 +413,11 @@ out:
 void litmus_fork(struct task_struct* p)
 {
 	if (is_realtime(p))
-		/* clean out any litmus related state, don't preserve anything*/
+		/* clean out any litmus related state, don't preserve anything */
 		reinit_litmus_state(p, 0);
+	else
+		/* non-rt tasks might have ctrl_page set */
+		tsk_rt(p)->ctrl_page = NULL;
 }
 
 /* Called upon execve().
@@ -426,12 +430,29 @@ void litmus_exec(void)
 
 	if (is_realtime(p)) {
 		WARN_ON(p->rt_param.inh_task);
-		p->rt_param.np_flag = NULL;
+		if (tsk_rt(p)->ctrl_page) {
+			free_page((unsigned long) tsk_rt(p)->ctrl_page);
+			tsk_rt(p)->ctrl_page = NULL;
+		}
 	}
 }
 
 void exit_litmus(struct task_struct *dead_tsk)
 {
+	/* We also allow non-RT tasks to
+	 * allocate control pages to allow
+	 * measurements with non-RT tasks.
+	 * So check if we need to free the page
+	 * in any case.
+	 */
+	if (tsk_rt(dead_tsk)->ctrl_page) {
+		TRACE_TASK(dead_tsk,
+			   "freeing ctrl_page %p\n",
+			   tsk_rt(dead_tsk)->ctrl_page);
+		free_page((unsigned long) tsk_rt(dead_tsk)->ctrl_page);
+	}
+
+	/* main cleanup only for RT tasks */
 	if (is_realtime(dead_tsk))
 		litmus_exit_task(dead_tsk);
 }