aboutsummaryrefslogtreecommitdiffstats
path: root/litmus/litmus.c
diff options
context:
space:
mode:
authorAndrea Bastoni <bastoni@cs.unc.edu>2009-12-17 21:23:36 -0500
committerAndrea Bastoni <bastoni@cs.unc.edu>2010-05-29 17:05:45 -0400
commit4b38febbd59fd33542a343991262119eb9860f5e (patch)
tree1af88a0d354abe344c2c2869631f76a1806d75c3 /litmus/litmus.c
parent22763c5cf3690a681551162c15d34d935308c8d7 (diff)
[ported from 2008.3] Core LITMUS^RT infrastructure
Port 2008.3 Core LITMUS^RT infrastructure to Linux 2.6.32 litmus_sched_class implements 4 new methods: - prio_changed: void - switched_to: void - get_rr_interval: return infinity (i.e., 0) - select_task_rq: return current cpu
Diffstat (limited to 'litmus/litmus.c')
-rw-r--r--litmus/litmus.c654
1 files changed, 654 insertions, 0 deletions
diff --git a/litmus/litmus.c b/litmus/litmus.c
new file mode 100644
index 000000000000..eb0d17e298d7
--- /dev/null
+++ b/litmus/litmus.c
@@ -0,0 +1,654 @@
/* litmus.c -- Implementation of the LITMUS syscalls,
 * the LITMUS initialization code,
 * and the procfs interface.
 */
4#include <asm/uaccess.h>
5#include <linux/uaccess.h>
6#include <linux/sysrq.h>
7
8#include <linux/module.h>
9#include <linux/proc_fs.h>
10#include <linux/slab.h>
11
12#include <litmus/litmus.h>
13#include <linux/sched.h>
14#include <litmus/sched_plugin.h>
15
16#include <litmus/heap.h>
17
18#include <litmus/trace.h>
19
/* Number of RT tasks that exist in the system */
atomic_t rt_task_count = ATOMIC_INIT(0);
/* Serializes task admission/exit against scheduler plugin switching. */
static DEFINE_SPINLOCK(task_transition_lock);

/* Give log messages sequential IDs. */
atomic_t __log_seq_no = ATOMIC_INIT(0);

/* current master CPU for handling timer IRQs */
atomic_t release_master_cpu = ATOMIC_INIT(NO_CPU);

/* Slab cache backing heap_node_alloc()/heap_node_free(). */
static struct kmem_cache * heap_node_cache;
31
32struct heap_node* heap_node_alloc(int gfp_flags)
33{
34 return kmem_cache_alloc(heap_node_cache, gfp_flags);
35}
36
/* Return a heap_node previously obtained via heap_node_alloc()
 * to the slab cache. */
void heap_node_free(struct heap_node* hn)
{
	kmem_cache_free(heap_node_cache, hn);
}
41
42/*
43 * sys_set_task_rt_param
44 * @pid: Pid of the task which scheduling parameters must be changed
45 * @param: New real-time extension parameters such as the execution cost and
46 * period
47 * Syscall for manipulating with task rt extension params
48 * Returns EFAULT if param is NULL.
49 * ESRCH if pid is not corrsponding
50 * to a valid task.
51 * EINVAL if either period or execution cost is <=0
52 * EPERM if pid is a real-time task
53 * 0 if success
54 *
55 * Only non-real-time tasks may be configured with this system call
56 * to avoid races with the scheduler. In practice, this means that a
57 * task's parameters must be set _before_ calling sys_prepare_rt_task()
58 *
59 * find_task_by_vpid() assumes that we are in the same namespace of the
60 * target.
61 */
62asmlinkage long sys_set_rt_task_param(pid_t pid, struct rt_task __user * param)
63{
64 struct rt_task tp;
65 struct task_struct *target;
66 int retval = -EINVAL;
67
68 printk("Setting up rt task parameters for process %d.\n", pid);
69
70 if (pid < 0 || param == 0) {
71 goto out;
72 }
73 if (copy_from_user(&tp, param, sizeof(tp))) {
74 retval = -EFAULT;
75 goto out;
76 }
77
78 /* Task search and manipulation must be protected */
79 read_lock_irq(&tasklist_lock);
80 if (!(target = find_task_by_vpid(pid))) {
81 retval = -ESRCH;
82 goto out_unlock;
83 }
84
85 if (is_realtime(target)) {
86 /* The task is already a real-time task.
87 * We cannot not allow parameter changes at this point.
88 */
89 retval = -EBUSY;
90 goto out_unlock;
91 }
92
93 if (tp.exec_cost <= 0)
94 goto out_unlock;
95 if (tp.period <= 0)
96 goto out_unlock;
97 if (!cpu_online(tp.cpu))
98 goto out_unlock;
99 if (tp.period < tp.exec_cost)
100 {
101 printk(KERN_INFO "litmus: real-time task %d rejected "
102 "because wcet > period\n", pid);
103 goto out_unlock;
104 }
105
106 target->rt_param.task_params = tp;
107
108 retval = 0;
109 out_unlock:
110 read_unlock_irq(&tasklist_lock);
111 out:
112 return retval;
113}
114
115/*
116 * Getter of task's RT params
117 * returns EINVAL if param or pid is NULL
118 * returns ESRCH if pid does not correspond to a valid task
119 * returns EFAULT if copying of parameters has failed.
120 *
121 * find_task_by_vpid() assumes that we are in the same namespace of the
122 * target.
123 */
124asmlinkage long sys_get_rt_task_param(pid_t pid, struct rt_task __user * param)
125{
126 int retval = -EINVAL;
127 struct task_struct *source;
128 struct rt_task lp;
129 if (param == 0 || pid < 0)
130 goto out;
131 read_lock(&tasklist_lock);
132 if (!(source = find_task_by_vpid(pid))) {
133 retval = -ESRCH;
134 goto out_unlock;
135 }
136 lp = source->rt_param.task_params;
137 read_unlock(&tasklist_lock);
138 /* Do copying outside the lock */
139 retval =
140 copy_to_user(param, &lp, sizeof(lp)) ? -EFAULT : 0;
141 return retval;
142 out_unlock:
143 read_unlock(&tasklist_lock);
144 out:
145 return retval;
146
147}
148
149/*
150 * This is the crucial function for periodic task implementation,
151 * It checks if a task is periodic, checks if such kind of sleep
152 * is permitted and calls plugin-specific sleep, which puts the
153 * task into a wait array.
154 * returns 0 on successful wakeup
155 * returns EPERM if current conditions do not permit such sleep
156 * returns EINVAL if current task is not able to go to sleep
157 */
158asmlinkage long sys_complete_job(void)
159{
160 int retval = -EPERM;
161 if (!is_realtime(current)) {
162 retval = -EINVAL;
163 goto out;
164 }
165 /* Task with negative or zero period cannot sleep */
166 if (get_rt_period(current) <= 0) {
167 retval = -EINVAL;
168 goto out;
169 }
170 /* The plugin has to put the task into an
171 * appropriate queue and call schedule
172 */
173 retval = litmus->complete_job();
174 out:
175 return retval;
176}
177
178/* This is an "improved" version of sys_complete_job that
179 * addresses the problem of unintentionally missing a job after
180 * an overrun.
181 *
182 * returns 0 on successful wakeup
183 * returns EPERM if current conditions do not permit such sleep
184 * returns EINVAL if current task is not able to go to sleep
185 */
186asmlinkage long sys_wait_for_job_release(unsigned int job)
187{
188 int retval = -EPERM;
189 if (!is_realtime(current)) {
190 retval = -EINVAL;
191 goto out;
192 }
193
194 /* Task with negative or zero period cannot sleep */
195 if (get_rt_period(current) <= 0) {
196 retval = -EINVAL;
197 goto out;
198 }
199
200 retval = 0;
201
202 /* first wait until we have "reached" the desired job
203 *
204 * This implementation has at least two problems:
205 *
206 * 1) It doesn't gracefully handle the wrap around of
207 * job_no. Since LITMUS is a prototype, this is not much
208 * of a problem right now.
209 *
210 * 2) It is theoretically racy if a job release occurs
211 * between checking job_no and calling sleep_next_period().
212 * A proper solution would requiring adding another callback
213 * in the plugin structure and testing the condition with
214 * interrupts disabled.
215 *
216 * FIXME: At least problem 2 should be taken care of eventually.
217 */
218 while (!retval && job > current->rt_param.job_params.job_no)
219 /* If the last job overran then job <= job_no and we
220 * don't send the task to sleep.
221 */
222 retval = litmus->complete_job();
223 out:
224 return retval;
225}
226
227/* This is a helper syscall to query the current job sequence number.
228 *
229 * returns 0 on successful query
230 * returns EPERM if task is not a real-time task.
231 * returns EFAULT if &job is not a valid pointer.
232 */
233asmlinkage long sys_query_job_no(unsigned int __user *job)
234{
235 int retval = -EPERM;
236 if (is_realtime(current))
237 retval = put_user(current->rt_param.job_params.job_no, job);
238
239 return retval;
240}
241
242/* sys_null_call() is only used for determining raw system call
243 * overheads (kernel entry, kernel exit). It has no useful side effects.
244 * If ts is non-NULL, then the current Feather-Trace time is recorded.
245 */
246asmlinkage long sys_null_call(cycles_t __user *ts)
247{
248 long ret = 0;
249 cycles_t now;
250
251 if (ts) {
252 now = get_cycles();
253 ret = put_user(now, ts);
254 }
255
256 return ret;
257}
258
259/* p is a real-time task. Re-init its state as a best-effort task. */
260static void reinit_litmus_state(struct task_struct* p, int restore)
261{
262 struct rt_task user_config = {};
263 __user short *np_flag = NULL;
264
265 if (restore) {
266 /* Safe user-space provided configuration data. */
267 user_config = p->rt_param.task_params;
268 np_flag = p->rt_param.np_flag;
269 }
270
271 /* We probably should not be inheriting any task's priority
272 * at this point in time.
273 */
274 WARN_ON(p->rt_param.inh_task);
275
276 /* We need to restore the priority of the task. */
277// __setscheduler(p, p->rt_param.old_policy, p->rt_param.old_prio);
278
279 /* Cleanup everything else. */
280 memset(&p->rt_param, 0, sizeof(user_config));
281
282 /* Restore preserved fields. */
283 if (restore) {
284 p->rt_param.task_params = user_config;
285 p->rt_param.np_flag = np_flag;
286 }
287}
288
289long litmus_admit_task(struct task_struct* tsk)
290{
291 long retval = 0;
292 unsigned long flags;
293
294 BUG_ON(is_realtime(tsk));
295
296 if (get_rt_period(tsk) == 0 ||
297 get_exec_cost(tsk) > get_rt_period(tsk)) {
298 TRACE_TASK(tsk, "litmus admit: invalid task parameters "
299 "(%lu, %lu)\n",
300 get_exec_cost(tsk), get_rt_period(tsk));
301 return -EINVAL;
302 }
303
304 if (!cpu_online(get_partition(tsk)))
305 {
306 TRACE_TASK(tsk, "litmus admit: cpu %d is not online\n",
307 get_partition(tsk));
308 return -EINVAL;
309 }
310
311 INIT_LIST_HEAD(&tsk_rt(tsk)->list);
312
313 /* avoid scheduler plugin changing underneath us */
314 spin_lock_irqsave(&task_transition_lock, flags);
315
316 /* allocate heap node for this task */
317 tsk_rt(tsk)->heap_node = heap_node_alloc(GFP_ATOMIC);
318 if (!tsk_rt(tsk)->heap_node ||
319 !tsk_rt(tsk)->rel_heap) {
320 printk(KERN_WARNING "litmus: no more heap node memory!?\n");
321 retval = -ENOMEM;
322 heap_node_free(tsk_rt(tsk)->heap_node);
323 } else
324 heap_node_init(&tsk_rt(tsk)->heap_node, tsk);
325
326 if (!retval)
327 retval = litmus->admit_task(tsk);
328
329 if (!retval) {
330 sched_trace_task_name(tsk);
331 sched_trace_task_param(tsk);
332 atomic_inc(&rt_task_count);
333 }
334
335 spin_unlock_irqrestore(&task_transition_lock, flags);
336
337 return retval;
338}
339
340void litmus_exit_task(struct task_struct* tsk)
341{
342 if (is_realtime(tsk)) {
343 sched_trace_task_completion(tsk, 1);
344 litmus->task_exit(tsk);
345 BUG_ON(heap_node_in_heap(tsk_rt(tsk)->heap_node));
346 heap_node_free(tsk_rt(tsk)->heap_node);
347 atomic_dec(&rt_task_count);
348 reinit_litmus_state(tsk, 1);
349 }
350}
351
352/* Switching a plugin in use is tricky.
353 * We must watch out that no real-time tasks exists
354 * (and that none is created in parallel) and that the plugin is not
355 * currently in use on any processor (in theory).
356 *
357 * For now, we don't enforce the second part since it is unlikely to cause
358 * any trouble by itself as long as we don't unload modules.
359 */
360int switch_sched_plugin(struct sched_plugin* plugin)
361{
362 unsigned long flags;
363 int ret = 0;
364
365 BUG_ON(!plugin);
366
367 /* stop task transitions */
368 spin_lock_irqsave(&task_transition_lock, flags);
369
370 /* don't switch if there are active real-time tasks */
371 if (atomic_read(&rt_task_count) == 0) {
372 ret = litmus->deactivate_plugin();
373 if (0 != ret)
374 goto out;
375 ret = plugin->activate_plugin();
376 if (0 != ret) {
377 printk(KERN_INFO "Can't activate %s (%d).\n",
378 plugin->plugin_name, ret);
379 plugin = &linux_sched_plugin;
380 }
381 printk(KERN_INFO "Switching to LITMUS^RT plugin %s.\n", plugin->plugin_name);
382 litmus = plugin;
383 } else
384 ret = -EBUSY;
385out:
386 spin_unlock_irqrestore(&task_transition_lock, flags);
387 return ret;
388}
389
/* Called upon fork.
 * p is the newly forked task.
 */
void litmus_fork(struct task_struct* p)
{
	if (!is_realtime(p))
		return;

	/* clean out any litmus related state, don't preserve anything */
	reinit_litmus_state(p, 0);
}
399
400/* Called upon execve().
401 * current is doing the exec.
402 * Don't let address space specific stuff leak.
403 */
404void litmus_exec(void)
405{
406 struct task_struct* p = current;
407
408 if (is_realtime(p)) {
409 WARN_ON(p->rt_param.inh_task);
410 p->rt_param.np_flag = NULL;
411 }
412}
413
/* Task-exit hook: clean up LITMUS state for a dying real-time task. */
void exit_litmus(struct task_struct *tsk)
{
	if (is_realtime(tsk))
		litmus_exit_task(tsk);
}
419
420
421#ifdef CONFIG_MAGIC_SYSRQ
422int sys_kill(int pid, int sig);
423
424static void sysrq_handle_kill_rt_tasks(int key, struct tty_struct *tty)
425{
426 struct task_struct *t;
427 read_lock(&tasklist_lock);
428 for_each_process(t) {
429 if (is_realtime(t)) {
430 sys_kill(t->pid, SIGKILL);
431 }
432 }
433 read_unlock(&tasklist_lock);
434}
435
/* SysRq key operation; registered on 'x' in _init_litmus(). */
static struct sysrq_key_op sysrq_kill_rt_tasks_op = {
	.handler = sysrq_handle_kill_rt_tasks,
	.help_msg = "quit-rt-tasks(X)",
	.action_msg = "sent SIGKILL to all LITMUS^RT real-time tasks",
};
441
442
443#endif
444
445
446static int proc_read_stats(char *page, char **start,
447 off_t off, int count,
448 int *eof, void *data)
449{
450 int len;
451
452 len = snprintf(page, PAGE_SIZE,
453 "real-time tasks = %d\n"
454 "ready for release = %d\n",
455 atomic_read(&rt_task_count),
456 0);
457 return len;
458}
459
460static int proc_read_plugins(char *page, char **start,
461 off_t off, int count,
462 int *eof, void *data)
463{
464 int len;
465
466 len = print_sched_plugins(page, PAGE_SIZE);
467 return len;
468}
469
470static int proc_read_curr(char *page, char **start,
471 off_t off, int count,
472 int *eof, void *data)
473{
474 int len;
475
476 len = snprintf(page, PAGE_SIZE, "%s\n", litmus->plugin_name);
477 return len;
478}
479
480static int proc_write_curr(struct file *file,
481 const char *buffer,
482 unsigned long count,
483 void *data)
484{
485 int len, ret;
486 char name[65];
487 struct sched_plugin* found;
488
489 if(count > 64)
490 len = 64;
491 else
492 len = count;
493
494 if(copy_from_user(name, buffer, len))
495 return -EFAULT;
496
497 name[len] = '\0';
498 /* chomp name */
499 if (len > 1 && name[len - 1] == '\n')
500 name[len - 1] = '\0';
501
502 found = find_sched_plugin(name);
503
504 if (found) {
505 ret = switch_sched_plugin(found);
506 if (ret != 0)
507 printk(KERN_INFO "Could not switch plugin: %d\n", ret);
508 } else
509 printk(KERN_INFO "Plugin '%s' is unknown.\n", name);
510
511 return len;
512}
513
514
515static int proc_read_release_master(char *page, char **start,
516 off_t off, int count,
517 int *eof, void *data)
518{
519 int len, master;
520 master = atomic_read(&release_master_cpu);
521 if (master == NO_CPU)
522 len = snprintf(page, PAGE_SIZE, "NO_CPU\n");
523 else
524 len = snprintf(page, PAGE_SIZE, "%d\n", master);
525 return len;
526}
527
528static int proc_write_release_master(struct file *file,
529 const char *buffer,
530 unsigned long count,
531 void *data)
532{
533 int cpu, err, online = 0;
534 char msg[64];
535
536 if (count > 63)
537 return -EINVAL;
538
539 if (copy_from_user(msg, buffer, count))
540 return -EFAULT;
541
542 /* terminate */
543 msg[count] = '\0';
544 /* chomp */
545 if (count > 1 && msg[count - 1] == '\n')
546 msg[count - 1] = '\0';
547
548 if (strcmp(msg, "NO_CPU") == 0) {
549 atomic_set(&release_master_cpu, NO_CPU);
550 return count;
551 } else {
552 err = sscanf(msg, "%d", &cpu);
553 if (err == 1 && cpu >= 0 && (online = cpu_online(cpu))) {
554 atomic_set(&release_master_cpu, cpu);
555 return count;
556 } else {
557 TRACE("invalid release master: '%s' "
558 "(err:%d cpu:%d online:%d)\n",
559 msg, err, cpu, online);
560 return -EINVAL;
561 }
562 }
563}
564
/* procfs entries created by init_litmus_proc() and removed again by
 * exit_litmus_proc(). NULL means "not (yet) created". */
static struct proc_dir_entry *litmus_dir = NULL,
	*curr_file = NULL,
	*stat_file = NULL,
	*plugs_file = NULL,
	*release_master_file = NULL;
570
571static int __init init_litmus_proc(void)
572{
573 litmus_dir = proc_mkdir("litmus", NULL);
574 if (!litmus_dir) {
575 printk(KERN_ERR "Could not allocate LITMUS^RT procfs entry.\n");
576 return -ENOMEM;
577 }
578
579 curr_file = create_proc_entry("active_plugin",
580 0644, litmus_dir);
581 if (!curr_file) {
582 printk(KERN_ERR "Could not allocate active_plugin "
583 "procfs entry.\n");
584 return -ENOMEM;
585 }
586 curr_file->read_proc = proc_read_curr;
587 curr_file->write_proc = proc_write_curr;
588
589 release_master_file = create_proc_entry("release_master",
590 0644, litmus_dir);
591 if (!release_master_file) {
592 printk(KERN_ERR "Could not allocate release_master "
593 "procfs entry.\n");
594 return -ENOMEM;
595 }
596 release_master_file->read_proc = proc_read_release_master;
597 release_master_file->write_proc = proc_write_release_master;
598
599 stat_file = create_proc_read_entry("stats", 0444, litmus_dir,
600 proc_read_stats, NULL);
601
602 plugs_file = create_proc_read_entry("plugins", 0444, litmus_dir,
603 proc_read_plugins, NULL);
604
605 return 0;
606}
607
608static void exit_litmus_proc(void)
609{
610 if (plugs_file)
611 remove_proc_entry("plugins", litmus_dir);
612 if (stat_file)
613 remove_proc_entry("stats", litmus_dir);
614 if (curr_file)
615 remove_proc_entry("active_plugin", litmus_dir);
616 if (litmus_dir)
617 remove_proc_entry("litmus", NULL);
618}
619
620extern struct sched_plugin linux_sched_plugin;
621
622static int __init _init_litmus(void)
623{
624 /* Common initializers,
625 * mode change lock is used to enforce single mode change
626 * operation.
627 */
628 printk("Starting LITMUS^RT kernel\n");
629
630 register_sched_plugin(&linux_sched_plugin);
631
632 heap_node_cache = KMEM_CACHE(heap_node, SLAB_PANIC);
633
634#ifdef CONFIG_MAGIC_SYSRQ
635 /* offer some debugging help */
636 if (!register_sysrq_key('x', &sysrq_kill_rt_tasks_op))
637 printk("Registered kill rt tasks magic sysrq.\n");
638 else
639 printk("Could not register kill rt tasks magic sysrq.\n");
640#endif
641
642 init_litmus_proc();
643
644 return 0;
645}
646
/* Module teardown: remove the procfs interface and release the
 * heap_node slab cache. */
static void _exit_litmus(void)
{
	exit_litmus_proc();
	kmem_cache_destroy(heap_node_cache);
}
652
653module_init(_init_litmus);
654module_exit(_exit_litmus);