path: root/litmus/litmus.c
Diffstat (limited to 'litmus/litmus.c')
 -rw-r--r--  litmus/litmus.c  775
 1 file changed, 775 insertions(+), 0 deletions(-)
diff --git a/litmus/litmus.c b/litmus/litmus.c
new file mode 100644
index 000000000000..e43596a5104c
--- /dev/null
+++ b/litmus/litmus.c
@@ -0,0 +1,775 @@
/*
 * litmus.c -- Implementation of the LITMUS syscalls,
 *             the LITMUS initialization code,
 *             and the procfs interface.
 */
#include <linux/uaccess.h>
#include <linux/sysrq.h>

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <litmus/litmus.h>
#include <litmus/sched_plugin.h>
#include <litmus/bheap.h>
#include <litmus/trace.h>
#include <litmus/rt_domain.h>

/* Number of RT tasks that exist in the system */
atomic_t rt_task_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(task_transition_lock);
/* synchronize plugin switching */
atomic_t cannot_use_plugin = ATOMIC_INIT(0);

/* Give log messages sequential IDs. */
atomic_t __log_seq_no = ATOMIC_INIT(0);

/* current master CPU for handling timer IRQs */
atomic_t release_master_cpu = ATOMIC_INIT(NO_CPU);

static struct kmem_cache *bheap_node_cache;
extern struct kmem_cache *release_heap_cache;

struct bheap_node* bheap_node_alloc(int gfp_flags)
{
	return kmem_cache_alloc(bheap_node_cache, gfp_flags);
}

void bheap_node_free(struct bheap_node* hn)
{
	kmem_cache_free(bheap_node_cache, hn);
}

struct release_heap* release_heap_alloc(int gfp_flags);
void release_heap_free(struct release_heap* rh);

/*
 * sys_set_rt_task_param
 * @pid: PID of the task whose scheduling parameters are to be changed
 * @param: New real-time extension parameters such as the execution cost and
 *         period
 * Syscall for manipulating a task's RT extension params
 * Returns EFAULT if param is NULL.
 *         ESRCH  if pid does not correspond to a valid task.
 *         EINVAL if either period or execution cost is <= 0
 *         EBUSY  if pid is already a real-time task
 *         0      if success
 *
 * Only non-real-time tasks may be configured with this system call
 * to avoid races with the scheduler. In practice, this means that a
 * task's parameters must be set _before_ it transitions to real-time mode.
 *
 * find_task_by_vpid() assumes that we are in the same namespace as the
 * target.
 */
asmlinkage long sys_set_rt_task_param(pid_t pid, struct rt_task __user * param)
{
	struct rt_task tp;
	struct task_struct *target;
	int retval = -EINVAL;

	printk(KERN_INFO "Setting up rt task parameters for process %d.\n", pid);

	if (pid < 0 || param == NULL)
		goto out;

	if (copy_from_user(&tp, param, sizeof(tp))) {
		retval = -EFAULT;
		goto out;
	}

	/* Task search and manipulation must be protected */
	read_lock_irq(&tasklist_lock);
	if (!(target = find_task_by_vpid(pid))) {
		retval = -ESRCH;
		goto out_unlock;
	}

	if (is_realtime(target)) {
		/* The task is already a real-time task.
		 * We cannot allow parameter changes at this point.
		 */
		retval = -EBUSY;
		goto out_unlock;
	}

	if (tp.exec_cost <= 0)
		goto out_unlock;
	if (tp.period <= 0)
		goto out_unlock;
	if (!cpu_online(tp.cpu))
		goto out_unlock;
	if (tp.period < tp.exec_cost) {
		printk(KERN_INFO "litmus: real-time task %d rejected "
		       "because wcet > period\n", pid);
		goto out_unlock;
	}

	target->rt_param.task_params = tp;

	retval = 0;
out_unlock:
	read_unlock_irq(&tasklist_lock);
out:
	return retval;
}
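
/* Example (illustrative sketch only): from user space, parameters are
 * typically set via a liblitmus-style wrapper before the task switches
 * into real-time mode. The wrapper names below (set_rt_task_param(),
 * task_mode(), ms2ns()) are assumed library conventions, not defined in
 * this file:
 *
 *	struct rt_task tp = {
 *		.exec_cost = ms2ns(10),
 *		.period    = ms2ns(100),
 *		.cpu       = 0,
 *	};
 *	set_rt_task_param(getpid(), &tp);
 *	task_mode(LITMUS_RT_TASK);
 */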

/*
 * Getter of a task's RT params
 * returns EINVAL if param is NULL or pid is negative
 * returns ESRCH  if pid does not correspond to a valid task
 * returns EFAULT if copying of parameters has failed.
 *
 * find_task_by_vpid() assumes that we are in the same namespace as the
 * target.
 */
asmlinkage long sys_get_rt_task_param(pid_t pid, struct rt_task __user * param)
{
	int retval = -EINVAL;
	struct task_struct *source;
	struct rt_task lp;
	if (param == NULL || pid < 0)
		goto out;
	read_lock(&tasklist_lock);
	if (!(source = find_task_by_vpid(pid))) {
		retval = -ESRCH;
		goto out_unlock;
	}
	lp = source->rt_param.task_params;
	read_unlock(&tasklist_lock);
	/* Do copying outside the lock */
	retval = copy_to_user(param, &lp, sizeof(lp)) ? -EFAULT : 0;
	return retval;
out_unlock:
	read_unlock(&tasklist_lock);
out:
	return retval;
}

/*
 * This is the crucial function for the periodic task implementation.
 * It checks whether a task is periodic, whether this kind of sleep
 * is permitted, and calls the plugin-specific sleep, which puts the
 * task into a wait array.
 * returns 0 on successful wakeup
 * returns EPERM if current conditions do not permit such sleep
 * returns EINVAL if current task is not able to go to sleep
 */
asmlinkage long sys_complete_job(void)
{
	int retval = -EPERM;
	if (!is_realtime(current)) {
		retval = -EINVAL;
		goto out;
	}
	/* Task with negative or zero period cannot sleep */
	if (get_rt_period(current) <= 0) {
		retval = -EINVAL;
		goto out;
	}
	/* The plugin has to put the task into an
	 * appropriate queue and call schedule
	 */
	retval = litmus->complete_job();
out:
	return retval;
}
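
/* Example (illustrative sketch only): a periodic task's main loop would
 * invoke this syscall at the end of each job, e.g. via a liblitmus-style
 * sleep_next_period() wrapper (name assumed, not defined here):
 *
 *	while (!done) {
 *		do_one_job();
 *		sleep_next_period();	// invokes sys_complete_job()
 *	}
 */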

/* This is an "improved" version of sys_complete_job that
 * addresses the problem of unintentionally missing a job after
 * an overrun.
 *
 * returns 0 on successful wakeup
 * returns EPERM if current conditions do not permit such sleep
 * returns EINVAL if current task is not able to go to sleep
 */
asmlinkage long sys_wait_for_job_release(unsigned int job)
{
	int retval = -EPERM;
	if (!is_realtime(current)) {
		retval = -EINVAL;
		goto out;
	}

	/* Task with negative or zero period cannot sleep */
	if (get_rt_period(current) <= 0) {
		retval = -EINVAL;
		goto out;
	}

	retval = 0;

	/* first wait until we have "reached" the desired job
	 *
	 * This implementation has at least two problems:
	 *
	 * 1) It doesn't gracefully handle the wrap-around of
	 *    job_no. Since LITMUS is a prototype, this is not much
	 *    of a problem right now.
	 *
	 * 2) It is theoretically racy if a job release occurs
	 *    between checking job_no and calling sleep_next_period().
	 *    A proper solution would require adding another callback
	 *    in the plugin structure and testing the condition with
	 *    interrupts disabled.
	 *
	 * FIXME: At least problem 2 should be taken care of eventually.
	 */
	while (!retval && job > current->rt_param.job_params.job_no)
		/* If the last job overran then job <= job_no and we
		 * don't send the task to sleep.
		 */
		retval = litmus->complete_job();
out:
	return retval;
}

/* This is a helper syscall to query the current job sequence number.
 *
 * returns 0 on successful query
 * returns EPERM if task is not a real-time task.
 * returns EFAULT if &job is not a valid pointer.
 */
asmlinkage long sys_query_job_no(unsigned int __user *job)
{
	int retval = -EPERM;
	if (is_realtime(current))
		retval = put_user(current->rt_param.job_params.job_no, job);

	return retval;
}
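
/* Example (illustrative sketch only): together with sys_wait_for_job_release()
 * above, a task that detects an overrun can skip ahead to a well-defined job
 * boundary; the wrapper names below are assumed liblitmus-style conventions:
 *
 *	unsigned int job_no;
 *	get_job_no(&job_no);                 // wraps sys_query_job_no()
 *	wait_for_job_release(job_no + 1);    // wraps sys_wait_for_job_release()
 */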

/* sys_null_call() is only used for determining raw system call
 * overheads (kernel entry, kernel exit). It has no useful side effects.
 * If ts is non-NULL, then the current Feather-Trace time is recorded.
 */
asmlinkage long sys_null_call(cycles_t __user *ts)
{
	long ret = 0;
	cycles_t now;

	if (ts) {
		now = get_cycles();
		ret = put_user(now, ts);
	}

	return ret;
}
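
/* Example (illustrative sketch only): user space can estimate kernel entry
 * and exit costs by timestamping around the syscall; null_call() and
 * read_tsc() are assumed user-space helpers (e.g. rdtsc on x86), not
 * defined in this file:
 *
 *	cycles_t before, in_kernel, after;
 *	before = read_tsc();
 *	null_call(&in_kernel);
 *	after = read_tsc();
 *
 * Entry cost is then approximately in_kernel - before, and exit cost
 * approximately after - in_kernel.
 */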

/* p is a real-time task. Re-init its state as a best-effort task. */
static void reinit_litmus_state(struct task_struct* p, int restore)
{
	struct rt_task user_config = {};
	void* ctrl_page = NULL;

	if (restore) {
		/* Save user-space provided configuration data
		 * and the allocated control page. */
		user_config = p->rt_param.task_params;
		ctrl_page = p->rt_param.ctrl_page;
	}

	/* We probably should not be inheriting any task's priority
	 * at this point in time.
	 */
	WARN_ON(p->rt_param.inh_task);

	/* We need to restore the priority of the task. */
	/* XXX why is this commented out?
	 * __setscheduler(p, p->rt_param.old_policy, p->rt_param.old_prio);
	 */

	/* Clean up everything else. */
	memset(&p->rt_param, 0, sizeof(p->rt_param));

	/* Restore preserved fields. */
	if (restore) {
		p->rt_param.task_params = user_config;
		p->rt_param.ctrl_page = ctrl_page;
	}
}

long litmus_admit_task(struct task_struct* tsk)
{
	long retval = 0;
	unsigned long flags;

	BUG_ON(is_realtime(tsk));

	if (get_rt_period(tsk) == 0 ||
	    get_exec_cost(tsk) > get_rt_period(tsk)) {
		TRACE_TASK(tsk, "litmus admit: invalid task parameters "
			   "(%lu, %lu)\n",
			   get_exec_cost(tsk), get_rt_period(tsk));
		retval = -EINVAL;
		goto out;
	}

	if (!cpu_online(get_partition(tsk))) {
		TRACE_TASK(tsk, "litmus admit: cpu %d is not online\n",
			   get_partition(tsk));
		retval = -EINVAL;
		goto out;
	}

	INIT_LIST_HEAD(&tsk_rt(tsk)->list);

	/* avoid scheduler plugin changing underneath us */
	spin_lock_irqsave(&task_transition_lock, flags);

	/* allocate heap node for this task; GFP_ATOMIC because we hold
	 * a spinlock with interrupts disabled */
	tsk_rt(tsk)->heap_node = bheap_node_alloc(GFP_ATOMIC);
	tsk_rt(tsk)->rel_heap = release_heap_alloc(GFP_ATOMIC);

	if (!tsk_rt(tsk)->heap_node || !tsk_rt(tsk)->rel_heap) {
		printk(KERN_WARNING "litmus: no more heap node memory!?\n");

		/* free only what was actually allocated */
		if (tsk_rt(tsk)->heap_node)
			bheap_node_free(tsk_rt(tsk)->heap_node);
		if (tsk_rt(tsk)->rel_heap)
			release_heap_free(tsk_rt(tsk)->rel_heap);

		retval = -ENOMEM;
		goto out_unlock;
	} else {
		bheap_node_init(&tsk_rt(tsk)->heap_node, tsk);
	}

	retval = litmus->admit_task(tsk);

	if (!retval) {
		sched_trace_task_name(tsk);
		sched_trace_task_param(tsk);
		atomic_inc(&rt_task_count);
	}

out_unlock:
	spin_unlock_irqrestore(&task_transition_lock, flags);
out:
	return retval;
}

void litmus_exit_task(struct task_struct* tsk)
{
	if (is_realtime(tsk)) {
		sched_trace_task_completion(tsk, 1);

		litmus->task_exit(tsk);

		BUG_ON(bheap_node_in_heap(tsk_rt(tsk)->heap_node));
		bheap_node_free(tsk_rt(tsk)->heap_node);
		release_heap_free(tsk_rt(tsk)->rel_heap);

		atomic_dec(&rt_task_count);
		reinit_litmus_state(tsk, 1);
	}
}

/* IPI callback to synchronize plugin switching */
static void synch_on_plugin_switch(void* info)
{
	while (atomic_read(&cannot_use_plugin))
		cpu_relax();
}
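
/* The switch protocol below: set cannot_use_plugin, send an IPI so that
 * every other CPU spins in synch_on_plugin_switch() and thus cannot be
 * executing plugin code, perform the actual switch under
 * task_transition_lock, and finally release the spinning CPUs by
 * clearing the flag.
 */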

/* Switching a plugin in use is tricky.
 * We must watch out that no real-time tasks exist
 * (and that none are created in parallel) and that the plugin is not
 * currently in use on any processor (in theory).
 */
int switch_sched_plugin(struct sched_plugin* plugin)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!plugin);

	/* forbid other cpus to use the plugin */
	atomic_set(&cannot_use_plugin, 1);
	/* send IPI to force other CPUs to synch with us */
	smp_call_function(synch_on_plugin_switch, NULL, 0);

	/* stop task transitions */
	spin_lock_irqsave(&task_transition_lock, flags);

	/* don't switch if there are active real-time tasks */
	if (atomic_read(&rt_task_count) == 0) {
		ret = litmus->deactivate_plugin();
		if (ret != 0)
			goto out;
		ret = plugin->activate_plugin();
		if (ret != 0) {
			printk(KERN_INFO "Can't activate %s (%d).\n",
			       plugin->plugin_name, ret);
			plugin = &linux_sched_plugin;
		}
		printk(KERN_INFO "Switching to LITMUS^RT plugin %s.\n",
		       plugin->plugin_name);
		litmus = plugin;
	} else
		ret = -EBUSY;
out:
	spin_unlock_irqrestore(&task_transition_lock, flags);
	atomic_set(&cannot_use_plugin, 0);
	return ret;
}

/* Called upon fork.
 * p is the newly forked task.
 */
void litmus_fork(struct task_struct* p)
{
	if (is_realtime(p))
		/* clean out any litmus related state, don't preserve anything */
		reinit_litmus_state(p, 0);
	else
		/* non-rt tasks might have ctrl_page set */
		tsk_rt(p)->ctrl_page = NULL;

	/* OD (object descriptor) tables are never inherited across a fork */
	p->od_table = NULL;
}

/* Called upon execve().
 * current is doing the exec.
 * Don't let address space specific stuff leak.
 */
void litmus_exec(void)
{
	struct task_struct* p = current;

	if (is_realtime(p)) {
		WARN_ON(p->rt_param.inh_task);
		if (tsk_rt(p)->ctrl_page) {
			free_page((unsigned long) tsk_rt(p)->ctrl_page);
			tsk_rt(p)->ctrl_page = NULL;
		}
	}
}

void exit_litmus(struct task_struct *dead_tsk)
{
	/* We also allow non-RT tasks to
	 * allocate control pages to allow
	 * measurements with non-RT tasks.
	 * So check if we need to free the page
	 * in any case.
	 */
	if (tsk_rt(dead_tsk)->ctrl_page) {
		TRACE_TASK(dead_tsk,
			   "freeing ctrl_page %p\n",
			   tsk_rt(dead_tsk)->ctrl_page);
		free_page((unsigned long) tsk_rt(dead_tsk)->ctrl_page);
	}

	/* main cleanup only for RT tasks */
	if (is_realtime(dead_tsk))
		litmus_exit_task(dead_tsk);
}


#ifdef CONFIG_MAGIC_SYSRQ
int sys_kill(int pid, int sig);

static void sysrq_handle_kill_rt_tasks(int key, struct tty_struct *tty)
{
	struct task_struct *t;
	read_lock(&tasklist_lock);
	for_each_process(t) {
		if (is_realtime(t)) {
			sys_kill(t->pid, SIGKILL);
		}
	}
	read_unlock(&tasklist_lock);
}

static struct sysrq_key_op sysrq_kill_rt_tasks_op = {
	.handler	= sysrq_handle_kill_rt_tasks,
	.help_msg	= "quit-rt-tasks(X)",
	.action_msg	= "sent SIGKILL to all LITMUS^RT real-time tasks",
};
#endif
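
/* Usage note: once registered under the magic SysRq key 'x' in
 * _init_litmus() below, this handler can be triggered from a console via
 *
 *	echo x > /proc/sysrq-trigger
 */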

/* in litmus/sync.c */
int count_tasks_waiting_for_release(void);

static int proc_read_stats(char *page, char **start,
			   off_t off, int count,
			   int *eof, void *data)
{
	int len;

	len = snprintf(page, PAGE_SIZE,
		       "real-time tasks = %d\n"
		       "ready for release = %d\n",
		       atomic_read(&rt_task_count),
		       count_tasks_waiting_for_release());
	return len;
}

static int proc_read_plugins(char *page, char **start,
			     off_t off, int count,
			     int *eof, void *data)
{
	int len;

	len = print_sched_plugins(page, PAGE_SIZE);
	return len;
}

static int proc_read_curr(char *page, char **start,
			  off_t off, int count,
			  int *eof, void *data)
{
	int len;

	len = snprintf(page, PAGE_SIZE, "%s\n", litmus->plugin_name);
	return len;
}

static int proc_write_curr(struct file *file,
			   const char *buffer,
			   unsigned long count,
			   void *data)
{
	int len, ret;
	char name[65];
	struct sched_plugin* found;

	if (count > 64)
		len = 64;
	else
		len = count;

	if (copy_from_user(name, buffer, len))
		return -EFAULT;

	name[len] = '\0';
	/* chomp name */
	if (len > 1 && name[len - 1] == '\n')
		name[len - 1] = '\0';

	found = find_sched_plugin(name);

	if (found) {
		ret = switch_sched_plugin(found);
		if (ret != 0)
			printk(KERN_INFO "Could not switch plugin: %d\n", ret);
	} else
		printk(KERN_INFO "Plugin '%s' is unknown.\n", name);

	return len;
}
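
/* Example: the active plugin is selected from user space by writing its
 * name to the corresponding procfs file, e.g. (plugin name illustrative):
 *
 *	echo GSN-EDF > /proc/litmus/active_plugin
 */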

static int proc_read_cluster_size(char *page, char **start,
				  off_t off, int count,
				  int *eof, void *data)
{
	int len;
	if (cluster_cache_index == 2)
		len = snprintf(page, PAGE_SIZE, "L2\n");
	else if (cluster_cache_index == 3)
		len = snprintf(page, PAGE_SIZE, "L3\n");
	else /* (cluster_cache_index == 1) */
		len = snprintf(page, PAGE_SIZE, "L1\n");

	return len;
}

static int proc_write_cluster_size(struct file *file,
				   const char *buffer,
				   unsigned long count,
				   void *data)
{
	int len;
	/* L2, L3 */
	char cache_name[33];

	if (count > 32)
		len = 32;
	else
		len = count;

	if (copy_from_user(cache_name, buffer, len))
		return -EFAULT;

	cache_name[len] = '\0';
	/* chomp name */
	if (len > 1 && cache_name[len - 1] == '\n')
		cache_name[len - 1] = '\0';

	/* do a quick and dirty comparison to find the cluster size */
	if (!strcmp(cache_name, "L2"))
		cluster_cache_index = 2;
	else if (!strcmp(cache_name, "L3"))
		cluster_cache_index = 3;
	else if (!strcmp(cache_name, "L1"))
		cluster_cache_index = 1;
	else
		printk(KERN_INFO "Cluster '%s' is unknown.\n", cache_name);

	return len;
}
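
/* Example: the cluster size is selected by writing a cache level name
 * (L1, L2, or L3):
 *
 *	echo L2 > /proc/litmus/cluster_cache
 */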

static int proc_read_release_master(char *page, char **start,
				    off_t off, int count,
				    int *eof, void *data)
{
	int len, master;
	master = atomic_read(&release_master_cpu);
	if (master == NO_CPU)
		len = snprintf(page, PAGE_SIZE, "NO_CPU\n");
	else
		len = snprintf(page, PAGE_SIZE, "%d\n", master);
	return len;
}

static int proc_write_release_master(struct file *file,
				     const char *buffer,
				     unsigned long count,
				     void *data)
{
	int cpu, err, online = 0;
	char msg[64];

	if (count > 63)
		return -EINVAL;

	if (copy_from_user(msg, buffer, count))
		return -EFAULT;

	/* terminate */
	msg[count] = '\0';
	/* chomp */
	if (count > 1 && msg[count - 1] == '\n')
		msg[count - 1] = '\0';

	if (strcmp(msg, "NO_CPU") == 0) {
		atomic_set(&release_master_cpu, NO_CPU);
		return count;
	} else {
		err = sscanf(msg, "%d", &cpu);
		if (err == 1 && cpu >= 0 && (online = cpu_online(cpu))) {
			atomic_set(&release_master_cpu, cpu);
			return count;
		} else {
			TRACE("invalid release master: '%s' "
			      "(err:%d cpu:%d online:%d)\n",
			      msg, err, cpu, online);
			return -EINVAL;
		}
	}
}
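
/* Example: the release master CPU is configured by writing an online CPU
 * number, or "NO_CPU" to revert to the default (no dedicated master):
 *
 *	echo 0      > /proc/litmus/release_master
 *	echo NO_CPU > /proc/litmus/release_master
 */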

static struct proc_dir_entry *litmus_dir = NULL,
	*curr_file = NULL,
	*stat_file = NULL,
	*plugs_file = NULL,
	*clus_cache_idx_file = NULL,
	*release_master_file = NULL;

static int __init init_litmus_proc(void)
{
	litmus_dir = proc_mkdir("litmus", NULL);
	if (!litmus_dir) {
		printk(KERN_ERR "Could not allocate LITMUS^RT procfs entry.\n");
		return -ENOMEM;
	}

	curr_file = create_proc_entry("active_plugin",
				      0644, litmus_dir);
	if (!curr_file) {
		printk(KERN_ERR "Could not allocate active_plugin "
		       "procfs entry.\n");
		return -ENOMEM;
	}
	curr_file->read_proc  = proc_read_curr;
	curr_file->write_proc = proc_write_curr;

	release_master_file = create_proc_entry("release_master",
						0644, litmus_dir);
	if (!release_master_file) {
		printk(KERN_ERR "Could not allocate release_master "
		       "procfs entry.\n");
		return -ENOMEM;
	}
	release_master_file->read_proc  = proc_read_release_master;
	release_master_file->write_proc = proc_write_release_master;

	clus_cache_idx_file = create_proc_entry("cluster_cache",
						0644, litmus_dir);
	if (!clus_cache_idx_file) {
		printk(KERN_ERR "Could not allocate cluster_cache "
		       "procfs entry.\n");
		return -ENOMEM;
	}
	clus_cache_idx_file->read_proc  = proc_read_cluster_size;
	clus_cache_idx_file->write_proc = proc_write_cluster_size;

	stat_file = create_proc_read_entry("stats", 0444, litmus_dir,
					   proc_read_stats, NULL);

	plugs_file = create_proc_read_entry("plugins", 0444, litmus_dir,
					    proc_read_plugins, NULL);

	return 0;
}

static void exit_litmus_proc(void)
{
	if (plugs_file)
		remove_proc_entry("plugins", litmus_dir);
	if (stat_file)
		remove_proc_entry("stats", litmus_dir);
	if (curr_file)
		remove_proc_entry("active_plugin", litmus_dir);
	if (clus_cache_idx_file)
		remove_proc_entry("cluster_cache", litmus_dir);
	if (release_master_file)
		remove_proc_entry("release_master", litmus_dir);
	if (litmus_dir)
		remove_proc_entry("litmus", NULL);
}

extern struct sched_plugin linux_sched_plugin;

static int __init _init_litmus(void)
{
	/* Common initializers;
	 * the mode change lock is used to enforce a single mode change
	 * operation at a time.
	 */
	printk(KERN_INFO "Starting LITMUS^RT kernel\n");

	register_sched_plugin(&linux_sched_plugin);

	bheap_node_cache = KMEM_CACHE(bheap_node, SLAB_PANIC);
	release_heap_cache = KMEM_CACHE(release_heap, SLAB_PANIC);

#ifdef CONFIG_MAGIC_SYSRQ
	/* offer some debugging help */
	if (!register_sysrq_key('x', &sysrq_kill_rt_tasks_op))
		printk(KERN_INFO "Registered kill rt tasks magic sysrq.\n");
	else
		printk(KERN_WARNING "Could not register kill rt tasks magic sysrq.\n");
#endif

	init_litmus_proc();

	return 0;
}

static void _exit_litmus(void)
{
	exit_litmus_proc();
	kmem_cache_destroy(bheap_node_cache);
	kmem_cache_destroy(release_heap_cache);
}

module_init(_init_litmus);
module_exit(_exit_litmus);