aboutsummaryrefslogtreecommitdiffstats
path: root/litmus/litmus.c
diff options
context:
space:
mode:
Diffstat (limited to 'litmus/litmus.c')
-rw-r--r--litmus/litmus.c555
1 files changed, 555 insertions, 0 deletions
diff --git a/litmus/litmus.c b/litmus/litmus.c
new file mode 100644
index 000000000000..b22f84a02010
--- /dev/null
+++ b/litmus/litmus.c
@@ -0,0 +1,555 @@
/*
 * litmus.c -- Implementation of the LITMUS syscalls,
 *             the LITMUS initialization code,
 *             and the procfs interface.
 */
6#include <asm/uaccess.h>
7#include <linux/uaccess.h>
8#include <linux/sysrq.h>
9#include <linux/sched.h>
10#include <linux/module.h>
11#include <linux/slab.h>
12
13#include <litmus/litmus.h>
14#include <litmus/bheap.h>
15#include <litmus/trace.h>
16#include <litmus/rt_domain.h>
17#include <litmus/litmus_proc.h>
18#include <litmus/sched_trace.h>
19
20/* Number of RT tasks that exist in the system */
21atomic_t rt_task_count = ATOMIC_INIT(0);
22static DEFINE_RAW_SPINLOCK(task_transition_lock);
23/* synchronize plugin switching */
24atomic_t cannot_use_plugin = ATOMIC_INIT(0);
25
26/* Give log messages sequential IDs. */
27atomic_t __log_seq_no = ATOMIC_INIT(0);
28
29#ifdef CONFIG_RELEASE_MASTER
30/* current master CPU for handling timer IRQs */
31atomic_t release_master_cpu = ATOMIC_INIT(NO_CPU);
32#endif
33
34static struct kmem_cache * bheap_node_cache;
35extern struct kmem_cache * release_heap_cache;
36
37struct bheap_node* bheap_node_alloc(int gfp_flags)
38{
39 return kmem_cache_alloc(bheap_node_cache, gfp_flags);
40}
41
42void bheap_node_free(struct bheap_node* hn)
43{
44 kmem_cache_free(bheap_node_cache, hn);
45}
46
47struct release_heap* release_heap_alloc(int gfp_flags);
48void release_heap_free(struct release_heap* rh);
49
50/*
51 * sys_set_task_rt_param
52 * @pid: Pid of the task which scheduling parameters must be changed
53 * @param: New real-time extension parameters such as the execution cost and
54 * period
55 * Syscall for manipulating with task rt extension params
56 * Returns EFAULT if param is NULL.
57 * ESRCH if pid is not corrsponding
58 * to a valid task.
59 * EINVAL if either period or execution cost is <=0
60 * EPERM if pid is a real-time task
61 * 0 if success
62 *
63 * Only non-real-time tasks may be configured with this system call
64 * to avoid races with the scheduler. In practice, this means that a
65 * task's parameters must be set _before_ calling sys_prepare_rt_task()
66 *
67 * find_task_by_vpid() assumes that we are in the same namespace of the
68 * target.
69 */
70asmlinkage long sys_set_rt_task_param(pid_t pid, struct rt_task __user * param)
71{
72 struct rt_task tp;
73 struct task_struct *target;
74 int retval = -EINVAL;
75
76 printk("Setting up rt task parameters for process %d.\n", pid);
77
78 if (pid < 0 || param == 0) {
79 goto out;
80 }
81 if (copy_from_user(&tp, param, sizeof(tp))) {
82 retval = -EFAULT;
83 goto out;
84 }
85
86 /* Task search and manipulation must be protected */
87 read_lock_irq(&tasklist_lock);
88 if (!(target = find_task_by_vpid(pid))) {
89 retval = -ESRCH;
90 goto out_unlock;
91 }
92
93 if (is_realtime(target)) {
94 /* The task is already a real-time task.
95 * We cannot not allow parameter changes at this point.
96 */
97 retval = -EBUSY;
98 goto out_unlock;
99 }
100
101 if (tp.exec_cost <= 0)
102 goto out_unlock;
103 if (tp.period <= 0)
104 goto out_unlock;
105 if (!cpu_online(tp.cpu))
106 goto out_unlock;
107 if (tp.period < tp.exec_cost)
108 {
109 printk(KERN_INFO "litmus: real-time task %d rejected "
110 "because wcet > period\n", pid);
111 goto out_unlock;
112 }
113 if (tp.budget_policy != NO_ENFORCEMENT &&
114 tp.budget_policy != QUANTUM_ENFORCEMENT &&
115 tp.budget_policy != PRECISE_ENFORCEMENT)
116 {
117 printk(KERN_INFO "litmus: real-time task %d rejected "
118 "because unsupported budget enforcement policy "
119 "specified (%d)\n",
120 pid, tp.budget_policy);
121 goto out_unlock;
122 }
123
124 if (tp.priority >= LITMUS_MAX_PRIORITY) {
125 printk(KERN_INFO "litmus: invalid priority (%u); "
126 "task %s/%d rejected\n",
127 tp.priority, target->comm, target->pid);
128 goto out_unlock;
129 }
130
131 target->rt_param.task_params = tp;
132
133 retval = 0;
134 out_unlock:
135 read_unlock_irq(&tasklist_lock);
136 out:
137 return retval;
138}
139
140/*
141 * Getter of task's RT params
142 * returns EINVAL if param or pid is NULL
143 * returns ESRCH if pid does not correspond to a valid task
144 * returns EFAULT if copying of parameters has failed.
145 *
146 * find_task_by_vpid() assumes that we are in the same namespace of the
147 * target.
148 */
149asmlinkage long sys_get_rt_task_param(pid_t pid, struct rt_task __user * param)
150{
151 int retval = -EINVAL;
152 struct task_struct *source;
153 struct rt_task lp;
154 if (param == 0 || pid < 0)
155 goto out;
156 read_lock(&tasklist_lock);
157 if (!(source = find_task_by_vpid(pid))) {
158 retval = -ESRCH;
159 goto out_unlock;
160 }
161 lp = source->rt_param.task_params;
162 read_unlock(&tasklist_lock);
163 /* Do copying outside the lock */
164 retval =
165 copy_to_user(param, &lp, sizeof(lp)) ? -EFAULT : 0;
166 return retval;
167 out_unlock:
168 read_unlock(&tasklist_lock);
169 out:
170 return retval;
171
172}
173
174/*
175 * This is the crucial function for periodic task implementation,
176 * It checks if a task is periodic, checks if such kind of sleep
177 * is permitted and calls plugin-specific sleep, which puts the
178 * task into a wait array.
179 * returns 0 on successful wakeup
180 * returns EPERM if current conditions do not permit such sleep
181 * returns EINVAL if current task is not able to go to sleep
182 */
183asmlinkage long sys_complete_job(void)
184{
185 int retval = -EPERM;
186 if (!is_realtime(current)) {
187 retval = -EINVAL;
188 goto out;
189 }
190 /* Task with negative or zero period cannot sleep */
191 if (get_rt_period(current) <= 0) {
192 retval = -EINVAL;
193 goto out;
194 }
195 /* The plugin has to put the task into an
196 * appropriate queue and call schedule
197 */
198 retval = litmus->complete_job();
199 out:
200 return retval;
201}
202
/* This is an "improved" version of sys_complete_job that
 * addresses the problem of unintentionally missing a job after
 * an overrun.
 *
 * @job: the job sequence number to wait for; if the task's job_no has
 *       already reached or passed it (i.e., after an overrun), the call
 *       returns immediately without sleeping.
 *
 * returns 0 on successful wakeup
 * returns EPERM if current conditions do not permit such sleep
 * returns EINVAL if current task is not able to go to sleep
 */
asmlinkage long sys_wait_for_job_release(unsigned int job)
{
	int retval = -EPERM;
	/* Only real-time tasks have a job sequence to wait on. */
	if (!is_realtime(current)) {
		retval = -EINVAL;
		goto out;
	}

	/* Task with negative or zero period cannot sleep */
	if (get_rt_period(current) <= 0) {
		retval = -EINVAL;
		goto out;
	}

	retval = 0;

	/* first wait until we have "reached" the desired job
	 *
	 * This implementation has at least two problems:
	 *
	 * 1) It doesn't gracefully handle the wrap around of
	 * job_no. Since LITMUS is a prototype, this is not much
	 * of a problem right now.
	 *
	 * 2) It is theoretically racy if a job release occurs
	 * between checking job_no and calling sleep_next_period().
	 * A proper solution would requiring adding another callback
	 * in the plugin structure and testing the condition with
	 * interrupts disabled.
	 *
	 * FIXME: At least problem 2 should be taken care of eventually.
	 */
	while (!retval && job > current->rt_param.job_params.job_no)
		/* If the last job overran then job <= job_no and we
		 * don't send the task to sleep.
		 */
		retval = litmus->complete_job();
 out:
	return retval;
}
251
252/* This is a helper syscall to query the current job sequence number.
253 *
254 * returns 0 on successful query
255 * returns EPERM if task is not a real-time task.
256 * returns EFAULT if &job is not a valid pointer.
257 */
258asmlinkage long sys_query_job_no(unsigned int __user *job)
259{
260 int retval = -EPERM;
261 if (is_realtime(current))
262 retval = put_user(current->rt_param.job_params.job_no, job);
263
264 return retval;
265}
266
267/* sys_null_call() is only used for determining raw system call
268 * overheads (kernel entry, kernel exit). It has no useful side effects.
269 * If ts is non-NULL, then the current Feather-Trace time is recorded.
270 */
271asmlinkage long sys_null_call(cycles_t __user *ts)
272{
273 long ret = 0;
274 cycles_t now;
275
276 if (ts) {
277 now = get_cycles();
278 ret = put_user(now, ts);
279 }
280
281 return ret;
282}
283
/* p is a real-time task. Re-init its state as a best-effort task.
 * @p:       the task whose LITMUS state is being wiped
 * @restore: if non-zero, preserve the user-provided task parameters and
 *           the allocated control page across the reset.
 */
static void reinit_litmus_state(struct task_struct* p, int restore)
{
	struct rt_task user_config = {};
	void* ctrl_page = NULL;

	if (restore) {
		/* Save user-space provided configuration data
		 * and allocated page. */
		user_config = p->rt_param.task_params;
		ctrl_page = p->rt_param.ctrl_page;
	}

	/* We probably should not be inheriting any task's priority
	 * at this point in time.
	 */
	WARN_ON(p->rt_param.inh_task);

	/* Cleanup everything else. */
	memset(&p->rt_param, 0, sizeof(p->rt_param));

	/* Restore preserved fields. */
	if (restore) {
		p->rt_param.task_params = user_config;
		p->rt_param.ctrl_page = ctrl_page;
	}
}
311
312long litmus_admit_task(struct task_struct* tsk)
313{
314 long retval = 0;
315 unsigned long flags;
316
317 BUG_ON(is_realtime(tsk));
318
319 if (get_rt_period(tsk) == 0 ||
320 get_exec_cost(tsk) > get_rt_period(tsk)) {
321 TRACE_TASK(tsk, "litmus admit: invalid task parameters "
322 "(%lu, %lu)\n",
323 get_exec_cost(tsk), get_rt_period(tsk));
324 retval = -EINVAL;
325 goto out;
326 }
327
328 if (!cpu_online(get_partition(tsk))) {
329 TRACE_TASK(tsk, "litmus admit: cpu %d is not online\n",
330 get_partition(tsk));
331 retval = -EINVAL;
332 goto out;
333 }
334
335 INIT_LIST_HEAD(&tsk_rt(tsk)->list);
336
337 /* avoid scheduler plugin changing underneath us */
338 raw_spin_lock_irqsave(&task_transition_lock, flags);
339
340 /* allocate heap node for this task */
341 tsk_rt(tsk)->heap_node = bheap_node_alloc(GFP_ATOMIC);
342 tsk_rt(tsk)->rel_heap = release_heap_alloc(GFP_ATOMIC);
343
344 if (!tsk_rt(tsk)->heap_node || !tsk_rt(tsk)->rel_heap) {
345 printk(KERN_WARNING "litmus: no more heap node memory!?\n");
346
347 bheap_node_free(tsk_rt(tsk)->heap_node);
348 release_heap_free(tsk_rt(tsk)->rel_heap);
349
350 retval = -ENOMEM;
351 goto out_unlock;
352 } else {
353 bheap_node_init(&tsk_rt(tsk)->heap_node, tsk);
354 }
355
356 retval = litmus->admit_task(tsk);
357
358 if (!retval) {
359 sched_trace_task_name(tsk);
360 sched_trace_task_param(tsk);
361 atomic_inc(&rt_task_count);
362 }
363
364out_unlock:
365 raw_spin_unlock_irqrestore(&task_transition_lock, flags);
366out:
367 return retval;
368}
369
370void litmus_exit_task(struct task_struct* tsk)
371{
372 if (is_realtime(tsk)) {
373 sched_trace_task_completion(tsk, 1);
374
375 litmus->task_exit(tsk);
376
377 BUG_ON(bheap_node_in_heap(tsk_rt(tsk)->heap_node));
378 bheap_node_free(tsk_rt(tsk)->heap_node);
379 release_heap_free(tsk_rt(tsk)->rel_heap);
380
381 atomic_dec(&rt_task_count);
382 reinit_litmus_state(tsk, 1);
383 }
384}
385
/* IPI callback to synchronize plugin switching.
 * Runs on every CPU other than the one driving the switch: it
 * announces its arrival by incrementing cannot_use_plugin, then
 * busy-waits until the initiating CPU resets the counter to zero
 * once the switch has completed (see switch_sched_plugin()). */
static void synch_on_plugin_switch(void* info)
{
	atomic_inc(&cannot_use_plugin);
	while (atomic_read(&cannot_use_plugin) > 0)
		cpu_relax();
}
393
/* Switching a plugin in use is tricky.
 * We must watch out that no real-time tasks exists
 * (and that none is created in parallel) and that the plugin is not
 * currently in use on any processor (in theory).
 *
 * Returns 0 on success, -EBUSY if real-time tasks still exist, or the
 * error code of the old plugin's deactivate callback. If the new
 * plugin's activate callback fails, the system falls back to the
 * default Linux plugin rather than reporting an error.
 */
int switch_sched_plugin(struct sched_plugin* plugin)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!plugin);

	/* forbid other cpus to use the plugin */
	atomic_set(&cannot_use_plugin, 1);
	/* send IPI to force other CPUs to synch with us */
	smp_call_function(synch_on_plugin_switch, NULL, 0);

	/* wait until all other CPUs have started synch */
	while (atomic_read(&cannot_use_plugin) < num_online_cpus())
		cpu_relax();

	/* stop task transitions */
	raw_spin_lock_irqsave(&task_transition_lock, flags);

	/* don't switch if there are active real-time tasks */
	if (atomic_read(&rt_task_count) == 0) {
		ret = litmus->deactivate_plugin();
		if (0 != ret)
			goto out;
		ret = plugin->activate_plugin();
		if (0 != ret) {
			printk(KERN_INFO "Can't activate %s (%d).\n",
			       plugin->plugin_name, ret);
			/* fall back to the plain Linux scheduler plugin */
			plugin = &linux_sched_plugin;
		}
		printk(KERN_INFO "Switching to LITMUS^RT plugin %s.\n", plugin->plugin_name);
		litmus = plugin;
	} else
		ret = -EBUSY;
out:
	raw_spin_unlock_irqrestore(&task_transition_lock, flags);
	/* release the CPUs spinning in synch_on_plugin_switch() */
	atomic_set(&cannot_use_plugin, 0);
	return ret;
}
438
439/* Called upon fork.
440 * p is the newly forked task.
441 */
442void litmus_fork(struct task_struct* p)
443{
444 if (is_realtime(p)) {
445 /* clean out any litmus related state, don't preserve anything */
446 reinit_litmus_state(p, 0);
447 /* Don't let the child be a real-time task. */
448 p->sched_reset_on_fork = 1;
449 } else
450 /* non-rt tasks might have ctrl_page set */
451 tsk_rt(p)->ctrl_page = NULL;
452
453 /* od tables are never inherited across a fork */
454 p->od_table = NULL;
455}
456
457/* Called upon execve().
458 * current is doing the exec.
459 * Don't let address space specific stuff leak.
460 */
461void litmus_exec(void)
462{
463 struct task_struct* p = current;
464
465 if (is_realtime(p)) {
466 WARN_ON(p->rt_param.inh_task);
467 if (tsk_rt(p)->ctrl_page) {
468 free_page((unsigned long) tsk_rt(p)->ctrl_page);
469 tsk_rt(p)->ctrl_page = NULL;
470 }
471 }
472}
473
474void exit_litmus(struct task_struct *dead_tsk)
475{
476 /* We also allow non-RT tasks to
477 * allocate control pages to allow
478 * measurements with non-RT tasks.
479 * So check if we need to free the page
480 * in any case.
481 */
482 if (tsk_rt(dead_tsk)->ctrl_page) {
483 TRACE_TASK(dead_tsk,
484 "freeing ctrl_page %p\n",
485 tsk_rt(dead_tsk)->ctrl_page);
486 free_page((unsigned long) tsk_rt(dead_tsk)->ctrl_page);
487 }
488
489 /* main cleanup only for RT tasks */
490 if (is_realtime(dead_tsk))
491 litmus_exit_task(dead_tsk);
492}
493
494
495#ifdef CONFIG_MAGIC_SYSRQ
496int sys_kill(int pid, int sig);
497
/* Magic-SysRq handler: send SIGKILL to every LITMUS^RT real-time task.
 * NOTE(review): this invokes the sys_kill() syscall entry point
 * directly from kernel context while holding tasklist_lock for
 * reading — verify this is safe on the target kernel version. */
static void sysrq_handle_kill_rt_tasks(int key)
{
	struct task_struct *t;
	read_lock(&tasklist_lock);
	for_each_process(t) {
		if (is_realtime(t)) {
			sys_kill(t->pid, SIGKILL);
		}
	}
	read_unlock(&tasklist_lock);
}
509
/* SysRq key operation (registered on 'x' in _init_litmus()):
 * kills all LITMUS^RT real-time tasks for debugging/recovery. */
static struct sysrq_key_op sysrq_kill_rt_tasks_op = {
	.handler = sysrq_handle_kill_rt_tasks,
	.help_msg = "quit-rt-tasks(X)",
	.action_msg = "sent SIGKILL to all LITMUS^RT real-time tasks",
};
515#endif
516
517extern struct sched_plugin linux_sched_plugin;
518
519static int __init _init_litmus(void)
520{
521 /* Common initializers,
522 * mode change lock is used to enforce single mode change
523 * operation.
524 */
525 printk("Starting LITMUS^RT kernel\n");
526
527 BUILD_BUG_ON(sizeof(union np_flag) != sizeof(uint32_t));
528
529 register_sched_plugin(&linux_sched_plugin);
530
531 bheap_node_cache = KMEM_CACHE(bheap_node, SLAB_PANIC);
532 release_heap_cache = KMEM_CACHE(release_heap, SLAB_PANIC);
533
534#ifdef CONFIG_MAGIC_SYSRQ
535 /* offer some debugging help */
536 if (!register_sysrq_key('x', &sysrq_kill_rt_tasks_op))
537 printk("Registered kill rt tasks magic sysrq.\n");
538 else
539 printk("Could not register kill rt tasks magic sysrq.\n");
540#endif
541
542 init_litmus_proc();
543
544 return 0;
545}
546
/* Module teardown: remove the procfs entries and destroy the slab
 * caches created in _init_litmus(). */
static void _exit_litmus(void)
{
	exit_litmus_proc();
	kmem_cache_destroy(bheap_node_cache);
	kmem_cache_destroy(release_heap_cache);
}
553
554module_init(_init_litmus);
555module_exit(_exit_litmus);