Diffstat (limited to 'litmus/litmus.c')
-rw-r--r--	litmus/litmus.c	554
1 file changed, 554 insertions(+), 0 deletions(-)
diff --git a/litmus/litmus.c b/litmus/litmus.c
new file mode 100644
index 000000000000..26938acacafc
--- /dev/null
+++ b/litmus/litmus.c
@@ -0,0 +1,554 @@
/*
 * litmus.c -- Implementation of the LITMUS syscalls,
 *             the LITMUS initialization code,
 *             and the procfs interface.
 */
#include <linux/uaccess.h>
#include <linux/sysrq.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <litmus/litmus.h>
#include <litmus/bheap.h>
#include <litmus/trace.h>
#include <litmus/rt_domain.h>
#include <litmus/litmus_proc.h>
#include <litmus/sched_trace.h>

/* Number of RT tasks that exist in the system */
atomic_t rt_task_count = ATOMIC_INIT(0);
static DEFINE_RAW_SPINLOCK(task_transition_lock);
/* synchronize plugin switching */
atomic_t cannot_use_plugin = ATOMIC_INIT(0);

/* Give log messages sequential IDs. */
atomic_t __log_seq_no = ATOMIC_INIT(0);

#ifdef CONFIG_RELEASE_MASTER
/* current master CPU for handling timer IRQs */
atomic_t release_master_cpu = ATOMIC_INIT(NO_CPU);
#endif

static struct kmem_cache *bheap_node_cache;
extern struct kmem_cache *release_heap_cache;

struct bheap_node* bheap_node_alloc(int gfp_flags)
{
	return kmem_cache_alloc(bheap_node_cache, gfp_flags);
}

void bheap_node_free(struct bheap_node* hn)
{
	kmem_cache_free(bheap_node_cache, hn);
}

struct release_heap* release_heap_alloc(int gfp_flags);
void release_heap_free(struct release_heap* rh);

/*
 * sys_set_rt_task_param
 * @pid: PID of the task whose scheduling parameters are to be changed
 * @param: New real-time extension parameters, such as the execution cost
 *         and period
 * Syscall for manipulating a task's RT extension params.
 * Returns -EFAULT if param is NULL.
 *         -ESRCH  if pid does not correspond to a valid task.
 *         -EINVAL if either period or execution cost is <= 0.
 *         -EBUSY  if pid is already a real-time task.
 *         0       on success.
 *
 * Only non-real-time tasks may be configured with this system call
 * to avoid races with the scheduler. In practice, this means that a
 * task's parameters must be set _before_ calling sys_prepare_rt_task().
 *
 * find_task_by_vpid() assumes that we are in the same namespace as the
 * target.
 */
asmlinkage long sys_set_rt_task_param(pid_t pid, struct rt_task __user * param)
{
	struct rt_task tp;
	struct task_struct *target;
	int retval = -EINVAL;

	printk(KERN_INFO "Setting up rt task parameters for process %d.\n", pid);

	if (pid < 0 || !param)
		goto out;

	if (copy_from_user(&tp, param, sizeof(tp))) {
		retval = -EFAULT;
		goto out;
	}

	/* Task search and manipulation must be protected */
	read_lock_irq(&tasklist_lock);
	if (!(target = find_task_by_vpid(pid))) {
		retval = -ESRCH;
		goto out_unlock;
	}

	if (is_realtime(target)) {
		/* The task is already a real-time task.
		 * We cannot allow parameter changes at this point.
		 */
		retval = -EBUSY;
		goto out_unlock;
	}

	if (tp.exec_cost <= 0)
		goto out_unlock;
	if (tp.period <= 0)
		goto out_unlock;
	if (!cpu_online(tp.cpu))
		goto out_unlock;
	if (tp.period < tp.exec_cost) {
		printk(KERN_INFO "litmus: real-time task %d rejected "
		       "because wcet > period\n", pid);
		goto out_unlock;
	}
	if (tp.cls != RT_CLASS_HARD &&
	    tp.cls != RT_CLASS_SOFT &&
	    tp.cls != RT_CLASS_BEST_EFFORT) {
		printk(KERN_INFO "litmus: real-time task %d rejected "
		       "because its class is invalid\n", pid);
		goto out_unlock;
	}
	if (tp.budget_policy != NO_ENFORCEMENT &&
	    tp.budget_policy != QUANTUM_ENFORCEMENT &&
	    tp.budget_policy != PRECISE_ENFORCEMENT) {
		printk(KERN_INFO "litmus: real-time task %d rejected "
		       "because unsupported budget enforcement policy "
		       "specified (%d)\n",
		       pid, tp.budget_policy);
		goto out_unlock;
	}

	target->rt_param.task_params = tp;

	retval = 0;
out_unlock:
	read_unlock_irq(&tasklist_lock);
out:
	return retval;
}

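/* Illustrative user-space sketch (an assumption, not part of this file):
 * how a task might fill in struct rt_task before becoming real-time.
 * The set_rt_task_param() wrapper and the nanosecond units are assumed
 * to be provided by a user-space library such as liblitmus; only the
 * struct fields validated above are taken from this file.
 *
 *	struct rt_task tp = {
 *		.exec_cost     = 10 * 1000 * 1000,	// 10 ms WCET (ns assumed)
 *		.period        = 100 * 1000 * 1000,	// 100 ms period
 *		.cpu           = 0,			// must be online
 *		.cls           = RT_CLASS_SOFT,
 *		.budget_policy = NO_ENFORCEMENT,
 *	};
 *	if (set_rt_task_param(getpid(), &tp) < 0)
 *		perror("set_rt_task_param");
 */
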
/*
 * Getter of a task's RT params
 * returns -EINVAL if param is NULL or pid is negative
 * returns -ESRCH  if pid does not correspond to a valid task
 * returns -EFAULT if copying the parameters failed
 *
 * find_task_by_vpid() assumes that we are in the same namespace as the
 * target.
 */
asmlinkage long sys_get_rt_task_param(pid_t pid, struct rt_task __user * param)
{
	int retval = -EINVAL;
	struct task_struct *source;
	struct rt_task lp;

	if (!param || pid < 0)
		goto out;

	read_lock(&tasklist_lock);
	if (!(source = find_task_by_vpid(pid))) {
		retval = -ESRCH;
		goto out_unlock;
	}
	lp = source->rt_param.task_params;
	read_unlock(&tasklist_lock);

	/* Do copying outside the lock */
	retval = copy_to_user(param, &lp, sizeof(lp)) ? -EFAULT : 0;
	return retval;

out_unlock:
	read_unlock(&tasklist_lock);
out:
	return retval;
}

/*
 * This is the crucial function for the periodic task implementation.
 * It checks whether a task is periodic and whether this kind of sleep
 * is permitted, and then calls the plugin-specific sleep, which puts
 * the task into a wait array.
 * returns 0 on successful wakeup
 * returns -EPERM if current conditions do not permit such a sleep
 * returns -EINVAL if the current task is not able to go to sleep
 */
asmlinkage long sys_complete_job(void)
{
	int retval = -EPERM;

	if (!is_realtime(current)) {
		retval = -EINVAL;
		goto out;
	}
	/* Task with negative or zero period cannot sleep */
	if (get_rt_period(current) <= 0) {
		retval = -EINVAL;
		goto out;
	}
	/* The plugin has to put the task into an
	 * appropriate queue and call schedule
	 */
	retval = litmus->complete_job();
out:
	return retval;
}

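/* Illustrative user-space sketch (an assumption, not part of this file):
 * the canonical periodic main loop built on sys_complete_job(). The
 * sleep_next_period() wrapper is assumed to invoke this syscall, and
 * do_one_job() stands in for the application's per-job workload.
 *
 *	for (;;) {
 *		do_one_job();			// hypothetical per-job work
 *		if (sleep_next_period() < 0)	// wait for the next release
 *			break;			// -EINVAL/-EPERM: not eligible
 *	}
 */
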
/* This is an "improved" version of sys_complete_job that
 * addresses the problem of unintentionally missing a job after
 * an overrun.
 *
 * returns 0 on successful wakeup
 * returns -EPERM if current conditions do not permit such a sleep
 * returns -EINVAL if the current task is not able to go to sleep
 */
asmlinkage long sys_wait_for_job_release(unsigned int job)
{
	int retval = -EPERM;

	if (!is_realtime(current)) {
		retval = -EINVAL;
		goto out;
	}

	/* Task with negative or zero period cannot sleep */
	if (get_rt_period(current) <= 0) {
		retval = -EINVAL;
		goto out;
	}

	retval = 0;

	/* first wait until we have "reached" the desired job
	 *
	 * This implementation has at least two problems:
	 *
	 * 1) It doesn't gracefully handle the wrap-around of
	 *    job_no. Since LITMUS is a prototype, this is not much
	 *    of a problem right now.
	 *
	 * 2) It is theoretically racy if a job release occurs
	 *    between checking job_no and calling sleep_next_period().
	 *    A proper solution would require adding another callback
	 *    in the plugin structure and testing the condition with
	 *    interrupts disabled.
	 *
	 * FIXME: At least problem 2 should be taken care of eventually.
	 */
	while (!retval && job > current->rt_param.job_params.job_no)
		/* If the last job overran then job <= job_no and we
		 * don't send the task to sleep.
		 */
		retval = litmus->complete_job();
out:
	return retval;
}

/* This is a helper syscall to query the current job sequence number.
 *
 * returns 0 on successful query
 * returns -EPERM if the task is not a real-time task
 * returns -EFAULT if job is not a valid pointer
 */
asmlinkage long sys_query_job_no(unsigned int __user *job)
{
	int retval = -EPERM;

	if (is_realtime(current))
		retval = put_user(current->rt_param.job_params.job_no, job);

	return retval;
}

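/* Illustrative user-space sketch (an assumption, not part of this file):
 * combining sys_query_job_no() and sys_wait_for_job_release() so that a
 * job overrun does not cause the next job to be skipped. The
 * query_job_no() and wait_for_job_release() wrapper names mirror the
 * syscalls and are assumed, as is do_one_job().
 *
 *	unsigned int job;
 *	query_job_no(&job);		// current sequence number
 *	do_one_job();			// hypothetical per-job work
 *	wait_for_job_release(job + 1);	// returns immediately if we overran
 */
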
/* sys_null_call() is only used for determining raw system call
 * overheads (kernel entry, kernel exit). It has no useful side effects.
 * If ts is non-NULL, then the current Feather-Trace time is recorded.
 */
asmlinkage long sys_null_call(cycles_t __user *ts)
{
	long ret = 0;
	cycles_t now;

	if (ts) {
		now = get_cycles();
		ret = put_user(now, ts);
	}

	return ret;
}

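/* Illustrative user-space sketch (an assumption, not part of this file):
 * using sys_null_call() to estimate kernel entry/exit costs. The
 * null_call() wrapper and a user-space cycle-counter read (e.g. rdtsc
 * on x86, here called read_cycles()) are assumed.
 *
 *	cycles_t before, in_kernel, after;
 *	before = read_cycles();		// hypothetical TSC read
 *	null_call(&in_kernel);		// kernel records its own timestamp
 *	after  = read_cycles();
 *	// in_kernel - before approximates entry cost,
 *	// after - in_kernel approximates exit cost
 */
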
/* p is a real-time task. Re-init its state as a best-effort task. */
static void reinit_litmus_state(struct task_struct* p, int restore)
{
	struct rt_task user_config = {};
	void* ctrl_page = NULL;

	if (restore) {
		/* Save user-space provided configuration data
		 * and the allocated control page. */
		user_config = p->rt_param.task_params;
		ctrl_page = p->rt_param.ctrl_page;
	}

	/* We probably should not be inheriting any task's priority
	 * at this point in time.
	 */
	WARN_ON(p->rt_param.inh_task);

	/* Clean up everything else. */
	memset(&p->rt_param, 0, sizeof(p->rt_param));

	/* Restore preserved fields. */
	if (restore) {
		p->rt_param.task_params = user_config;
		p->rt_param.ctrl_page = ctrl_page;
	}
}

long litmus_admit_task(struct task_struct* tsk)
{
	long retval = 0;
	unsigned long flags;

	BUG_ON(is_realtime(tsk));

	if (get_rt_period(tsk) == 0 ||
	    get_exec_cost(tsk) > get_rt_period(tsk)) {
		TRACE_TASK(tsk, "litmus admit: invalid task parameters "
			   "(%lu, %lu)\n",
			   get_exec_cost(tsk), get_rt_period(tsk));
		retval = -EINVAL;
		goto out;
	}

	if (!cpu_online(get_partition(tsk))) {
		TRACE_TASK(tsk, "litmus admit: cpu %d is not online\n",
			   get_partition(tsk));
		retval = -EINVAL;
		goto out;
	}

	INIT_LIST_HEAD(&tsk_rt(tsk)->list);

	/* avoid scheduler plugin changing underneath us */
	raw_spin_lock_irqsave(&task_transition_lock, flags);

	/* allocate heap node for this task */
	tsk_rt(tsk)->heap_node = bheap_node_alloc(GFP_ATOMIC);
	tsk_rt(tsk)->rel_heap = release_heap_alloc(GFP_ATOMIC);

	if (!tsk_rt(tsk)->heap_node || !tsk_rt(tsk)->rel_heap) {
		printk(KERN_WARNING "litmus: no more heap node memory!?\n");

		/* either allocation may have failed, so free only what
		 * was actually allocated */
		if (tsk_rt(tsk)->heap_node)
			bheap_node_free(tsk_rt(tsk)->heap_node);
		if (tsk_rt(tsk)->rel_heap)
			release_heap_free(tsk_rt(tsk)->rel_heap);

		retval = -ENOMEM;
		goto out_unlock;
	} else {
		bheap_node_init(&tsk_rt(tsk)->heap_node, tsk);
	}

	retval = litmus->admit_task(tsk);

	if (!retval) {
		sched_trace_task_name(tsk);
		sched_trace_task_param(tsk);
		atomic_inc(&rt_task_count);
	}

out_unlock:
	raw_spin_unlock_irqrestore(&task_transition_lock, flags);
out:
	return retval;
}

void litmus_exit_task(struct task_struct* tsk)
{
	if (is_realtime(tsk)) {
		sched_trace_task_completion(tsk, 1);

		litmus->task_exit(tsk);

		BUG_ON(bheap_node_in_heap(tsk_rt(tsk)->heap_node));
		bheap_node_free(tsk_rt(tsk)->heap_node);
		release_heap_free(tsk_rt(tsk)->rel_heap);

		atomic_dec(&rt_task_count);
		reinit_litmus_state(tsk, 1);
	}
}

/* IPI callback to synchronize plugin switching */
static void synch_on_plugin_switch(void* info)
{
	atomic_inc(&cannot_use_plugin);
	while (atomic_read(&cannot_use_plugin) > 0)
		cpu_relax();
}

/* Switching a plugin in use is tricky.
 * We must watch out that no real-time tasks exist
 * (and that none are created in parallel) and that the plugin is not
 * currently in use on any processor (in theory).
 */
int switch_sched_plugin(struct sched_plugin* plugin)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!plugin);

	/* forbid other cpus to use the plugin */
	atomic_set(&cannot_use_plugin, 1);
	/* send IPI to force other CPUs to synch with us */
	smp_call_function(synch_on_plugin_switch, NULL, 0);

	/* wait until all other CPUs have started synch */
	while (atomic_read(&cannot_use_plugin) < num_online_cpus())
		cpu_relax();

	/* stop task transitions */
	raw_spin_lock_irqsave(&task_transition_lock, flags);

	/* don't switch if there are active real-time tasks */
	if (atomic_read(&rt_task_count) == 0) {
		ret = litmus->deactivate_plugin();
		if (ret != 0)
			goto out;
		ret = plugin->activate_plugin();
		if (ret != 0) {
			printk(KERN_INFO "Can't activate %s (%d).\n",
			       plugin->plugin_name, ret);
			plugin = &linux_sched_plugin;
		}
		printk(KERN_INFO "Switching to LITMUS^RT plugin %s.\n",
		       plugin->plugin_name);
		litmus = plugin;
	} else
		ret = -EBUSY;
out:
	raw_spin_unlock_irqrestore(&task_transition_lock, flags);
	atomic_set(&cannot_use_plugin, 0);
	return ret;
}

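/* For reference: plugin switches are normally driven from user space via
 * the LITMUS^RT proc interface set up by litmus_proc; assuming the usual
 * file layout, something like
 *
 *	echo GSN-EDF > /proc/litmus/active_plugin
 *
 * ends up in this function, which only succeeds while rt_task_count is
 * zero.
 */
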
/* Called upon fork.
 * p is the newly forked task.
 */
void litmus_fork(struct task_struct* p)
{
	if (is_realtime(p)) {
		/* clean out any litmus related state, don't preserve anything */
		reinit_litmus_state(p, 0);
		/* Don't let the child be a real-time task. */
		p->sched_reset_on_fork = 1;
	} else
		/* non-rt tasks might have ctrl_page set */
		tsk_rt(p)->ctrl_page = NULL;

	/* od tables are never inherited across a fork */
	p->od_table = NULL;
}

/* Called upon execve().
 * current is doing the exec.
 * Don't let address space specific stuff leak.
 */
void litmus_exec(void)
{
	struct task_struct* p = current;

	if (is_realtime(p)) {
		WARN_ON(p->rt_param.inh_task);
		if (tsk_rt(p)->ctrl_page) {
			free_page((unsigned long) tsk_rt(p)->ctrl_page);
			tsk_rt(p)->ctrl_page = NULL;
		}
	}
}

void exit_litmus(struct task_struct *dead_tsk)
{
	/* We also allow non-RT tasks to allocate control pages,
	 * so that measurements can be taken with non-RT tasks;
	 * hence, check in any case whether the page needs to be freed.
	 */
	if (tsk_rt(dead_tsk)->ctrl_page) {
		TRACE_TASK(dead_tsk,
			   "freeing ctrl_page %p\n",
			   tsk_rt(dead_tsk)->ctrl_page);
		free_page((unsigned long) tsk_rt(dead_tsk)->ctrl_page);
	}

	/* main cleanup only for RT tasks */
	if (is_realtime(dead_tsk))
		litmus_exit_task(dead_tsk);
}


#ifdef CONFIG_MAGIC_SYSRQ
int sys_kill(int pid, int sig);

static void sysrq_handle_kill_rt_tasks(int key)
{
	struct task_struct *t;

	read_lock(&tasklist_lock);
	for_each_process(t) {
		if (is_realtime(t))
			sys_kill(t->pid, SIGKILL);
	}
	read_unlock(&tasklist_lock);
}

static struct sysrq_key_op sysrq_kill_rt_tasks_op = {
	.handler	= sysrq_handle_kill_rt_tasks,
	.help_msg	= "quit-rt-tasks(X)",
	.action_msg	= "sent SIGKILL to all LITMUS^RT real-time tasks",
};
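
/* The handler above is reached via the magic SysRq key registered in
 * _init_litmus() below ('x'), i.e. Alt-SysRq-x on a console or,
 * equivalently:
 *
 *	echo x > /proc/sysrq-trigger
 */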
#endif

extern struct sched_plugin linux_sched_plugin;

static int __init _init_litmus(void)
{
	/* Common initializers;
	 * the mode change lock is used to enforce that only a single
	 * mode change operation is in progress at any time.
	 */
	printk(KERN_INFO "Starting LITMUS^RT kernel\n");

	register_sched_plugin(&linux_sched_plugin);

	bheap_node_cache = KMEM_CACHE(bheap_node, SLAB_PANIC);
	release_heap_cache = KMEM_CACHE(release_heap, SLAB_PANIC);

#ifdef CONFIG_MAGIC_SYSRQ
	/* offer some debugging help */
	if (!register_sysrq_key('x', &sysrq_kill_rt_tasks_op))
		printk(KERN_INFO "Registered kill rt tasks magic sysrq.\n");
	else
		printk(KERN_WARNING "Could not register kill rt tasks magic sysrq.\n");
#endif

	init_litmus_proc();

	return 0;
}

static void _exit_litmus(void)
{
	exit_litmus_proc();
	kmem_cache_destroy(bheap_node_cache);
	kmem_cache_destroy(release_heap_cache);
}

module_init(_init_litmus);
module_exit(_exit_litmus);