authorJonathan Herman <hermanjl@cs.unc.edu>2013-04-19 17:31:52 -0400
committerJonathan Herman <hermanjl@cs.unc.edu>2013-04-19 17:31:52 -0400
commitf70a290e8a889caa905ab7650c696f2bb299be1a (patch)
tree56f0886d839499e9f522f189999024b3e86f9be2 /litmus/litmus.c
parentfcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (diff)
parent7ef4a793a624c6e66c16ca1051847f75161f5bec (diff)
Merge branch 'wip-nested-locking' into tegra-nested-locking
Conflicts:
        Makefile
        include/linux/fs.h
Diffstat (limited to 'litmus/litmus.c')
-rw-r--r--  litmus/litmus.c | 579
1 file changed, 579 insertions(+), 0 deletions(-)
diff --git a/litmus/litmus.c b/litmus/litmus.c
new file mode 100644
index 00000000000..dc94be71bfb
--- /dev/null
+++ b/litmus/litmus.c
@@ -0,0 +1,579 @@
/*
 * litmus.c -- Implementation of the LITMUS syscalls,
 *             the LITMUS initialization code,
 *             and the procfs interface.
 */
#include <asm/uaccess.h>
#include <linux/uaccess.h>
#include <linux/sysrq.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/reboot.h>
#include <linux/stop_machine.h>

#include <litmus/litmus.h>
#include <litmus/bheap.h>
#include <litmus/trace.h>
#include <litmus/rt_domain.h>
#include <litmus/litmus_proc.h>
#include <litmus/sched_trace.h>

#ifdef CONFIG_SCHED_CPU_AFFINITY
#include <litmus/affinity.h>
#endif

/* Number of RT tasks that exist in the system */
atomic_t rt_task_count = ATOMIC_INIT(0);

/* Give log messages sequential IDs. */
atomic_t __log_seq_no = ATOMIC_INIT(0);

#ifdef CONFIG_RELEASE_MASTER
/* current master CPU for handling timer IRQs */
atomic_t release_master_cpu = ATOMIC_INIT(NO_CPU);
#endif

static struct kmem_cache *bheap_node_cache;
extern struct kmem_cache *release_heap_cache;

struct bheap_node* bheap_node_alloc(int gfp_flags)
{
        return kmem_cache_alloc(bheap_node_cache, gfp_flags);
}

void bheap_node_free(struct bheap_node* hn)
{
        kmem_cache_free(bheap_node_cache, hn);
}

struct release_heap* release_heap_alloc(int gfp_flags);
void release_heap_free(struct release_heap* rh);
/*
 * sys_set_rt_task_param
 * @pid: PID of the task whose scheduling parameters are to be changed
 * @param: New real-time extension parameters such as the execution cost
 *         and period
 * Syscall for manipulating a task's RT extension params
 * Returns EFAULT if param is NULL.
 *         ESRCH  if pid does not correspond to a valid task.
 *         EINVAL if either period or execution cost is <= 0.
 *         EBUSY  if pid refers to a task that is already real-time.
 *         0 on success.
 *
 * Only non-real-time tasks may be configured with this system call
 * to avoid races with the scheduler. In practice, this means that a
 * task's parameters must be set _before_ calling sys_prepare_rt_task().
 *
 * find_task_by_vpid() assumes that we are in the same namespace as the
 * target.
 */
asmlinkage long sys_set_rt_task_param(pid_t pid, struct rt_task __user * param)
{
        struct rt_task tp;
        struct task_struct *target;
        int retval = -EINVAL;

        printk("Setting up rt task parameters for process %d.\n", pid);

        if (pid < 0 || param == 0) {
                goto out;
        }
        if (copy_from_user(&tp, param, sizeof(tp))) {
                retval = -EFAULT;
                goto out;
        }

        /* Task search and manipulation must be protected */
        read_lock_irq(&tasklist_lock);
        if (!(target = find_task_by_vpid(pid))) {
                retval = -ESRCH;
                goto out_unlock;
        }

        if (is_realtime(target)) {
                /* The task is already a real-time task.
                 * We cannot allow parameter changes at this point.
                 */
                retval = -EBUSY;
                goto out_unlock;
        }

        /* set relative deadline to be implicit if left unspecified */
        if (tp.relative_deadline == 0)
                tp.relative_deadline = tp.period;

        if (tp.exec_cost <= 0)
                goto out_unlock;
        if (tp.period <= 0)
                goto out_unlock;
        if (!cpu_online(tp.cpu))
                goto out_unlock;
        if (min(tp.relative_deadline, tp.period) < tp.exec_cost) /* density check */
        {
                printk(KERN_INFO "litmus: real-time task %d rejected "
                       "because task density > 1.0\n", pid);
                goto out_unlock;
        }
        if (tp.cls != RT_CLASS_HARD &&
            tp.cls != RT_CLASS_SOFT &&
            tp.cls != RT_CLASS_BEST_EFFORT)
        {
                printk(KERN_INFO "litmus: real-time task %d rejected "
                       "because its class is invalid\n", pid);
                goto out_unlock;
        }
        if (tp.budget_policy != NO_ENFORCEMENT &&
            tp.budget_policy != QUANTUM_ENFORCEMENT &&
            tp.budget_policy != PRECISE_ENFORCEMENT)
        {
                printk(KERN_INFO "litmus: real-time task %d rejected "
                       "because unsupported budget enforcement policy "
                       "specified (%d)\n",
                       pid, tp.budget_policy);
                goto out_unlock;
        }

        target->rt_param.task_params = tp;

        retval = 0;
out_unlock:
        read_unlock_irq(&tasklist_lock);
out:
        return retval;
}
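
/* A minimal user-space sketch of how this syscall is meant to be used.
 * (Illustrative only: the __NR_set_rt_task_param constant and the raw
 * syscall(2) invocation are assumptions for the sake of the example;
 * real programs would normally go through a liblitmus-style wrapper.
 * Times are given in nanoseconds.)
 *
 *      struct rt_task tp = {
 *              .exec_cost         =  10000000,  // e = 10 ms
 *              .period            = 100000000,  // p = 100 ms
 *              .relative_deadline =  50000000,  // d = 50 ms (0 => d = p)
 *              .cpu               = 0,          // must be online, see above
 *              .cls               = RT_CLASS_SOFT,
 *              .budget_policy     = NO_ENFORCEMENT,
 *      };
 *      // Must be called while the task is still a best-effort task.
 *      long err = syscall(__NR_set_rt_task_param, getpid(), &tp);
 *
 * The density check above accepts these parameters, since
 * e = 10 ms <= min(d, p) = 50 ms.
 */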

/*
 * Getter of a task's RT params
 *   returns EINVAL if param is NULL or pid is invalid
 *   returns ESRCH  if pid does not correspond to a valid task
 *   returns EFAULT if copying of parameters has failed
 *
 * find_task_by_vpid() assumes that we are in the same namespace as the
 * target.
 */
asmlinkage long sys_get_rt_task_param(pid_t pid, struct rt_task __user * param)
{
        int retval = -EINVAL;
        struct task_struct *source;
        struct rt_task lp;

        if (param == 0 || pid < 0)
                goto out;
        read_lock(&tasklist_lock);
        if (!(source = find_task_by_vpid(pid))) {
                retval = -ESRCH;
                goto out_unlock;
        }
        lp = source->rt_param.task_params;
        read_unlock(&tasklist_lock);
        /* Do copying outside the lock */
        retval = copy_to_user(param, &lp, sizeof(lp)) ? -EFAULT : 0;
        return retval;
out_unlock:
        read_unlock(&tasklist_lock);
out:
        return retval;
}
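
/* Illustrative read-back of the parameters set earlier (same caveats:
 * the __NR_get_rt_task_param constant is an assumption, and a
 * liblitmus-style wrapper would be the normal route):
 *
 *      struct rt_task tp;
 *      long err = syscall(__NR_get_rt_task_param, pid, &tp);
 *      // on success (err == 0), tp holds the task's current RT params
 */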

/*
 * This is the crucial function for the periodic-task implementation.
 * It checks whether a task is periodic and whether this kind of sleep
 * is permitted, and then calls the plugin-specific sleep, which puts
 * the task into a wait array.
 * returns 0 on successful wakeup
 * returns EPERM if current conditions do not permit such sleep
 * returns EINVAL if current task is not able to go to sleep
 */
asmlinkage long sys_complete_job(void)
{
        int retval = -EPERM;
        if (!is_realtime(current)) {
                retval = -EINVAL;
                goto out;
        }
        /* Task with negative or zero period cannot sleep */
        if (get_rt_period(current) <= 0) {
                retval = -EINVAL;
                goto out;
        }
        /* The plugin has to put the task into an
         * appropriate queue and call schedule
         */
        retval = litmus->complete_job();
out:
        return retval;
}
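
/* Sketch of the job loop this syscall is designed for: a user-space
 * periodic task does one job's worth of work and then yields until the
 * next release. (Illustrative; do_job() and the raw syscall are
 * stand-ins for application code and a liblitmus-style wrapper.)
 *
 *      for (;;) {
 *              do_job();                       // one job of the task
 *              syscall(__NR_complete_job);     // sleep until next release
 *      }
 */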

/* This is an "improved" version of sys_complete_job that
 * addresses the problem of unintentionally missing a job after
 * an overrun.
 *
 * returns 0 on successful wakeup
 * returns EPERM if current conditions do not permit such sleep
 * returns EINVAL if current task is not able to go to sleep
 */
asmlinkage long sys_wait_for_job_release(unsigned int job)
{
        int retval = -EPERM;
        if (!is_realtime(current)) {
                retval = -EINVAL;
                goto out;
        }

        /* Task with negative or zero period cannot sleep */
        if (get_rt_period(current) <= 0) {
                retval = -EINVAL;
                goto out;
        }

        retval = 0;

        /* first wait until we have "reached" the desired job
         *
         * This implementation has at least two problems:
         *
         * 1) It doesn't gracefully handle the wrap-around of
         *    job_no. Since LITMUS is a prototype, this is not much
         *    of a problem right now.
         *
         * 2) It is theoretically racy if a job release occurs
         *    between checking job_no and calling sleep_next_period().
         *    A proper solution would require adding another callback
         *    in the plugin structure and testing the condition with
         *    interrupts disabled.
         *
         * FIXME: At least problem 2 should be taken care of eventually.
         */
        while (!retval && job > current->rt_param.job_params.job_no)
                /* If the last job overran then job <= job_no and we
                 * don't send the task to sleep.
                 */
                retval = litmus->complete_job();
out:
        return retval;
}

/* This is a helper syscall to query the current job sequence number.
 *
 * returns 0 on successful query
 * returns EPERM if task is not a real-time task.
 * returns EFAULT if job is not a valid pointer.
 */
asmlinkage long sys_query_job_no(unsigned int __user *job)
{
        int retval = -EPERM;
        if (is_realtime(current))
                retval = put_user(current->rt_param.job_params.job_no, job);

        return retval;
}
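
/* Sketch of how sys_query_job_no() combines with
 * sys_wait_for_job_release() so that a task that overran skips the
 * releases it missed instead of processing stale jobs (illustrative;
 * the syscall number constants are again assumed):
 *
 *      unsigned int job_no;
 *      syscall(__NR_query_job_no, &job_no);
 *      // Returns immediately if job_no + 1 has already been released.
 *      syscall(__NR_wait_for_job_release, job_no + 1);
 */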

/* sys_null_call() is only used for determining raw system call
 * overheads (kernel entry, kernel exit). It has no useful side effects.
 * If ts is non-NULL, then the current Feather-Trace time is recorded.
 */
asmlinkage long sys_null_call(cycles_t __user *ts)
{
        long ret = 0;
        cycles_t now;

        if (ts) {
                now = get_cycles();
                ret = put_user(now, ts);
        }

        return ret;
}
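
/* Sketch of an overhead measurement using this call (illustrative):
 * two back-to-back invocations record cycle counts, and their
 * difference bounds one syscall round trip plus the user-space gap
 * between the calls.
 *
 *      cycles_t t0, t1;
 *      syscall(__NR_null_call, &t0);
 *      syscall(__NR_null_call, &t1);
 *      // t1 - t0 ~= kernel exit + user gap + kernel entry, in cycles
 */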

/* p is a real-time task. Re-init its state as a best-effort task. */
static void reinit_litmus_state(struct task_struct* p, int restore)
{
        struct rt_task user_config = {};
        void* ctrl_page = NULL;

        if (restore) {
                /* Save user-space provided configuration data
                 * and the allocated control page. */
                user_config = p->rt_param.task_params;
                ctrl_page = p->rt_param.ctrl_page;
        }

        /* We probably should not be inheriting any task's priority
         * at this point in time.
         */
        WARN_ON(p->rt_param.inh_task);

        /* Clean up everything else. */
        memset(&p->rt_param, 0, sizeof(p->rt_param));

        /* Restore preserved fields. */
        if (restore) {
                p->rt_param.task_params = user_config;
                p->rt_param.ctrl_page = ctrl_page;
        }
}

long litmus_admit_task(struct task_struct* tsk)
{
        long retval = 0;

        BUG_ON(is_realtime(tsk));

        tsk_rt(tsk)->heap_node = NULL;
        tsk_rt(tsk)->rel_heap = NULL;

        if (get_rt_relative_deadline(tsk) == 0 ||
            get_exec_cost(tsk) >
            min(get_rt_relative_deadline(tsk), get_rt_period(tsk))) {
                TRACE_TASK(tsk,
                           "litmus admit: invalid task parameters "
                           "(e = %lu, p = %lu, d = %lu)\n",
                           get_exec_cost(tsk), get_rt_period(tsk),
                           get_rt_relative_deadline(tsk));
                retval = -EINVAL;
                goto out;
        }

        if (!cpu_online(get_partition(tsk))) {
                TRACE_TASK(tsk, "litmus admit: cpu %d is not online\n",
                           get_partition(tsk));
                retval = -EINVAL;
                goto out;
        }

        INIT_LIST_HEAD(&tsk_rt(tsk)->list);

        /* allocate heap node for this task */
        tsk_rt(tsk)->heap_node = bheap_node_alloc(GFP_ATOMIC);
        tsk_rt(tsk)->rel_heap = release_heap_alloc(GFP_ATOMIC);

        if (!tsk_rt(tsk)->heap_node || !tsk_rt(tsk)->rel_heap) {
                printk(KERN_WARNING "litmus: no more heap node memory!?\n");

                retval = -ENOMEM;
                goto out;
        } else {
                bheap_node_init(&tsk_rt(tsk)->heap_node, tsk);
        }

        preempt_disable();

        retval = litmus->admit_task(tsk);

        if (!retval) {
                sched_trace_task_name(tsk);
                sched_trace_task_param(tsk);
                atomic_inc(&rt_task_count);
        }

        preempt_enable();

out:
        if (retval) {
                bheap_node_free(tsk_rt(tsk)->heap_node);
                release_heap_free(tsk_rt(tsk)->rel_heap);
        }
        return retval;
}
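
/* Worked example of the admission test above: a task with e = 10 ms,
 * p = 100 ms, and d = 50 ms is admitted, since d != 0 and
 * e <= min(d, p) (10 ms <= 50 ms). With e = 60 ms it would be
 * rejected, because 60 ms > min(50 ms, 100 ms), i.e., its density
 * would exceed 1.0.
 */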

void litmus_exit_task(struct task_struct* tsk)
{
        if (is_realtime(tsk)) {
                sched_trace_task_completion(tsk, 1);

                litmus->task_exit(tsk);

                BUG_ON(bheap_node_in_heap(tsk_rt(tsk)->heap_node));
                bheap_node_free(tsk_rt(tsk)->heap_node);
                release_heap_free(tsk_rt(tsk)->rel_heap);

                atomic_dec(&rt_task_count);
                reinit_litmus_state(tsk, 1);
        }
}

static int do_plugin_switch(void *_plugin)
{
        int ret;
        struct sched_plugin* plugin = _plugin;

        /* don't switch if there are active real-time tasks */
        if (atomic_read(&rt_task_count) == 0) {
                ret = litmus->deactivate_plugin();
                if (0 != ret)
                        goto out;
                ret = plugin->activate_plugin();
                if (0 != ret) {
                        printk(KERN_INFO "Can't activate %s (%d).\n",
                               plugin->plugin_name, ret);
                        plugin = &linux_sched_plugin;
                }
                printk(KERN_INFO "Switching to LITMUS^RT plugin %s.\n",
                       plugin->plugin_name);
                litmus = plugin;
        } else
                ret = -EBUSY;
out:
        return ret;
}

/* Switching a plugin in use is tricky.
 * We must ensure that no real-time tasks exist
 * (and that none are created in parallel) and that the plugin is not
 * currently in use on any processor (in theory).
 */
int switch_sched_plugin(struct sched_plugin* plugin)
{
        BUG_ON(!plugin);

        if (atomic_read(&rt_task_count) == 0)
                return stop_machine(do_plugin_switch, plugin, NULL);
        else
                return -EBUSY;
}
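
/* In practice the switch is driven through the LITMUS^RT proc
 * interface set up by init_litmus_proc() below; assuming the usual
 * /proc/litmus layout, the name of the plugin to activate is written
 * there, e.g.:
 *
 *      # echo Linux > /proc/litmus/active_plugin
 *
 * stop_machine() above then guarantees that no CPU is executing inside
 * the old plugin while it is being swapped out.
 */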

/* Called upon fork.
 * p is the newly forked task.
 */
void litmus_fork(struct task_struct* p)
{
        if (is_realtime(p)) {
                /* clean out any litmus related state, don't preserve anything */
                reinit_litmus_state(p, 0);
                /* Don't let the child be a real-time task. */
                p->sched_reset_on_fork = 1;
        } else
                /* non-rt tasks might have ctrl_page set */
                tsk_rt(p)->ctrl_page = NULL;

        /* od tables are never inherited across a fork */
        p->od_table = NULL;
}

/* Called upon execve().
 * current is doing the exec.
 * Don't let address space specific stuff leak.
 */
void litmus_exec(void)
{
        struct task_struct* p = current;

        if (is_realtime(p)) {
                WARN_ON(p->rt_param.inh_task);
                if (tsk_rt(p)->ctrl_page) {
                        free_page((unsigned long) tsk_rt(p)->ctrl_page);
                        tsk_rt(p)->ctrl_page = NULL;
                }
        }
}

void exit_litmus(struct task_struct *dead_tsk)
{
        /* We also allow non-RT tasks to allocate control pages so that
         * measurements can be taken with non-RT tasks, so check in any
         * case whether a page needs to be freed.
         */
        if (tsk_rt(dead_tsk)->ctrl_page) {
                TRACE_TASK(dead_tsk,
                           "freeing ctrl_page %p\n",
                           tsk_rt(dead_tsk)->ctrl_page);
                free_page((unsigned long) tsk_rt(dead_tsk)->ctrl_page);
        }

        /* main cleanup only for RT tasks */
        if (is_realtime(dead_tsk))
                litmus_exit_task(dead_tsk);
}

#ifdef CONFIG_MAGIC_SYSRQ
int sys_kill(int pid, int sig);

static void sysrq_handle_kill_rt_tasks(int key)
{
        struct task_struct *t;
        read_lock(&tasklist_lock);
        for_each_process(t) {
                if (is_realtime(t)) {
                        sys_kill(t->pid, SIGKILL);
                }
        }
        read_unlock(&tasklist_lock);
}

static struct sysrq_key_op sysrq_kill_rt_tasks_op = {
        .handler = sysrq_handle_kill_rt_tasks,
        .help_msg = "quit-rt-tasks(X)",
        .action_msg = "sent SIGKILL to all LITMUS^RT real-time tasks",
};
#endif
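
/* With CONFIG_MAGIC_SYSRQ enabled, the handler above is bound to the
 * 'x' key (see _init_litmus() below), so all LITMUS^RT real-time tasks
 * can be killed from a console via Alt+SysRq+x or, equivalently, the
 * standard trigger file:
 *
 *      # echo x > /proc/sysrq-trigger
 */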

extern struct sched_plugin linux_sched_plugin;

static int litmus_shutdown_nb(struct notifier_block *unused1,
                              unsigned long unused2, void *unused3)
{
        /* Attempt to switch back to regular Linux scheduling.
         * Forces the active plugin to clean up.
         */
        if (litmus != &linux_sched_plugin) {
                int ret = switch_sched_plugin(&linux_sched_plugin);
                if (ret) {
                        printk("Auto-shutdown of active Litmus plugin failed.\n");
                }
        }
        return NOTIFY_DONE;
}

static struct notifier_block shutdown_notifier = {
        .notifier_call = litmus_shutdown_nb,
};

static int __init _init_litmus(void)
{
        /* Common initializers; the mode-change lock is used to enforce
         * a single mode-change operation.
         */
        printk("Starting LITMUS^RT kernel\n");

        register_sched_plugin(&linux_sched_plugin);

        bheap_node_cache = KMEM_CACHE(bheap_node, SLAB_PANIC);
        release_heap_cache = KMEM_CACHE(release_heap, SLAB_PANIC);

#ifdef CONFIG_MAGIC_SYSRQ
        /* offer some debugging help */
        if (!register_sysrq_key('x', &sysrq_kill_rt_tasks_op))
                printk("Registered kill rt tasks magic sysrq.\n");
        else
                printk("Could not register kill rt tasks magic sysrq.\n");
#endif

        init_litmus_proc();

#ifdef CONFIG_SCHED_CPU_AFFINITY
        init_topology();
#endif

        register_reboot_notifier(&shutdown_notifier);

        return 0;
}

static void _exit_litmus(void)
{
        unregister_reboot_notifier(&shutdown_notifier);

        exit_litmus_proc();
        kmem_cache_destroy(bheap_node_cache);
        kmem_cache_destroy(release_heap_cache);
}

module_init(_init_litmus);
module_exit(_exit_litmus);