path: root/litmus/litmus.c
author     Andrea Bastoni <bastoni@cs.unc.edu>    2009-12-17 21:31:46 -0500
committer  Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-29 17:14:41 -0400
commit     53696c1fe6a6ada66f2a47c078d62aee40ad8ebe (patch)
tree       01353124d90341322967c6b6e4b010fe4ed22026 /litmus/litmus.c
parent     4e593e7105dec02e62ea7a1812dccb35a0d56d01 (diff)
[ported from 2008.3] Add rt_domain_t support

Still to be merged:
- arm_release_timer() with no rq locking
Diffstat (limited to 'litmus/litmus.c')
-rw-r--r--    litmus/litmus.c    45
1 file changed, 32 insertions, 13 deletions
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 9254f1621af7..de751a14d77c 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -19,6 +19,8 @@
 
 #include <litmus/trace.h>
 
+#include <litmus/rt_domain.h>
+
 /* Number of RT tasks that exist in the system */
 atomic_t rt_task_count = ATOMIC_INIT(0);
 static DEFINE_SPINLOCK(task_transition_lock);
@@ -30,6 +32,7 @@ atomic_t __log_seq_no = ATOMIC_INIT(0);
 atomic_t release_master_cpu = ATOMIC_INIT(NO_CPU);
 
 static struct kmem_cache * heap_node_cache;
+extern struct kmem_cache * release_heap_cache;
 
 struct heap_node* heap_node_alloc(int gfp_flags)
 {
@@ -41,6 +44,9 @@ void heap_node_free(struct heap_node* hn)
         kmem_cache_free(heap_node_cache, hn);
 }
 
+struct release_heap* release_heap_alloc(int gfp_flags);
+void release_heap_free(struct release_heap* rh);
+
 /*
  * sys_set_task_rt_param
  * @pid: Pid of the task which scheduling parameters must be changed
@@ -299,15 +305,16 @@ long litmus_admit_task(struct task_struct* tsk)
             get_exec_cost(tsk) > get_rt_period(tsk)) {
                 TRACE_TASK(tsk, "litmus admit: invalid task parameters "
                                 "(%lu, %lu)\n",
                            get_exec_cost(tsk), get_rt_period(tsk));
-                return -EINVAL;
+                retval = -EINVAL;
+                goto out;
         }
 
-        if (!cpu_online(get_partition(tsk)))
-        {
+        if (!cpu_online(get_partition(tsk))) {
                 TRACE_TASK(tsk, "litmus admit: cpu %d is not online\n",
                            get_partition(tsk));
-                return -EINVAL;
+                retval = -EINVAL;
+                goto out;
         }
 
         INIT_LIST_HEAD(&tsk_rt(tsk)->list);
@@ -316,17 +323,22 @@ long litmus_admit_task(struct task_struct* tsk)
         spin_lock_irqsave(&task_transition_lock, flags);
 
         /* allocate heap node for this task */
         tsk_rt(tsk)->heap_node = heap_node_alloc(GFP_ATOMIC);
-        if (!tsk_rt(tsk)->heap_node ||
-            !tsk_rt(tsk)->rel_heap) {
+        tsk_rt(tsk)->rel_heap = release_heap_alloc(GFP_ATOMIC);
+
+        if (!tsk_rt(tsk)->heap_node || !tsk_rt(tsk)->rel_heap) {
                 printk(KERN_WARNING "litmus: no more heap node memory!?\n");
-                retval = -ENOMEM;
+
                 heap_node_free(tsk_rt(tsk)->heap_node);
-        } else
+                release_heap_free(tsk_rt(tsk)->rel_heap);
+
+                retval = -ENOMEM;
+                goto out_unlock;
+        } else {
                 heap_node_init(&tsk_rt(tsk)->heap_node, tsk);
+        }
 
-        if (!retval)
-                retval = litmus->admit_task(tsk);
+        retval = litmus->admit_task(tsk);
 
         if (!retval) {
                 sched_trace_task_name(tsk);
@@ -334,8 +346,9 @@ long litmus_admit_task(struct task_struct* tsk)
                 atomic_inc(&rt_task_count);
         }
 
+out_unlock:
         spin_unlock_irqrestore(&task_transition_lock, flags);
-
+out:
         return retval;
 }
 
@@ -343,9 +356,13 @@ void litmus_exit_task(struct task_struct* tsk)
 {
         if (is_realtime(tsk)) {
                 sched_trace_task_completion(tsk, 1);
+
                 litmus->task_exit(tsk);
+
                 BUG_ON(heap_node_in_heap(tsk_rt(tsk)->heap_node));
                 heap_node_free(tsk_rt(tsk)->heap_node);
+                release_heap_free(tsk_rt(tsk)->rel_heap);
+
                 atomic_dec(&rt_task_count);
                 reinit_litmus_state(tsk, 1);
         }
@@ -632,6 +649,7 @@ static int __init _init_litmus(void)
         register_sched_plugin(&linux_sched_plugin);
 
         heap_node_cache = KMEM_CACHE(heap_node, SLAB_PANIC);
+        release_heap_cache = KMEM_CACHE(release_heap, SLAB_PANIC);
 
 #ifdef CONFIG_MAGIC_SYSRQ
         /* offer some debugging help */
@@ -650,6 +668,7 @@ static void _exit_litmus(void)
 {
         exit_litmus_proc();
         kmem_cache_destroy(heap_node_cache);
+        kmem_cache_destroy(release_heap_cache);
 }
 
 module_init(_init_litmus);
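
Note: within litmus.c this patch only adds prototypes for release_heap_alloc()/release_heap_free() and creates the backing release_heap_cache slab; the actual definitions are expected to arrive with the rt_domain code this commit ports. As a rough sketch of how such a pair would sit on top of the slab cache (assuming struct release_heap is defined in <litmus/rt_domain.h>; this is illustrative, not the patch's implementation):

/*
 * Sketch only -- not part of this patch. Illustrates the expected
 * pairing of the alloc/free prototypes with the release_heap_cache
 * slab created in _init_litmus().
 */
#include <linux/slab.h>
#include <litmus/rt_domain.h>

struct kmem_cache *release_heap_cache;

struct release_heap* release_heap_alloc(int gfp_flags)
{
        /* litmus_admit_task() passes GFP_ATOMIC because the allocation
         * happens while task_transition_lock is held with IRQs off */
        return kmem_cache_alloc(release_heap_cache, gfp_flags);
}

void release_heap_free(struct release_heap* rh)
{
        /* defensive: the error path in litmus_admit_task() can reach this
         * with a NULL pointer when only one of the two allocations failed */
        if (rh)
                kmem_cache_free(release_heap_cache, rh);
}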