From 6b3b85da89aee11ed47369833470b9282dd5994f Mon Sep 17 00:00:00 2001
From: Glenn Elliott
Date: Sun, 16 Sep 2012 18:45:05 -0400
Subject: C-EDF support for auxiliary tasks.

Extended auxiliary task support to C-EDF.  Modeled after G-EDF.
---
 litmus/sched_cedf.c    | 85 +++++++++++++++++++++++++++++++++++++++++++++++---
 litmus/sched_gsn_edf.c |  6 ++--
 2 files changed, 84 insertions(+), 7 deletions(-)

diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index f030f027b486..f5c9807090a1 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -58,6 +58,10 @@
 #include <litmus/affinity.h>
 #endif
 
+#ifdef CONFIG_REALTIME_AUX_TASKS
+#include <litmus/aux_tasks.h>
+#endif
+
 /* to configure the cluster size */
 #include <litmus/litmus_proc.h>
 
@@ -313,7 +317,15 @@ static noinline void requeue(struct task_struct* task)
 	BUG_ON(is_queued(task));
 
 	if (is_released(task, litmus_clock()))
-		__add_ready(&cluster->domain, task);
+#ifdef CONFIG_REALTIME_AUX_TASKS
+		if (unlikely(tsk_rt(task)->is_aux_task && !is_running(task))) {
+			/* aux_task probably transitioned to real-time while it was blocked */
+			TRACE_CUR("aux task %s/%d is not ready!\n", task->comm, task->pid);
+			unlink(task); /* really needed? */
+		}
+		else
+#endif
+		__add_ready(&cluster->domain, task);
 	else {
 		/* it has got to wait */
 		add_release(&cluster->domain, task);
@@ -1019,9 +1031,14 @@ static void cedf_task_wake_up(struct task_struct *task)
 	set_rt_flags(task, RT_F_RUNNING);  // periodic model
 #endif
 
-	if(tsk_rt(task)->linked_on == NO_CPU)
-		cedf_job_arrival(task);
+#ifdef CONFIG_REALTIME_AUX_TASKS
+	if (tsk_rt(task)->has_aux_tasks) {
+		TRACE_CUR("%s/%d is ready so aux tasks may not inherit.\n", task->comm, task->pid);
+		disable_aux_task_owner(task);
+	}
+#endif
 
+	cedf_job_arrival(task);
 	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
 }
 
@@ -1036,7 +1053,17 @@ static void cedf_task_block(struct task_struct *t)
 
 	/* unlink if necessary */
 	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
+
 	unlink(t);
+
+#ifdef CONFIG_REALTIME_AUX_TASKS
+	if (tsk_rt(t)->has_aux_tasks) {
+
+		TRACE_CUR("%s/%d is blocked so aux tasks may inherit.\n", t->comm, t->pid);
+		enable_aux_task_owner(t);
+	}
+#endif
+
 	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
 
 	BUG_ON(!is_realtime(t));
@@ -1052,8 +1079,22 @@ static void cedf_task_exit(struct task_struct * t)
 	cedf_change_prio_pai_tasklet(t, NULL);
 #endif
 
+#ifdef CONFIG_REALTIME_AUX_TASKS
+	if (tsk_rt(t)->is_aux_task) {
+		exit_aux_task(t); /* cannot be called with cluster_lock held */
+	}
+#endif
+
 	/* unlink if necessary */
 	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
+
+#ifdef CONFIG_REALTIME_AUX_TASKS
+	/* make sure we clean up on our way out */
+	if (tsk_rt(t)->has_aux_tasks) {
+		disable_aux_task_owner(t); /* must be called with cluster_lock held */
+	}
+#endif
+
 	unlink(t);
 	if (tsk_rt(t)->scheduled_on != NO_CPU) {
 		cpu_entry_t *cpu;
@@ -1092,8 +1133,16 @@ static int __increase_priority_inheritance(struct task_struct* t,
 	int success = 1;
 	int linked_on;
 	int check_preempt = 0;
+	cedf_domain_t* cluster;
 
-	cedf_domain_t* cluster = task_cpu_cluster(t);
+	if (prio_inh && prio_inh == effective_priority(t)) {
+		/* relationship already established. */
+		TRACE_TASK(t, "already has effective priority of %s/%d\n",
+			   prio_inh->comm, prio_inh->pid);
+		goto out;
+	}
+
+	cluster = task_cpu_cluster(t);
 
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
 	/* this sanity check allows for weaker locking in protocols */
@@ -1155,6 +1204,13 @@ static int __increase_priority_inheritance(struct task_struct* t,
 				&cluster->domain.ready_queue);
 			check_for_preemptions(cluster);
 		}
+
+#ifdef CONFIG_REALTIME_AUX_TASKS
+		/* propagate to aux tasks */
+		if (tsk_rt(t)->has_aux_tasks) {
+			aux_task_owner_increase_priority(t);
+		}
+#endif
 	}
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
 	}
@@ -1170,6 +1226,8 @@ static int __increase_priority_inheritance(struct task_struct* t,
 		success = 0;
 	}
 #endif
+
+out:
 	return success;
 }
 
@@ -1211,6 +1269,15 @@ static int __decrease_priority_inheritance(struct task_struct* t,
 					struct task_struct* prio_inh)
 {
 	int success = 1;
+
+	if (prio_inh == tsk_rt(t)->inh_task) {
+		/* relationship already established. */
+		TRACE_TASK(t, "already inherits priority from %s/%d\n",
+			   (prio_inh) ? prio_inh->comm : "(nil)",
+			   (prio_inh) ? prio_inh->pid : 0);
+		goto out;
+	}
+
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
 	if(__edf_higher_prio(t, EFFECTIVE, prio_inh, BASE)) {
 #endif
@@ -1248,6 +1315,14 @@ static int __decrease_priority_inheritance(struct task_struct* t,
 		}
 		raw_spin_unlock(&cluster->domain.release_lock);
 	}
+
+#ifdef CONFIG_REALTIME_AUX_TASKS
+	/* propagate to aux tasks */
+	if (tsk_rt(t)->has_aux_tasks) {
+		aux_task_owner_decrease_priority(t);
+	}
+#endif
+
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
 	}
 	else {
@@ -1261,6 +1336,8 @@ static int __decrease_priority_inheritance(struct task_struct* t,
 		success = 0;
 	}
 #endif
+
+out:
 	return success;
 }
 
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 5fc330f14a0e..ed9b4697a5a2 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -170,7 +170,6 @@ struct tasklet_head gsnedf_pending_tasklets;
  * TRACE() log.
 #define WANT_ALL_SCHED_EVENTS
  */
-//#define WANT_ALL_SCHED_EVENTS
 
 static int cpu_lower_prio(struct binheap_node *_a, struct binheap_node *_b)
 {
@@ -370,8 +369,7 @@ static void check_for_preemptions(void)
 			&per_cpu(gsnedf_cpu_entries, task_cpu(task)));
 		if (affinity)
 			last = affinity;
-
-		if (requeue_preempted_job(last->linked))
+		else if (requeue_preempted_job(last->linked))
 			requeue(last->linked);
 	}
 #else
@@ -467,9 +465,11 @@ static void gsnedf_tick(struct task_struct* t)
 		}
 	}
 
+	/*
 	if(is_realtime(t)) {
 		TRACE_TASK(t, "tick %llu\n", litmus_clock());
 	}
+	*/
 }
--
cgit v1.2.2