From 4ad6ba08f0dab67bbd89a26b27f1cc86e3c45c13 Mon Sep 17 00:00:00 2001 From: Glenn Elliott Date: Fri, 14 Sep 2012 08:34:36 -0400 Subject: checkpoint for aux_tasks. can still deadlock --- litmus/Makefile | 2 +- litmus/aux_tasks.c | 387 +++++++++++++++++++++++++++++++++++++++++++++++++ litmus/edf_common.c | 22 ++- litmus/litmus.c | 111 +++++++------- litmus/nvidia_info.c | 48 ++++++ litmus/rt_domain.c | 13 +- litmus/sched_cedf.c | 12 +- litmus/sched_gsn_edf.c | 161 ++++++++++++++++++-- litmus/sched_plugin.c | 16 ++ 9 files changed, 688 insertions(+), 84 deletions(-) create mode 100644 litmus/aux_tasks.c (limited to 'litmus') diff --git a/litmus/Makefile b/litmus/Makefile index 59c018560ee9..f2dd7be7ae4a 100644 --- a/litmus/Makefile +++ b/litmus/Makefile @@ -31,7 +31,7 @@ obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o obj-$(CONFIG_SCHED_OVERHEAD_TRACE) += trace.o -obj-$(CONFIG_LITMUS_LOCKING) += kfmlp_lock.o +obj-$(CONFIG_LITMUS_LOCKING) += aux_tasks.o kfmlp_lock.o obj-$(CONFIG_LITMUS_NESTED_LOCKING) += rsm_lock.o ikglp_lock.o obj-$(CONFIG_LITMUS_SOFTIRQD) += litmus_softirq.o obj-$(CONFIG_LITMUS_PAI_SOFTIRQD) += litmus_pai_softirq.o diff --git a/litmus/aux_tasks.c b/litmus/aux_tasks.c new file mode 100644 index 000000000000..c197a95fc3a1 --- /dev/null +++ b/litmus/aux_tasks.c @@ -0,0 +1,387 @@ +#ifdef CONFIG_LITMUS_LOCKING + +#include +#include +#include +#include +#include + +static int admit_aux_task(struct task_struct *t) +{ + int retval = 0; + struct task_struct *leader = t->group_leader; + + struct rt_task tp = { + .exec_cost = 0, + .period = MAGIC_AUX_TASK_PERIOD, + .relative_deadline = MAGIC_AUX_TASK_PERIOD, + .phase = 0, + .cpu = task_cpu(leader), /* take CPU of group leader */ + .budget_policy = NO_ENFORCEMENT, + .cls = RT_CLASS_BEST_EFFORT + }; + + struct sched_param param = { .sched_priority = 0}; + + tsk_rt(t)->task_params = tp; + retval = sched_setscheduler_nocheck(t, SCHED_LITMUS, ¶m); + + return retval; +} + +int exit_aux_task(struct task_struct *t) +{ + int retval = 0; + struct task_struct *leader = t->group_leader; + + BUG_ON(!tsk_rt(t)->is_aux_task); + + TRACE_CUR("Aux task %s/%d is exiting from %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); + + list_del(&tsk_rt(t)->aux_task_node); + + tsk_rt(t)->is_aux_task = 0; + + if (tsk_rt(t)->inh_task) { + litmus->decrease_prio(t, NULL); + } + + return retval; +} + +static int aux_tasks_increase_priority(struct task_struct *leader, struct task_struct *hp) +{ + int retval = 0; + struct list_head *pos; + + TRACE_CUR("Increasing priority of aux tasks in group %s/%d.\n", leader->comm, leader->pid); + + list_for_each(pos, &tsk_aux(leader)->aux_tasks) { + struct task_struct *aux = + container_of(list_entry(pos, struct rt_param, aux_task_node), + struct task_struct, rt_param); + + if (!is_realtime(aux)) { +#if 0 + /* currently can't do this here because of scheduler deadlock on itself */ + TRACE_CUR("aux_tasks_increase_priorityting aux task: %s/%d\n", aux->comm, aux->pid); + retval = admit_aux_task(aux); + + if (retval != 0) { + TRACE_CUR("failed to admit aux task %s/%d\n", aux->comm, aux->pid); + goto out; + } +#endif + TRACE_CUR("skipping non-real-time aux task %s/%d\n", aux->comm, aux->pid); + } + + // aux tasks don't touch rt locks, so no nested call needed. 
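+		// (callers resolve hp to its effective priority first, so one
+		// direct __increase_prio() per aux thread is sufficient)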
+ TRACE_CUR("increasing %s/%d.\n", aux->comm, aux->pid); + retval = litmus->__increase_prio(aux, hp); + } + + //out: + return retval; +} + +static int aux_tasks_decrease_priority(struct task_struct *leader, struct task_struct *hp) +{ + int retval = 0; + struct list_head *pos; + + TRACE_CUR("Decreasing priority of aux tasks in group %s/%d.\n", leader->comm, leader->pid); + + list_for_each(pos, &tsk_aux(leader)->aux_tasks) { + struct task_struct *aux = + container_of(list_entry(pos, struct rt_param, aux_task_node), + struct task_struct, rt_param); + + if (!is_realtime(aux)) { +#if 0 + /* currently can't do this here because of scheduler deadlock on itself */ + TRACE_CUR("aux_tasks_increase_priorityting aux task: %s/%d\n", aux->comm, aux->pid); + retval = admit_aux_task(aux); + + if (retval != 0) + goto out; + + if (hp) { + // aux tasks don't touch rt locks, so no nested call needed. + TRACE_CUR("decreasing (actually increasing) %s/%d.\n", aux->comm, aux->pid); + retval = litmus->__increase_prio(aux, hp); + } +#endif + + TRACE_CUR("skipping non-real-time aux task %s/%d\n", aux->comm, aux->pid); + } + else { + TRACE_CUR("decreasing %s/%d.\n", aux->comm, aux->pid); + retval = litmus->__decrease_prio(aux, hp); + } + } + + //out: + return retval; +} + +int aux_task_owner_increase_priority(struct task_struct *t) +{ + int retval = 0; + struct task_struct *leader; + struct task_struct *hp = NULL; + + BUG_ON(!tsk_rt(t)->has_aux_tasks); + BUG_ON(!is_realtime(t)); + BUG_ON(!binheap_is_in_heap(&tsk_rt(t)->aux_task_owner_node)); + + leader = t->group_leader; + + TRACE_CUR("task %s/%d in group %s/%d increasing priority.\n", t->comm, t->pid, leader->comm, leader->pid); + + hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), + struct task_struct, rt_param); + + if (hp == t) { + goto out; // already hp, nothing to do. + } + + binheap_decrease(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners); + + hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), + struct task_struct, rt_param); + + if (hp == t) { + TRACE_CUR("%s/%d is new hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); + retval = aux_tasks_increase_priority(leader, + (tsk_rt(hp)->inh_task) ? tsk_rt(hp)->inh_task : hp); + } + +out: + return retval; +} + +int aux_task_owner_decrease_priority(struct task_struct *t) +{ + int retval = 0; + struct task_struct *leader; + struct task_struct *hp = NULL; + struct task_struct *new_hp = NULL; + + BUG_ON(!tsk_rt(t)->has_aux_tasks); + BUG_ON(!is_realtime(t)); + BUG_ON(!binheap_is_in_heap(&tsk_rt(t)->aux_task_owner_node)); + + leader = t->group_leader; + + TRACE_CUR("task %s/%d in group %s/%d decresing priority.\n", t->comm, t->pid, leader->comm, leader->pid); + + hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), + struct task_struct, rt_param); + binheap_delete(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners); + binheap_add(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners, + struct rt_param, aux_task_owner_node); + new_hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), + struct task_struct, rt_param); + + if (hp == t && new_hp != t) { + TRACE_CUR("%s/%d is no longer hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); + retval = aux_tasks_decrease_priority(leader, + (tsk_rt(new_hp)->inh_task) ? 
tsk_rt(new_hp)->inh_task : new_hp); + } + + return retval; +} + + + +long enable_aux_task_owner(struct task_struct *t) +{ + long retval = 0; + struct task_struct *leader = t->group_leader; + struct task_struct *hp; + + if (!tsk_rt(t)->has_aux_tasks) { + TRACE_CUR("task %s/%d is not an aux owner\n", t->comm, t->pid); + return -1; + } + + BUG_ON(!is_realtime(t)); + + if (binheap_is_in_heap(&tsk_rt(t)->aux_task_owner_node)) { + TRACE_CUR("task %s/%d is already active\n", t->comm, t->pid); + goto out; + } + + binheap_add(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners, + struct rt_param, aux_task_owner_node); + + hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), + struct task_struct, rt_param); + if (hp == t) { + /* we're the new hp */ + TRACE_CUR("%s/%d is new hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); + + retval = aux_tasks_increase_priority(leader, + (tsk_rt(hp)->inh_task)? tsk_rt(hp)->inh_task : hp); + } + + +out: + return retval; +} + +long disable_aux_task_owner(struct task_struct *t) +{ + long retval = 0; + struct task_struct *leader = t->group_leader; + struct task_struct *hp; + struct task_struct *new_hp = NULL; + + if (!tsk_rt(t)->has_aux_tasks) { + TRACE_CUR("task %s/%d is not an aux owner\n", t->comm, t->pid); + return -1; + } + + BUG_ON(!is_realtime(t)); + + if (!binheap_is_in_heap(&tsk_rt(t)->aux_task_owner_node)) { + TRACE_CUR("task %s/%d is already not active\n", t->comm, t->pid); + goto out; + } + + TRACE_CUR("task %s/%d exiting from group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); + + hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), + struct task_struct, rt_param); + binheap_delete(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners); + + if (!binheap_empty(&tsk_aux(leader)->aux_task_owners)) { + new_hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), + struct task_struct, rt_param); + } + + if (hp == t && new_hp != t) { + struct task_struct *to_inh = NULL; + + TRACE_CUR("%s/%d is no longer hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); + + if (new_hp) { + to_inh = (tsk_rt(new_hp)->inh_task) ? 
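+
+/*
+ * Owner lifecycle (wired up in sched_gsn_edf.c below): the plugin calls
+ * enable_aux_task_owner() when a registered owner blocks, so the group's
+ * aux threads can inherit from the highest-priority blocked owner, and
+ * disable_aux_task_owner() when that owner wakes up or exits, dropping
+ * the aux threads back toward their (lowest) base priority.
+ */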
+
+static int aux_task_owner_max_priority_order(struct binheap_node *a,
+				struct binheap_node *b)
+{
+	struct task_struct *d_a = container_of(binheap_entry(a, struct rt_param, aux_task_owner_node),
+					struct task_struct, rt_param);
+	struct task_struct *d_b = container_of(binheap_entry(b, struct rt_param, aux_task_owner_node),
+					struct task_struct, rt_param);
+
+	BUG_ON(!d_a);
+	BUG_ON(!d_b);
+
+	return litmus->compare(d_a, d_b);
+}
+
+
+asmlinkage long sys_slave_non_rt_threads(void)
+{
+	long retval = 0;
+	struct task_struct *leader;
+	struct task_struct *t;
+
+	read_lock_irq(&tasklist_lock);
+
+	leader = current->group_leader;
+
+#if 0
+	t = leader;
+	do {
+		if (tsk_rt(t)->has_aux_tasks || tsk_rt(t)->is_aux_task) {
+			printk("slave_non_rt_threads may only be called once per process.\n");
+			retval = -EINVAL;
+			goto out_unlock;
+		}
+		t = next_thread(t);
+	} while (t != leader);
+#endif
+
+	if (!tsk_aux(leader)->initialized) {
+		INIT_LIST_HEAD(&tsk_aux(leader)->aux_tasks);
+		INIT_BINHEAP_HANDLE(&tsk_aux(leader)->aux_task_owners, aux_task_owner_max_priority_order);
+		tsk_aux(leader)->initialized = 1;
+	}
+
+	t = leader;
+	do {
+		/* doesn't hurt to initialize them both */
+		INIT_LIST_HEAD(&tsk_rt(t)->aux_task_node);
+		INIT_BINHEAP_NODE(&tsk_rt(t)->aux_task_owner_node);
+
+		TRACE_CUR("Checking task in %s/%d: %s/%d = (p = %llu):\n",
+				leader->comm, leader->pid, t->comm, t->pid,
+				tsk_rt(t)->task_params.period);
+
+		/* inspect the period to see whether this is an rt task */
+		if (tsk_rt(t)->task_params.period == 0 ||
+			tsk_rt(t)->task_params.period == MAGIC_AUX_TASK_PERIOD) {
+			if (!tsk_rt(t)->is_aux_task) {
+				TRACE_CUR("AUX task in %s/%d: %s/%d:\n", leader->comm, leader->pid, t->comm, t->pid);
+				/* hasn't been admitted into rt. make it an aux task. */
+				tsk_rt(t)->is_aux_task = 1;
+				list_add_tail(&tsk_rt(t)->aux_task_node, &tsk_aux(leader)->aux_tasks);
+
+				(void)admit_aux_task(t);
+			}
+			else {
+				TRACE_CUR("AUX task in %s/%d is already set up: %s/%d\n", leader->comm, leader->pid, t->comm, t->pid);
+			}
+		}
+		else {
+			if (!tsk_rt(t)->has_aux_tasks) {
+				TRACE_CUR("task in %s/%d: %s/%d:\n", leader->comm, leader->pid, t->comm, t->pid);
+				tsk_rt(t)->has_aux_tasks = 1;
+				if (is_realtime(t)) {
+					binheap_add(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners,
+							struct rt_param, aux_task_owner_node);
+				}
+			}
+			else {
+				TRACE_CUR("task in %s/%d is already set up: %s/%d\n", leader->comm, leader->pid, t->comm, t->pid);
+			}
+		}
+
+		t = next_thread(t);
+	} while(t != leader);
+
+
+	if (!binheap_empty(&tsk_aux(leader)->aux_task_owners)) {
+		struct task_struct *hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node),
+						struct task_struct, rt_param);
+		TRACE_CUR("found hp in group: %s/%d\n", hp->comm, hp->pid);
+		retval = aux_tasks_increase_priority(leader,
+				(tsk_rt(hp)->inh_task)? tsk_rt(hp)->inh_task : hp);
+	}
+
+	//out_unlock:
+	read_unlock_irq(&tasklist_lock);
+
+	return retval;
+}
+
+#else
+
+asmlinkage long sys_slave_non_rt_threads(void)
+{
+	printk("Unsupported. 
Recompile with CONFIG_LITMUS_LOCKING.\n"); + return -EINVAL; +} + +#endif diff --git a/litmus/edf_common.c b/litmus/edf_common.c index 39ce1816ee04..9b439299e5fc 100644 --- a/litmus/edf_common.c +++ b/litmus/edf_common.c @@ -74,6 +74,23 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) } #ifdef CONFIG_LITMUS_LOCKING + /* aux threads with no inheritance have lowest priority; however, do a PID + * tie break if both threads are aux threads with no inheritance. + */ + if (unlikely(first->rt_param.is_aux_task && !first->rt_param.inh_task)) { + if (second->rt_param.is_aux_task && !second->rt_param.inh_task) { + /* pid break */ + if (first->pid < second->pid) { + return 1; + } + } + return 0; + } + if (unlikely(second->rt_param.is_aux_task && !second->rt_param.inh_task)) { + /* no need for pid break -- case already tested */ + return 1; + } + /* Check for EFFECTIVE priorities. Change task * used for comparison in such a case. */ @@ -191,7 +208,7 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) /* Both tasks have the same inherited priority. * Likely in a bug-condition. */ - if (likely(first->pid < second->pid)) { + if (first->pid < second->pid) { return 1; } else if (first->pid == second->pid) { @@ -205,6 +222,8 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) /* The task with the inherited priority wins. */ if (!second->rt_param.inh_task) { + /* + * common with aux tasks. TRACE_CUR("unusual comparison: " "first = %s/%d first_task = %s/%d " "second = %s/%d second_task = %s/%d\n", @@ -214,6 +233,7 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) second->comm, second->pid, (second->rt_param.inh_task) ? second->rt_param.inh_task->comm : "(nil)", (second->rt_param.inh_task) ? second->rt_param.inh_task->pid : 0); + */ return 1; } } diff --git a/litmus/litmus.c b/litmus/litmus.c index 83e8ef3f42af..1b4182ac3337 100644 --- a/litmus/litmus.c +++ b/litmus/litmus.c @@ -25,6 +25,10 @@ #include #endif +#ifdef CONFIG_LITMUS_LOCKING +#include +#endif + /* Number of RT tasks that exist in the system */ atomic_t rt_task_count = ATOMIC_INIT(0); static DEFINE_RAW_SPINLOCK(task_transition_lock); @@ -327,60 +331,6 @@ asmlinkage long sys_null_call(cycles_t __user *ts) return ret; } - -long __litmus_admit_task(struct task_struct* tsk); - -asmlinkage long sys_slave_non_rt_threads(void) -{ - long retval = 0; - struct task_struct *leader = current->group_leader; - struct task_struct *t; - struct task_struct *hp = NULL; - - read_lock_irq(&tasklist_lock); - - t = leader; - do { - TRACE_CUR("threads in %s/%d: %s/%d:\n", leader->comm, leader->pid, t->comm, t->pid); - - if (tsk_rt(t)->heap_node == NULL) { - retval = __litmus_admit_task(t); - - if (retval != 0) break; - - /* hasn't been admitted into rt. make it a slave. 
*/ - tsk_rt(t)->slave = 1; - } - else { - tsk_rt(t)->has_slaves = 1; - - if (is_realtime(t) && litmus->compare(t, hp)) { - hp = t; - } - } - - t = next_thread(t); - } while(t != leader); - - if (hp) { - TRACE_CUR("found hp in group: %s/%d\n", hp->comm, hp->pid); - - /* set up inheritance */ - leader->hp_group = hp; - - t = leader; - do { - if (tsk_rt(t)->slave) { - litmus->increase_prio(t); - } - } while(t != leader); - } - - read_unlock_irq(&tasklist_lock); - - return 0; -} - #if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_AFFINITY_LOCKING) void init_gpu_affinity_state(struct task_struct* p) { @@ -412,11 +362,13 @@ static void reinit_litmus_state(struct task_struct* p, int restore) { struct rt_task user_config = {}; void* ctrl_page = NULL; - + #ifdef CONFIG_LITMUS_NESTED_LOCKING binheap_order_t prio_order = NULL; #endif + TRACE_TASK(p, "reinit_litmus_state: restore = %d\n", restore); + if (restore) { /* Safe user-space provided configuration data. * and allocated page. */ @@ -428,10 +380,12 @@ static void reinit_litmus_state(struct task_struct* p, int restore) prio_order = p->rt_param.hp_blocked_tasks.compare; #endif +#ifdef CONFIG_LITMUS_LOCKING /* We probably should not be inheriting any task's priority * at this point in time. */ WARN_ON(p->rt_param.inh_task); +#endif #ifdef CONFIG_LITMUS_NESTED_LOCKING WARN_ON(p->rt_param.blocked_lock); @@ -459,6 +413,13 @@ static void reinit_litmus_state(struct task_struct* p, int restore) /* Cleanup everything else. */ memset(&p->rt_param, 0, sizeof(p->rt_param)); +#ifdef CONFIG_LITMUS_LOCKING + /* also clear out the aux_data. the !restore case is only called on + * fork (initial thread creation). */ + if (!restore) + memset(&p->aux_data, 0, sizeof(p->aux_data)); +#endif + /* Restore preserved fields. */ if (restore) { p->rt_param.task_params = user_config; @@ -475,7 +436,12 @@ static void reinit_litmus_state(struct task_struct* p, int restore) #endif } + +#ifdef CONFIG_LITMUS_LOCKING +long __litmus_admit_task(struct task_struct* tsk, int clear_aux) +#else long __litmus_admit_task(struct task_struct* tsk) +#endif { long retval = 0; unsigned long flags; @@ -520,6 +486,14 @@ long __litmus_admit_task(struct task_struct* tsk) atomic_set(&tsk_rt(tsk)->klitirqd_sem_stat, NOT_HELD); #endif +#ifdef CONFIG_LITMUS_LOCKING + /* turns out our aux thread isn't really an aux thread. 
*/ + if (clear_aux && tsk_rt(tsk)->is_aux_task) { + exit_aux_task(tsk); + tsk_rt(tsk)->has_aux_tasks = 1; + } +#endif + retval = litmus->admit_task(tsk); if (!retval) { @@ -537,8 +511,7 @@ out_unlock: long litmus_admit_task(struct task_struct* tsk) { long retval = 0; - unsigned long flags; - + BUG_ON(is_realtime(tsk)); if (get_rt_relative_deadline(tsk) == 0 || @@ -560,8 +533,12 @@ long litmus_admit_task(struct task_struct* tsk) goto out; } +#ifdef CONFIG_LITMUS_LOCKING + retval = __litmus_admit_task(tsk, (tsk_rt(tsk)->task_params.period != MAGIC_AUX_TASK_PERIOD)); +#else retval = __litmus_admit_task(tsk); - +#endif + out: return retval; } @@ -574,7 +551,7 @@ void litmus_exit_task(struct task_struct* tsk) litmus->task_exit(tsk); BUG_ON(bheap_node_in_heap(tsk_rt(tsk)->heap_node)); - bheap_node_free(tsk_rt(tsk)->heap_node); + bheap_node_free(tsk_rt(tsk)->heap_node); release_heap_free(tsk_rt(tsk)->rel_heap); atomic_dec(&rt_task_count); @@ -647,14 +624,22 @@ out: */ void litmus_fork(struct task_struct* p) { + reinit_litmus_state(p, 0); + if (is_realtime(p)) { + TRACE_TASK(p, "fork, is real-time\n"); /* clean out any litmus related state, don't preserve anything */ - reinit_litmus_state(p, 0); + //reinit_litmus_state(p, 0); /* Don't let the child be a real-time task. */ p->sched_reset_on_fork = 1; - } else + } else { /* non-rt tasks might have ctrl_page set */ tsk_rt(p)->ctrl_page = NULL; + + /* still don't inherit any parental parameters */ + //memset(&p->rt_param, 0, sizeof(p->rt_param)); + //memset(&p->aux_data, 0, sizeof(p->aux_data)); + } /* od tables are never inherited across a fork */ p->od_table = NULL; @@ -751,6 +736,10 @@ static int __init _init_litmus(void) init_topology(); #endif +#ifdef CONFIG_LITMUS_NVIDIA + //init_nvidia_info(); +#endif + return 0; } diff --git a/litmus/nvidia_info.c b/litmus/nvidia_info.c index 4b86a50d3bd1..b6ead58802f6 100644 --- a/litmus/nvidia_info.c +++ b/litmus/nvidia_info.c @@ -244,9 +244,56 @@ void dump_nvidia_info(const struct tasklet_struct *t) #endif } + + static struct module* nvidia_mod = NULL; + + +#if 0 +static int nvidia_ready_module_notify(struct notifier_block *self, + unsigned long val, void *data) +{ + mutex_lock(&module_mutex); + nvidia_mod = find_module("nvidia"); + mutex_unlock(&module_mutex); + + if(nvidia_mod != NULL) + { + TRACE("%s : Found NVIDIA module. Core Code: %p to %p\n", __FUNCTION__, + (void*)(nvidia_mod->module_core), + (void*)(nvidia_mod->module_core) + nvidia_mod->core_size); + init_nv_device_reg(); + return(0); + } + else + { + TRACE("%s : Could not find NVIDIA module! Loaded?\n", __FUNCTION__); + } +} + +static int nvidia_going_module_notify(struct notifier_block *self, + unsigned long val, void *data) +{ + nvidia_mod = NULL; + mb(); + + return 0; +} + +static struct notifier_block nvidia_ready = { + .notifier_call = nvidia_ready_module_notify, + .priority = 1, +}; + +static struct notifier_block nvidia_going = { + .notifier_call = nvidia_going_module_notify, + .priority = 1, +}; +#endif + int init_nvidia_info(void) { +#if 1 mutex_lock(&module_mutex); nvidia_mod = find_module("nvidia"); mutex_unlock(&module_mutex); @@ -263,6 +310,7 @@ int init_nvidia_info(void) TRACE("%s : Could not find NVIDIA module! 
Loaded?\n", __FUNCTION__); return(-1); } +#endif } void shutdown_nvidia_info(void) diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c index d0b796611bea..d4f030728d3c 100644 --- a/litmus/rt_domain.c +++ b/litmus/rt_domain.c @@ -300,10 +300,15 @@ void rt_domain_init(rt_domain_t *rt, */ void __add_ready(rt_domain_t* rt, struct task_struct *new) { - TRACE("rt: adding %s/%d (%llu, %llu, %llu) rel=%llu " - "to ready queue at %llu\n", - new->comm, new->pid, - get_exec_cost(new), get_rt_period(new), get_rt_relative_deadline(new), + TRACE("rt: adding %s/%d (%llu, %llu, %llu) " + "[inh_task: %s/%d (%llu, %llu %llu)] " + "rel=%llu to ready queue at %llu\n", + new->comm, new->pid, get_exec_cost(new), get_rt_period(new), get_rt_relative_deadline(new), + (tsk_rt(new)->inh_task) ? tsk_rt(new)->inh_task->comm : "(nil)", + (tsk_rt(new)->inh_task) ? tsk_rt(new)->inh_task->pid : 0, + (tsk_rt(new)->inh_task) ? get_exec_cost(tsk_rt(new)->inh_task) : 0, + (tsk_rt(new)->inh_task) ? get_rt_period(tsk_rt(new)->inh_task) : 0, + (tsk_rt(new)->inh_task) ? get_rt_relative_deadline(tsk_rt(new)->inh_task) : 0, get_release(new), litmus_clock()); BUG_ON(bheap_node_in_heap(tsk_rt(new)->heap_node)); diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c index d98de4579394..f030f027b486 100644 --- a/litmus/sched_cedf.c +++ b/litmus/sched_cedf.c @@ -1086,9 +1086,10 @@ static long cedf_admit_task(struct task_struct* tsk) /* called with IRQs off */ -static void __increase_priority_inheritance(struct task_struct* t, +static int __increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) { + int success = 1; int linked_on; int check_preempt = 0; @@ -1166,8 +1167,10 @@ static void __increase_priority_inheritance(struct task_struct* t, (prio_inh) ? prio_inh->comm : "nil", (prio_inh) ? prio_inh->pid : -1); WARN_ON(!prio_inh); + success = 0; } #endif + return success; } /* called with IRQs off */ @@ -1204,9 +1207,10 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str } /* called with IRQs off */ -static void __decrease_priority_inheritance(struct task_struct* t, +static int __decrease_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) { + int success = 1; #ifdef CONFIG_LITMUS_NESTED_LOCKING if(__edf_higher_prio(t, EFFECTIVE, prio_inh, BASE)) { #endif @@ -1254,8 +1258,10 @@ static void __decrease_priority_inheritance(struct task_struct* t, effective_priority(t)->comm, effective_priority(t)->pid, (prio_inh) ? prio_inh->comm : "nil", (prio_inh) ? 
prio_inh->pid : -1); + success = 0; } #endif + return success; } static void decrease_priority_inheritance(struct task_struct* t, @@ -1812,6 +1818,8 @@ static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = { .allocate_lock = cedf_allocate_lock, .increase_prio = increase_priority_inheritance, .decrease_prio = decrease_priority_inheritance, + .__increase_prio = __increase_priority_inheritance, + .__decrease_prio = __decrease_priority_inheritance, #endif #ifdef CONFIG_LITMUS_NESTED_LOCKING .nested_increase_prio = nested_increase_priority_inheritance, diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c index 83b2f04b1532..5b8ca6698423 100644 --- a/litmus/sched_gsn_edf.c +++ b/litmus/sched_gsn_edf.c @@ -29,6 +29,7 @@ #ifdef CONFIG_LITMUS_LOCKING #include +#include #endif #ifdef CONFIG_LITMUS_NESTED_LOCKING @@ -295,11 +296,37 @@ static noinline void requeue(struct task_struct* task) /* sanity check before insertion */ BUG_ON(is_queued(task)); - if (is_released(task, litmus_clock())) - __add_ready(&gsnedf, task); + if (is_released(task, litmus_clock())) { + + if (unlikely(tsk_rt(task)->is_aux_task && !is_running(task))) { + /* aux_task probably transitioned to real-time while it was blocked */ + TRACE_CUR("aux task %s/%d is not ready!\n", task->comm, task->pid); + } + else { + __add_ready(&gsnedf, task); + +#if 0 + if (tsk_rt(task)->has_aux_tasks) { + + TRACE_CUR("%s/%d is ready and has aux tasks.\n", task->comm, task->pid); + /* allow it's prio inheritance to act on aux threads */ + enable_aux_task_owner(task); + } +#endif + } + } else { /* it has got to wait */ add_release(&gsnedf, task); + +#if 0 + if (tsk_rt(task)->has_aux_tasks) { + + TRACE_CUR("%s/%d is waiting for release and has aux tasks.\n", task->comm, task->pid); + /* prevent prio inheritance from acting while it's not ready */ + disable_aux_task_owner(task); + } +#endif } } @@ -366,10 +393,45 @@ static noinline void gsnedf_job_arrival(struct task_struct* task) static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks) { unsigned long flags; + //struct bheap_node* node; raw_spin_lock_irqsave(&gsnedf_lock, flags); +#if 0 + node = tasks->head; + while(node) { + struct task_struct *task = bheap2task(node); + + if (tsk_rt(task)->has_aux_tasks) { + + TRACE_CUR("%s/%d is ready and has aux tasks.\n", task->comm, task->pid); + + /* allow it's prio inheritance to act on aux threads */ + enable_aux_task_owner(task); + } + + /* pre-order sub-tree traversal */ + if (node->child) { + /* go down */ + node = node->child; + } + else if(node->parent && node->parent->next) { + /* go up a level and across */ + node = node->parent->next; + } + else if(!node->parent && node->next) { + /* go to the next binomial tree */ + node = node->next; + } + else { + /* the end! 
*/ + node = NULL; + } + } +#endif + __merge_ready(rt, tasks); + check_for_preemptions(); raw_spin_unlock_irqrestore(&gsnedf_lock, flags); @@ -387,11 +449,12 @@ static noinline void job_completion(struct task_struct *t, int forced) #endif TRACE_TASK(t, "job_completion().\n"); - + /* set flags */ set_rt_flags(t, RT_F_SLEEP); /* prepare for next period */ prepare_for_next_period(t); + if (is_released(t, litmus_clock())) sched_trace_task_release(t); /* unlink */ @@ -902,8 +965,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) else if (exists && !next) TRACE("becomes idle at %llu.\n", litmus_clock()); #endif - - + return next; } @@ -997,13 +1059,18 @@ static void gsnedf_task_wake_up(struct task_struct *task) set_rt_flags(task, RT_F_RUNNING); #endif + if (tsk_rt(task)->has_aux_tasks) { + + TRACE_CUR("%s/%d is ready so aux tasks may not inherit.\n", task->comm, task->pid); + disable_aux_task_owner(task); + } + gsnedf_job_arrival(task); raw_spin_unlock_irqrestore(&gsnedf_lock, flags); } static void gsnedf_task_block(struct task_struct *t) { - // TODO: is this called on preemption?? unsigned long flags; TRACE_TASK(t, "block at %llu\n", litmus_clock()); @@ -1013,6 +1080,12 @@ static void gsnedf_task_block(struct task_struct *t) unlink(t); + if (tsk_rt(t)->has_aux_tasks) { + + TRACE_CUR("%s/%d is blocked so aux tasks may inherit.\n", t->comm, t->pid); + enable_aux_task_owner(t); + } + raw_spin_unlock_irqrestore(&gsnedf_lock, flags); BUG_ON(!is_realtime(t)); @@ -1027,8 +1100,22 @@ static void gsnedf_task_exit(struct task_struct * t) gsnedf_change_prio_pai_tasklet(t, NULL); #endif +#ifdef CONFIG_LITMUS_LOCKING + if (tsk_rt(t)->is_aux_task) { + exit_aux_task(t); /* cannot be called with gsnedf_lock held */ + } +#endif + /* unlink if necessary */ raw_spin_lock_irqsave(&gsnedf_lock, flags); + +#ifdef CONFIG_LITMUS_LOCKING + /* make sure we clean up on our way out */ + if(tsk_rt(t)->has_aux_tasks) { + disable_aux_task_owner(t); /* must be called witl gsnedf_lock held */ + } +#endif + unlink(t); if (tsk_rt(t)->scheduled_on != NO_CPU) { gsnedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL; @@ -1037,7 +1124,7 @@ static void gsnedf_task_exit(struct task_struct * t) raw_spin_unlock_irqrestore(&gsnedf_lock, flags); BUG_ON(!is_realtime(t)); - TRACE_TASK(t, "RIP\n"); + TRACE_TASK(t, "RIP\n"); } @@ -1061,12 +1148,20 @@ static long gsnedf_admit_task(struct task_struct* tsk) #include /* called with IRQs off */ -static void __increase_priority_inheritance(struct task_struct* t, +static int __increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) { + int success = 1; int linked_on; int check_preempt = 0; + if (prio_inh && prio_inh == effective_priority(t)) { + /* relationship already established. */ + TRACE_TASK(t, "already has effective priority of %s/%d\n", + prio_inh->comm, prio_inh->pid); + goto out; + } + #ifdef CONFIG_LITMUS_NESTED_LOCKING /* this sanity check allows for weaker locking in protocols */ /* TODO (klitirqd): Skip this check if 't' is a proxy thread (???) */ @@ -1126,28 +1221,40 @@ static void __increase_priority_inheritance(struct task_struct* t, &gsnedf.ready_queue); check_for_preemptions(); } + + + /* propagate to aux tasks */ + if (tsk_rt(t)->has_aux_tasks) { + aux_task_owner_increase_priority(t); + } } #ifdef CONFIG_LITMUS_NESTED_LOCKING } else { TRACE_TASK(t, "Spurious invalid priority increase. 
" - "Inheritance request: %s/%d [eff_prio = %s/%d] to inherit from %s/%d\n" + "Inheritance request: %s/%d [eff_prio = %s/%d] to inherit from %s/%d\n" "Occurance is likely okay: probably due to (hopefully safe) concurrent priority updates.\n", t->comm, t->pid, effective_priority(t)->comm, effective_priority(t)->pid, (prio_inh) ? prio_inh->comm : "nil", (prio_inh) ? prio_inh->pid : -1); WARN_ON(!prio_inh); + success = 0; } #endif + +out: + return success; } /* called with IRQs off */ static void increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) { + int success; + raw_spin_lock(&gsnedf_lock); - __increase_priority_inheritance(t, prio_inh); + success = __increase_priority_inheritance(t, prio_inh); #ifdef CONFIG_LITMUS_SOFTIRQD if(tsk_rt(t)->cur_klitirqd != NULL) @@ -1160,7 +1267,7 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str #endif raw_spin_unlock(&gsnedf_lock); - + #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA) if(tsk_rt(t)->held_gpus) { int i; @@ -1175,9 +1282,19 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str /* called with IRQs off */ -static void __decrease_priority_inheritance(struct task_struct* t, +static int __decrease_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) { + int success = 1; + + if (prio_inh == tsk_rt(t)->inh_task) { + /* relationship already established. */ + TRACE_TASK(t, "already inherits priority from %s/%d\n", + (prio_inh) ? prio_inh->comm : "(nil)", + (prio_inh) ? prio_inh->pid : 0); + goto out; + } + #ifdef CONFIG_LITMUS_NESTED_LOCKING if(__edf_higher_prio(t, EFFECTIVE, prio_inh, BASE)) { #endif @@ -1214,6 +1331,11 @@ static void __decrease_priority_inheritance(struct task_struct* t, } raw_spin_unlock(&gsnedf.release_lock); } + + /* propagate to aux tasks */ + if (tsk_rt(t)->has_aux_tasks) { + aux_task_owner_decrease_priority(t); + } #ifdef CONFIG_LITMUS_NESTED_LOCKING } else { @@ -1224,16 +1346,23 @@ static void __decrease_priority_inheritance(struct task_struct* t, effective_priority(t)->comm, effective_priority(t)->pid, (prio_inh) ? prio_inh->comm : "nil", (prio_inh) ? 
prio_inh->pid : -1); + success = 0; } #endif + +out: + return success; } static void decrease_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) { + int success; + raw_spin_lock(&gsnedf_lock); - __decrease_priority_inheritance(t, prio_inh); - + + success = __decrease_priority_inheritance(t, prio_inh); + #ifdef CONFIG_LITMUS_SOFTIRQD if(tsk_rt(t)->cur_klitirqd != NULL) { @@ -1245,7 +1374,7 @@ static void decrease_priority_inheritance(struct task_struct* t, #endif raw_spin_unlock(&gsnedf_lock); - + #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA) if(tsk_rt(t)->held_gpus) { int i; @@ -1828,6 +1957,8 @@ static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = { .allocate_lock = gsnedf_allocate_lock, .increase_prio = increase_priority_inheritance, .decrease_prio = decrease_priority_inheritance, + .__increase_prio = __increase_priority_inheritance, + .__decrease_prio = __decrease_priority_inheritance, #endif #ifdef CONFIG_LITMUS_NESTED_LOCKING .nested_increase_prio = nested_increase_priority_inheritance, diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c index 245e41c25a5d..d24c9167cff8 100644 --- a/litmus/sched_plugin.c +++ b/litmus/sched_plugin.c @@ -137,6 +137,18 @@ static void litmus_dummy_increase_prio(struct task_struct* t, struct task_struct static void litmus_dummy_decrease_prio(struct task_struct* t, struct task_struct* prio_inh) { } + +static int litmus_dummy___increase_prio(struct task_struct* t, struct task_struct* prio_inh) +{ + TRACE_CUR("WARNING: Dummy litmus_dummy___increase_prio called!\n"); + return 0; +} + +static int litmus_dummy___decrease_prio(struct task_struct* t, struct task_struct* prio_inh) +{ + TRACE_CUR("WARNING: Dummy litmus_dummy___decrease_prio called!\n"); + return 0; +} #endif #ifdef CONFIG_LITMUS_SOFTIRQD @@ -227,6 +239,8 @@ struct sched_plugin linux_sched_plugin = { .allocate_lock = litmus_dummy_allocate_lock, .increase_prio = litmus_dummy_increase_prio, .decrease_prio = litmus_dummy_decrease_prio, + .__increase_prio = litmus_dummy___increase_prio, + .__decrease_prio = litmus_dummy___decrease_prio, #endif #ifdef CONFIG_LITMUS_NESTED_LOCKING .nested_increase_prio = litmus_dummy_nested_increase_prio, @@ -289,6 +303,8 @@ int register_sched_plugin(struct sched_plugin* plugin) CHECK(allocate_lock); CHECK(increase_prio); CHECK(decrease_prio); + CHECK(__increase_prio); + CHECK(__decrease_prio); #endif #ifdef CONFIG_LITMUS_NESTED_LOCKING CHECK(nested_increase_prio); -- cgit v1.2.2
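
Usage note: below is a minimal userspace sketch of how the new
sys_slave_non_rt_threads() interface is meant to be driven. The wrapper
slave_non_rt_threads() and the liblitmus-style setup step are assumptions
(this commit adds only the kernel side), so treat it as an illustrative
sketch rather than the supported API.

	#include <pthread.h>
	#include <stdio.h>
	#include <unistd.h>

	/* assumed wrapper around sys_slave_non_rt_threads(); a matching
	 * userspace patch would provide it (not part of this commit) */
	extern long slave_non_rt_threads(void);

	static volatile int done;

	/* stand-in for a driver helper thread (e.g. a GPU callback thread);
	 * it starts life as an ordinary SCHED_OTHER thread */
	static void *helper_main(void *arg)
	{
		while (!done)
			usleep(1000);
		return NULL;
	}

	int main(void)
	{
		pthread_t helper;

		if (pthread_create(&helper, NULL, helper_main, NULL) != 0)
			return 1;

		/* (this thread would become a LITMUS^RT real-time task here,
		 * via the usual liblitmus setup calls) */

		/* one call per process: threads with no rt parameters, or with
		 * the MAGIC_AUX_TASK_PERIOD marker, become aux tasks at the
		 * lowest real-time priority; real-time threads are registered
		 * as aux-task owners whose priority the aux tasks inherit
		 * while an owner is blocked */
		if (slave_non_rt_threads() != 0)
			fprintf(stderr, "slave_non_rt_threads failed\n");

		done = 1;
		pthread_join(helper, NULL);
		return 0;
	}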