From 4e8f9b7c2e9134ca31feb91dee3609a95df6de56 Mon Sep 17 00:00:00 2001 From: Glenn Elliott Date: Sun, 16 Sep 2012 17:44:37 -0400 Subject: Implement real-time aux threads. G-EDF only. --- litmus/aux_tasks.c | 243 +++++++++++++++++++++++++++---------------------- litmus/edf_common.c | 83 ++++++++++++----- litmus/litmus.c | 44 +++------ litmus/preempt.c | 25 ++++- litmus/sched_gsn_edf.c | 169 ++++++++++++++++------------------ litmus/sched_litmus.c | 4 +- litmus/sched_plugin.c | 22 ++++- 7 files changed, 330 insertions(+), 260 deletions(-) (limited to 'litmus') diff --git a/litmus/aux_tasks.c b/litmus/aux_tasks.c index c197a95fc3a1..5057137bbbea 100644 --- a/litmus/aux_tasks.c +++ b/litmus/aux_tasks.c @@ -10,22 +10,37 @@ static int admit_aux_task(struct task_struct *t) { int retval = 0; struct task_struct *leader = t->group_leader; - + + + /* budget enforcement increments job numbers. job numbers are used in + * tie-breaking of aux_tasks. method helps ensure: + * 1) aux threads with no inherited priority can starve another (they share + * the CPUs equally. + * 2) aux threads that inherit the same priority cannot starve each other. + * + * Assuming aux threads are well-behavied (they do very little work and + * suspend), risk of starvation should not be an issue, but this is a + * fail-safe. + */ struct rt_task tp = { - .exec_cost = 0, - .period = MAGIC_AUX_TASK_PERIOD, - .relative_deadline = MAGIC_AUX_TASK_PERIOD, + //.period = MAGIC_AUX_TASK_PERIOD, + //.relative_deadline = MAGIC_AUX_TASK_PERIOD, + .period = 1000000, /* has to wait 1 ms before it can run again once it has exhausted budget */ + .relative_deadline = 1000000, + .exec_cost = 1000000, /* allow full utilization */ .phase = 0, .cpu = task_cpu(leader), /* take CPU of group leader */ - .budget_policy = NO_ENFORCEMENT, + //.budget_policy = NO_ENFORCEMENT, + .budget_policy = QUANTUM_ENFORCEMENT, + .budget_signal_policy = NO_SIGNALS, .cls = RT_CLASS_BEST_EFFORT }; - + struct sched_param param = { .sched_priority = 0}; - + tsk_rt(t)->task_params = tp; retval = sched_setscheduler_nocheck(t, SCHED_LITMUS, ¶m); - + return retval; } @@ -33,19 +48,19 @@ int exit_aux_task(struct task_struct *t) { int retval = 0; struct task_struct *leader = t->group_leader; - + BUG_ON(!tsk_rt(t)->is_aux_task); - + TRACE_CUR("Aux task %s/%d is exiting from %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); - + list_del(&tsk_rt(t)->aux_task_node); - + tsk_rt(t)->is_aux_task = 0; - + if (tsk_rt(t)->inh_task) { litmus->decrease_prio(t, NULL); } - + return retval; } @@ -53,34 +68,23 @@ static int aux_tasks_increase_priority(struct task_struct *leader, struct task_s { int retval = 0; struct list_head *pos; - + TRACE_CUR("Increasing priority of aux tasks in group %s/%d.\n", leader->comm, leader->pid); - + list_for_each(pos, &tsk_aux(leader)->aux_tasks) { struct task_struct *aux = container_of(list_entry(pos, struct rt_param, aux_task_node), struct task_struct, rt_param); if (!is_realtime(aux)) { -#if 0 - /* currently can't do this here because of scheduler deadlock on itself */ - TRACE_CUR("aux_tasks_increase_priorityting aux task: %s/%d\n", aux->comm, aux->pid); - retval = admit_aux_task(aux); - - if (retval != 0) { - TRACE_CUR("failed to admit aux task %s/%d\n", aux->comm, aux->pid); - goto out; - } -#endif TRACE_CUR("skipping non-real-time aux task %s/%d\n", aux->comm, aux->pid); } - + // aux tasks don't touch rt locks, so no nested call needed. 
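/* A minimal user-space sketch of the starvation argument made in the comment
 * above: quantum budget enforcement advances each aux thread's job number
 * roughly in lockstep, and the (pid, job#) hash tie-break in edf_common.c then
 * keeps rotating which equal-priority aux thread wins.  mix() below is a
 * stand-in for the kernel's hash_32()-based edf_hash(), so the concrete
 * winners differ, but the rotation effect is the same. */

#include <stdio.h>
#include <stdint.h>

static uint32_t mix(uint32_t job_no, uint32_t pid)
{
	uint32_t h = job_no * 2654435761u;	/* Knuth multiplicative hash */
	return (h ^ pid) * 2654435761u;
}

int main(void)
{
	const uint32_t pid_a = 101, pid_b = 102;	/* two non-inheriting aux threads */
	uint32_t job;

	/* each exhausted 1 ms budget bumps job_no and re-randomizes the tie */
	for (job = 0; job < 8; job++) {
		int a_first = mix(job, pid_a) < mix(job, pid_b);	/* lower hash wins */
		printf("job %2u: aux thread %s wins the tie-break\n",
		       job, a_first ? "A" : "B");
	}
	return 0;
}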
TRACE_CUR("increasing %s/%d.\n", aux->comm, aux->pid); retval = litmus->__increase_prio(aux, hp); } - - //out: + return retval; } @@ -88,30 +92,15 @@ static int aux_tasks_decrease_priority(struct task_struct *leader, struct task_s { int retval = 0; struct list_head *pos; - + TRACE_CUR("Decreasing priority of aux tasks in group %s/%d.\n", leader->comm, leader->pid); - + list_for_each(pos, &tsk_aux(leader)->aux_tasks) { struct task_struct *aux = container_of(list_entry(pos, struct rt_param, aux_task_node), struct task_struct, rt_param); - + if (!is_realtime(aux)) { -#if 0 - /* currently can't do this here because of scheduler deadlock on itself */ - TRACE_CUR("aux_tasks_increase_priorityting aux task: %s/%d\n", aux->comm, aux->pid); - retval = admit_aux_task(aux); - - if (retval != 0) - goto out; - - if (hp) { - // aux tasks don't touch rt locks, so no nested call needed. - TRACE_CUR("decreasing (actually increasing) %s/%d.\n", aux->comm, aux->pid); - retval = litmus->__increase_prio(aux, hp); - } -#endif - TRACE_CUR("skipping non-real-time aux task %s/%d\n", aux->comm, aux->pid); } else { @@ -119,8 +108,7 @@ static int aux_tasks_decrease_priority(struct task_struct *leader, struct task_s retval = litmus->__decrease_prio(aux, hp); } } - - //out: + return retval; } @@ -133,20 +121,20 @@ int aux_task_owner_increase_priority(struct task_struct *t) BUG_ON(!tsk_rt(t)->has_aux_tasks); BUG_ON(!is_realtime(t)); BUG_ON(!binheap_is_in_heap(&tsk_rt(t)->aux_task_owner_node)); - + leader = t->group_leader; - + TRACE_CUR("task %s/%d in group %s/%d increasing priority.\n", t->comm, t->pid, leader->comm, leader->pid); hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), struct task_struct, rt_param); - + if (hp == t) { goto out; // already hp, nothing to do. } - + binheap_decrease(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners); - + hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), struct task_struct, rt_param); @@ -155,7 +143,7 @@ int aux_task_owner_increase_priority(struct task_struct *t) retval = aux_tasks_increase_priority(leader, (tsk_rt(hp)->inh_task) ? 
tsk_rt(hp)->inh_task : hp); } - + out: return retval; } @@ -166,15 +154,15 @@ int aux_task_owner_decrease_priority(struct task_struct *t) struct task_struct *leader; struct task_struct *hp = NULL; struct task_struct *new_hp = NULL; - + BUG_ON(!tsk_rt(t)->has_aux_tasks); BUG_ON(!is_realtime(t)); BUG_ON(!binheap_is_in_heap(&tsk_rt(t)->aux_task_owner_node)); - + leader = t->group_leader; - + TRACE_CUR("task %s/%d in group %s/%d decresing priority.\n", t->comm, t->pid, leader->comm, leader->pid); - + hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), struct task_struct, rt_param); binheap_delete(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners); @@ -182,7 +170,7 @@ int aux_task_owner_decrease_priority(struct task_struct *t) struct rt_param, aux_task_owner_node); new_hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), struct task_struct, rt_param); - + if (hp == t && new_hp != t) { TRACE_CUR("%s/%d is no longer hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); retval = aux_tasks_decrease_priority(leader, @@ -204,28 +192,28 @@ long enable_aux_task_owner(struct task_struct *t) TRACE_CUR("task %s/%d is not an aux owner\n", t->comm, t->pid); return -1; } - + BUG_ON(!is_realtime(t)); - + if (binheap_is_in_heap(&tsk_rt(t)->aux_task_owner_node)) { TRACE_CUR("task %s/%d is already active\n", t->comm, t->pid); goto out; } - + binheap_add(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node); - + hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), struct task_struct, rt_param); if (hp == t) { /* we're the new hp */ TRACE_CUR("%s/%d is new hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); - + retval = aux_tasks_increase_priority(leader, (tsk_rt(hp)->inh_task)? tsk_rt(hp)->inh_task : hp); } - + out: return retval; } @@ -236,42 +224,42 @@ long disable_aux_task_owner(struct task_struct *t) struct task_struct *leader = t->group_leader; struct task_struct *hp; struct task_struct *new_hp = NULL; - + if (!tsk_rt(t)->has_aux_tasks) { TRACE_CUR("task %s/%d is not an aux owner\n", t->comm, t->pid); return -1; } - + BUG_ON(!is_realtime(t)); - + if (!binheap_is_in_heap(&tsk_rt(t)->aux_task_owner_node)) { TRACE_CUR("task %s/%d is already not active\n", t->comm, t->pid); goto out; } - + TRACE_CUR("task %s/%d exiting from group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); - + hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), struct task_struct, rt_param); binheap_delete(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners); - + if (!binheap_empty(&tsk_aux(leader)->aux_task_owners)) { new_hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), struct task_struct, rt_param); } - + if (hp == t && new_hp != t) { struct task_struct *to_inh = NULL; - + TRACE_CUR("%s/%d is no longer hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); - + if (new_hp) { to_inh = (tsk_rt(new_hp)->inh_task) ? 
tsk_rt(new_hp)->inh_task : new_hp; } - + retval = aux_tasks_decrease_priority(leader, to_inh); } - + out: return retval; } @@ -284,60 +272,47 @@ static int aux_task_owner_max_priority_order(struct binheap_node *a, struct task_struct, rt_param); struct task_struct *d_b = container_of(binheap_entry(b, struct rt_param, aux_task_owner_node), struct task_struct, rt_param); - + BUG_ON(!d_a); BUG_ON(!d_b); - + return litmus->compare(d_a, d_b); } -asmlinkage long sys_slave_non_rt_threads(void) +static long __do_enable_slave_non_rt_threads(void) { long retval = 0; struct task_struct *leader; struct task_struct *t; - read_lock_irq(&tasklist_lock); - leader = current->group_leader; - -#if 0 - t = leader; - do { - if (tsk_rt(t)->has_aux_tasks || tsk_rt(t)->is_aux_task) { - printk("slave_non_rt_tasks may only be called once per process.\n"); - retval = -EINVAL; - goto out_unlock; - } - } while (t != leader); -#endif - + if (!tsk_aux(leader)->initialized) { INIT_LIST_HEAD(&tsk_aux(leader)->aux_tasks); INIT_BINHEAP_HANDLE(&tsk_aux(leader)->aux_task_owners, aux_task_owner_max_priority_order); tsk_aux(leader)->initialized = 1; } - + t = leader; do { /* doesn't hurt to initialize them both */ INIT_LIST_HEAD(&tsk_rt(t)->aux_task_node); INIT_BINHEAP_NODE(&tsk_rt(t)->aux_task_owner_node); - + TRACE_CUR("Checking task in %s/%d: %s/%d = (p = %llu):\n", leader->comm, leader->pid, t->comm, t->pid, tsk_rt(t)->task_params.period); - + /* inspect heap_node to see if it is an rt task */ - if (tsk_rt(t)->task_params.period == 0 || - tsk_rt(t)->task_params.period == MAGIC_AUX_TASK_PERIOD) { + if (tsk_rt(t)->task_params.period == 0) { //|| + // tsk_rt(t)->task_params.period == MAGIC_AUX_TASK_PERIOD) { if (!tsk_rt(t)->is_aux_task) { TRACE_CUR("AUX task in %s/%d: %s/%d:\n", leader->comm, leader->pid, t->comm, t->pid); /* hasn't been aux_tasks_increase_priorityted into rt. make it a aux. */ tsk_rt(t)->is_aux_task = 1; list_add_tail(&tsk_rt(t)->aux_task_node, &tsk_aux(leader)->aux_tasks); - + (void)admit_aux_task(t); } else { @@ -348,10 +323,6 @@ asmlinkage long sys_slave_non_rt_threads(void) if (!tsk_rt(t)->has_aux_tasks) { TRACE_CUR("task in %s/%d: %s/%d:\n", leader->comm, leader->pid, t->comm, t->pid); tsk_rt(t)->has_aux_tasks = 1; - if (is_realtime(t)) { - binheap_add(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners, - struct rt_param, aux_task_owner_node); - } } else { TRACE_CUR("task in %s/%d is already set up: %s/%d\n", leader->comm, leader->pid, t->comm, t->pid); @@ -361,16 +332,72 @@ asmlinkage long sys_slave_non_rt_threads(void) t = next_thread(t); } while(t != leader); - + if (!binheap_empty(&tsk_aux(leader)->aux_task_owners)) { struct task_struct *hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), struct task_struct, rt_param); TRACE_CUR("found hp in group: %s/%d\n", hp->comm, hp->pid); retval = aux_tasks_increase_priority(leader, - (tsk_rt(hp)->inh_task)? tsk_rt(hp)->inh_task : hp); + (tsk_rt(hp)->inh_task)? tsk_rt(hp)->inh_task : hp); + } + + return retval; +} + +static long __do_disable_slave_non_rt_threads(void) +{ + long retval = 0; + struct task_struct *leader; + struct task_struct *t; + + leader = current->group_leader; + + t = leader; + do { + if (tsk_rt(t)->is_aux_task) { + + TRACE_CUR("%s/%d is an aux task.\n", t->comm, t->pid); + + if (is_realtime(t)) { + long temp_retval; + struct sched_param param = { .sched_priority = 0}; + + TRACE_CUR("%s/%d is real-time. 
Changing policy to SCHED_NORMAL.\n", t->comm, t->pid); + + temp_retval = sched_setscheduler_nocheck(t, SCHED_NORMAL, ¶m); + + if (temp_retval != 0) { + TRACE_CUR("error changing policy of %s/%d to SCHED_NORMAL\n", t->comm, t->pid); + if (retval == 0) { + retval = temp_retval; + } + else { + TRACE_CUR("prior error (%d) masks new error (%d)\n", retval, temp_retval); + } + } + } + + tsk_rt(t)->is_aux_task = 0; + } + t = next_thread(t); + } while(t != leader); + + return retval; +} + +asmlinkage long sys_slave_non_rt_threads(int enable) +{ + long retval; + + read_lock_irq(&tasklist_lock); + + if (enable) { + retval = __do_enable_slave_non_rt_threads(); + } + else { + retval = __do_disable_slave_non_rt_threads(); } - //out_unlock: read_unlock_irq(&tasklist_lock); return retval; @@ -378,7 +405,7 @@ asmlinkage long sys_slave_non_rt_threads(void) #else -asmlinkage long sys_slave_non_rt_tasks(void) +asmlinkage long sys_slave_non_rt_tasks(int enable) { printk("Unsupported. Recompile with CONFIG_LITMUS_LOCKING.\n"); return -EINVAL; diff --git a/litmus/edf_common.c b/litmus/edf_common.c index 9b439299e5fc..ca06f6ec103e 100644 --- a/litmus/edf_common.c +++ b/litmus/edf_common.c @@ -22,7 +22,7 @@ #include #endif -#ifdef CONFIG_EDF_TIE_BREAK_HASH +//#ifdef CONFIG_EDF_TIE_BREAK_HASH #include static inline long edf_hash(struct task_struct *t) { @@ -41,7 +41,22 @@ static inline long edf_hash(struct task_struct *t) */ return hash_32(hash_32((u32)tsk_rt(t)->job_params.job_no, 32) ^ t->pid, 32); } -#endif +//#endif + +int aux_tie_break(struct task_struct *first, struct task_struct *second) +{ + long fhash = edf_hash(first); + long shash = edf_hash(second); + if (fhash < shash) { + TRACE_CUR("%s/%d >> %s/%d --- %d\n", first->comm, first->pid, second->comm, second->pid, 1); + return 1; + } + else if(fhash == shash) { + TRACE_CUR("%s/%d >> %s/%d --- %d\n", first->comm, first->pid, second->comm, second->pid, (first->pid < second->pid)); + return first->pid < second->pid; + } + return 0; +} /* edf_higher_prio - returns true if first has a higher EDF priority @@ -60,6 +75,11 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) struct task_struct *first_task = first; struct task_struct *second_task = second; + int first_lo_aux; + int second_lo_aux; + int first_hi_aux; + int second_hi_aux; + /* There is no point in comparing a task to itself. */ if (first && first == second) { TRACE_CUR("WARNING: pointless edf priority comparison: %s/%d\n", first->comm, first->pid); @@ -74,23 +94,34 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) } #ifdef CONFIG_LITMUS_LOCKING - /* aux threads with no inheritance have lowest priority; however, do a PID - * tie break if both threads are aux threads with no inheritance. 
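/* A minimal user-space sketch of how sys_slave_non_rt_threads(int enable),
 * defined above, is meant to be toggled for a thread group.  The wrapper name
 * and the __NR_slave_non_rt_threads value below are assumptions for
 * illustration only; in practice the number comes from the LITMUS^RT unistd
 * headers (or a liblitmus wrapper). */

#define _GNU_SOURCE
#include <pthread.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_slave_non_rt_threads
#define __NR_slave_non_rt_threads 379	/* placeholder number */
#endif

static long slave_non_rt_threads(int enable)
{
	return syscall(__NR_slave_non_rt_threads, enable);
}

static void *helper(void *arg)
{
	/* I/O, callbacks, bookkeeping done on behalf of the rt threads ... */
	return NULL;
}

int main(void)
{
	pthread_t aux;

	pthread_create(&aux, NULL, helper, NULL);

	/* every non-real-time thread in this group becomes an aux task and can
	 * inherit the priority of the group's real-time threads while those
	 * are suspended */
	slave_non_rt_threads(1);

	/* ... create and run the real-time threads here ... */

	/* tear-down: aux threads are pushed back to SCHED_NORMAL */
	slave_non_rt_threads(0);

	pthread_join(aux, NULL);
	return 0;
}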
- */ - if (unlikely(first->rt_param.is_aux_task && !first->rt_param.inh_task)) { - if (second->rt_param.is_aux_task && !second->rt_param.inh_task) { - /* pid break */ - if (first->pid < second->pid) { - return 1; - } - } + + first_lo_aux = first->rt_param.is_aux_task && !first->rt_param.inh_task; + second_lo_aux = second->rt_param.is_aux_task && !second->rt_param.inh_task; + + if (first_lo_aux && !second_lo_aux) { + TRACE_CUR("%s/%d >> %s/%d --- 0\n", first->comm, first->pid, second->comm, second->pid); return 0; } - if (unlikely(second->rt_param.is_aux_task && !second->rt_param.inh_task)) { - /* no need for pid break -- case already tested */ + else if (second_lo_aux && !first_lo_aux) { + TRACE_CUR("%s/%d >> %s/%d --- 1\n", first->comm, first->pid, second->comm, second->pid); return 1; } - + else if (first_lo_aux && second_lo_aux) { + int aux_lo_tie_break = aux_tie_break(first, second); + TRACE_CUR("low aux tie break: %s/%d >> %s/%d --- %d\n", first->comm, first->pid, second->comm, second->pid, aux_lo_tie_break); + return aux_lo_tie_break; + } + + first_hi_aux = first->rt_param.is_aux_task && first->rt_param.inh_task; + second_hi_aux = second->rt_param.is_aux_task && second->rt_param.inh_task; + + if (first_hi_aux && second_hi_aux && first->rt_param.inh_task == second->rt_param.inh_task) { + int aux_hi_tie_break = aux_tie_break(first, second); + TRACE_CUR("hi aux tie break: %s/%d >> %s/%d --- %d\n", first->comm, first->pid, second->comm, second->pid, aux_hi_tie_break); + return aux_hi_tie_break; + } + + /* Check for EFFECTIVE priorities. Change task * used for comparison in such a case. */ @@ -149,7 +180,7 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) */ if (get_lateness(first_task) > get_lateness(second_task)) { return 1; - } + } pid_break = (get_lateness(first_task) == get_lateness(second_task)); @@ -171,8 +202,8 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) return 1; } pid_break = _eq(fnorm, snorm); - - + + #elif defined(CONFIG_EDF_TIE_BREAK_HASH) /* Tie break by comparing hashs of (pid, job#) tuple. There should be * a 50% chance that first_task has a higher priority than second_task. @@ -184,8 +215,8 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) } pid_break = (fhash == shash); #else - - + + /* CONFIG_EDF_PID_TIE_BREAK */ pid_break = 1; // fall through to tie-break by pid; #endif @@ -197,11 +228,17 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) } else if (first_task->pid == second_task->pid) { #ifdef CONFIG_LITMUS_SOFTIRQD - if (first_task->rt_param.is_proxy_thread < + if (first_task->rt_param.is_proxy_thread < second_task->rt_param.is_proxy_thread) { return 1; } #endif + if (tsk_rt(first)->is_aux_task < tsk_rt(second)->is_aux_task) { + TRACE_CUR("AUX BREAK!\n"); + return 1; + } + + /* Something could be wrong if you get this far. */ if (unlikely(first->rt_param.inh_task == second->rt_param.inh_task)) { @@ -220,8 +257,8 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) BUG_ON(!first->rt_param.inh_task && !second->rt_param.inh_task); - /* The task with the inherited priority wins. */ - if (!second->rt_param.inh_task) { + /* The task withOUT the inherited priority wins. */ + if (second->rt_param.inh_task) { /* * common with aux tasks. 
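/* A condensed restatement of the ordering the rewritten comparison above
 * gives aux threads.  This is a simplified sketch: effective-priority
 * promotion via inh_task and the lateness/normalized-finish tie-breaks of
 * ordinary jobs are folded into a plain deadline comparison here, and the
 * hash is a stand-in for edf_hash(). */

#include <stdbool.h>

struct tsk {
	unsigned int pid;
	unsigned int job_no;
	unsigned long long deadline;
	bool is_aux_task;
	const struct tsk *inh_task;	/* inherited-from task, or NULL */
};

static unsigned int hash_mix(unsigned int job_no, unsigned int pid)
{
	return ((job_no * 2654435761u) ^ pid) * 2654435761u;
}

static bool aux_tie(const struct tsk *a, const struct tsk *b)
{
	unsigned int ha = hash_mix(a->job_no, a->pid);
	unsigned int hb = hash_mix(b->job_no, b->pid);
	return ha < hb || (ha == hb && a->pid < b->pid);
}

bool aux_aware_higher_prio(const struct tsk *first, const struct tsk *second)
{
	bool first_lo  = first->is_aux_task  && !first->inh_task;
	bool second_lo = second->is_aux_task && !second->inh_task;

	if (first_lo && !second_lo)
		return false;		/* non-inheriting aux loses to everything */
	if (second_lo && !first_lo)
		return true;
	if (first_lo && second_lo)
		return aux_tie(first, second);	/* hash keeps them from starving each other */

	/* two aux threads inheriting from the same owner also hash-tie-break */
	if (first->is_aux_task && second->is_aux_task &&
	    first->inh_task && first->inh_task == second->inh_task)
		return aux_tie(first, second);

	/* everything else falls through to the usual EDF rules (simplified) */
	return first->deadline < second->deadline ||
	       (first->deadline == second->deadline && first->pid < second->pid);
}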
TRACE_CUR("unusual comparison: " diff --git a/litmus/litmus.c b/litmus/litmus.c index 1b4182ac3337..e2bf2a7ad01b 100644 --- a/litmus/litmus.c +++ b/litmus/litmus.c @@ -338,7 +338,7 @@ void init_gpu_affinity_state(struct task_struct* p) //p->rt_param.gpu_fb_param_a = _frac(14008, 10000); //p->rt_param.gpu_fb_param_b = _frac(16024, 10000); -#if 0 +#if 0 // emperical; p->rt_param.gpu_fb_param_a[0] = _frac(7550, 10000); p->rt_param.gpu_fb_param_b[0] = _frac(45800, 10000); @@ -362,13 +362,13 @@ static void reinit_litmus_state(struct task_struct* p, int restore) { struct rt_task user_config = {}; void* ctrl_page = NULL; - + #ifdef CONFIG_LITMUS_NESTED_LOCKING binheap_order_t prio_order = NULL; #endif TRACE_TASK(p, "reinit_litmus_state: restore = %d\n", restore); - + if (restore) { /* Safe user-space provided configuration data. * and allocated page. */ @@ -419,7 +419,7 @@ static void reinit_litmus_state(struct task_struct* p, int restore) if (!restore) memset(&p->aux_data, 0, sizeof(p->aux_data)); #endif - + /* Restore preserved fields. */ if (restore) { p->rt_param.task_params = user_config; @@ -437,11 +437,8 @@ static void reinit_litmus_state(struct task_struct* p, int restore) } -#ifdef CONFIG_LITMUS_LOCKING -long __litmus_admit_task(struct task_struct* tsk, int clear_aux) -#else + long __litmus_admit_task(struct task_struct* tsk) -#endif { long retval = 0; unsigned long flags; @@ -486,14 +483,6 @@ long __litmus_admit_task(struct task_struct* tsk) atomic_set(&tsk_rt(tsk)->klitirqd_sem_stat, NOT_HELD); #endif -#ifdef CONFIG_LITMUS_LOCKING - /* turns out our aux thread isn't really an aux thread. */ - if (clear_aux && tsk_rt(tsk)->is_aux_task) { - exit_aux_task(tsk); - tsk_rt(tsk)->has_aux_tasks = 1; - } -#endif - retval = litmus->admit_task(tsk); if (!retval) { @@ -511,7 +500,7 @@ out_unlock: long litmus_admit_task(struct task_struct* tsk) { long retval = 0; - + BUG_ON(is_realtime(tsk)); if (get_rt_relative_deadline(tsk) == 0 || @@ -533,12 +522,8 @@ long litmus_admit_task(struct task_struct* tsk) goto out; } -#ifdef CONFIG_LITMUS_LOCKING - retval = __litmus_admit_task(tsk, (tsk_rt(tsk)->task_params.period != MAGIC_AUX_TASK_PERIOD)); -#else retval = __litmus_admit_task(tsk); -#endif - + out: return retval; } @@ -624,18 +609,21 @@ out: */ void litmus_fork(struct task_struct* p) { - reinit_litmus_state(p, 0); - if (is_realtime(p)) { TRACE_TASK(p, "fork, is real-time\n"); + /* clean out any litmus related state, don't preserve anything */ - //reinit_litmus_state(p, 0); + reinit_litmus_state(p, 0); + /* Don't let the child be a real-time task. */ p->sched_reset_on_fork = 1; + } else { /* non-rt tasks might have ctrl_page set */ tsk_rt(p)->ctrl_page = NULL; - + + reinit_litmus_state(p, 0); + /* still don't inherit any parental parameters */ //memset(&p->rt_param, 0, sizeof(p->rt_param)); //memset(&p->aux_data, 0, sizeof(p->aux_data)); @@ -736,10 +724,6 @@ static int __init _init_litmus(void) init_topology(); #endif -#ifdef CONFIG_LITMUS_NVIDIA - //init_nvidia_info(); -#endif - return 0; } diff --git a/litmus/preempt.c b/litmus/preempt.c index a2cae3648e15..c9ccc80c1df9 100644 --- a/litmus/preempt.c +++ b/litmus/preempt.c @@ -74,25 +74,37 @@ void litmus_reschedule(int cpu) * is not aware of the need to reschedule at this point. */ /* is a context switch in progress? 
*/ - if (cpu_is_in_sched_state(cpu, TASK_PICKED)) + if (cpu_is_in_sched_state(cpu, TASK_PICKED)) { picked_transition_ok = sched_state_transition_on( cpu, TASK_PICKED, PICKED_WRONG_TASK); + TRACE_CUR("cpu %d: picked_transition_ok = %d\n", cpu, picked_transition_ok); + } + else { + TRACE_CUR("cpu %d: picked_transition_ok = 0 (static)\n", cpu); + } + if (!picked_transition_ok && cpu_is_in_sched_state(cpu, TASK_SCHEDULED)) { /* We either raced with the end of the context switch, or the * CPU was in TASK_SCHEDULED anyway. */ scheduled_transition_ok = sched_state_transition_on( cpu, TASK_SCHEDULED, SHOULD_SCHEDULE); + TRACE_CUR("cpu %d: scheduled_transition_ok = %d\n", cpu, scheduled_transition_ok); + } + else { + TRACE_CUR("cpu %d: scheduled_transition_ok = 0 (static)\n", cpu); } /* If the CPU was in state TASK_SCHEDULED, then we need to cause the * scheduler to be invoked. */ if (scheduled_transition_ok) { - if (smp_processor_id() == cpu) + if (smp_processor_id() == cpu) { set_tsk_need_resched(current); - else + } + else { smp_send_reschedule(cpu); + } } TRACE_STATE("%s picked-ok:%d sched-ok:%d\n", @@ -103,11 +115,16 @@ void litmus_reschedule(int cpu) void litmus_reschedule_local(void) { - if (is_in_sched_state(TASK_PICKED)) + if (is_in_sched_state(TASK_PICKED)) { set_sched_state(PICKED_WRONG_TASK); + + TRACE_CUR("cpu %d: transitioned to PICKED_WRONG_TASK\n", smp_processor_id()); + } else if (is_in_sched_state(TASK_SCHEDULED | SHOULD_SCHEDULE)) { set_sched_state(WILL_SCHEDULE); set_tsk_need_resched(current); + + TRACE_CUR("cpu %d: transitioned to WILL_SCHEDULE\n", smp_processor_id()); } } diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c index 5b8ca6698423..270e06c20bbf 100644 --- a/litmus/sched_gsn_edf.c +++ b/litmus/sched_gsn_edf.c @@ -167,6 +167,7 @@ struct tasklet_head gsnedf_pending_tasklets; * TRACE() log. #define WANT_ALL_SCHED_EVENTS */ +//#define WANT_ALL_SCHED_EVENTS static int cpu_lower_prio(struct binheap_node *_a, struct binheap_node *_b) { @@ -209,8 +210,17 @@ static noinline void link_task_to_cpu(struct task_struct* linked, struct task_struct* tmp; int on_cpu; + //int print = (linked != NULL || entry->linked != NULL); + BUG_ON(linked && !is_realtime(linked)); + /* + if (print) { + TRACE_CUR("linked = %s/%d\n", (linked) ? linked->comm : "(nil)", (linked)? linked->pid : 0); + TRACE_CUR("entry->linked = %s/%d\n", (entry->linked) ? entry->linked->comm : "(nil)", (entry->linked)? entry->linked->pid : 0); + } + */ + /* Currently linked task is set to be unlinked. */ if (entry->linked) { entry->linked->rt_param.linked_on = NO_CPU; @@ -246,12 +256,18 @@ static noinline void link_task_to_cpu(struct task_struct* linked, linked->rt_param.linked_on = entry->cpu; } entry->linked = linked; -#ifdef WANT_ALL_SCHED_EVENTS - if (linked) - TRACE_TASK(linked, "linked to %d.\n", entry->cpu); - else - TRACE("NULL linked to %d.\n", entry->cpu); -#endif + + /* + if (print) { + //#ifdef WANT_ALL_SCHED_EVENTS + if (linked) + TRACE_TASK(linked, "linked to %d.\n", entry->cpu); + else + TRACE("NULL linked to %d.\n", entry->cpu); + //#endif + } + */ + update_cpu_position(entry); } @@ -297,36 +313,19 @@ static noinline void requeue(struct task_struct* task) BUG_ON(is_queued(task)); if (is_released(task, litmus_clock())) { - + if (unlikely(tsk_rt(task)->is_aux_task && !is_running(task))) { /* aux_task probably transitioned to real-time while it was blocked */ TRACE_CUR("aux task %s/%d is not ready!\n", task->comm, task->pid); + unlink(task); /* really needed? 
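/* The extra tracing added to litmus_reschedule() in preempt.c above makes the
 * underlying two-step protocol easy to miss, so here is a compressed
 * user-space sketch of it.  The state constants and the CAS helper are
 * stand-ins for the kernel's per-CPU resched_state and
 * sched_state_transition_on(); only the control flow is meant to match. */

#include <stdatomic.h>
#include <stdbool.h>

enum { TASK_SCHEDULED = 1, SHOULD_SCHEDULE = 2, WILL_SCHEDULE = 4,
       TASK_PICKED = 8, PICKED_WRONG_TASK = 16 };

static bool transition(_Atomic int *state, int from, int to)
{
	int expected = from;
	return atomic_compare_exchange_strong(state, &expected, to);
}

/* first try to invalidate a pick that is already in flight; only if that
 * fails, flag a scheduled/idle CPU so the scheduler really gets invoked
 * (set_tsk_need_resched() locally, an IPI remotely, in the real kernel) */
static bool needs_kick(_Atomic int *cpu_state)
{
	bool picked_ok = transition(cpu_state, TASK_PICKED, PICKED_WRONG_TASK);
	bool sched_ok  = !picked_ok &&
			 transition(cpu_state, TASK_SCHEDULED, SHOULD_SCHEDULE);
	return sched_ok;
}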
*/ } else { __add_ready(&gsnedf, task); - -#if 0 - if (tsk_rt(task)->has_aux_tasks) { - - TRACE_CUR("%s/%d is ready and has aux tasks.\n", task->comm, task->pid); - /* allow it's prio inheritance to act on aux threads */ - enable_aux_task_owner(task); - } -#endif } } else { /* it has got to wait */ add_release(&gsnedf, task); - -#if 0 - if (tsk_rt(task)->has_aux_tasks) { - - TRACE_CUR("%s/%d is waiting for release and has aux tasks.\n", task->comm, task->pid); - /* prevent prio inheritance from acting while it's not ready */ - disable_aux_task_owner(task); - } -#endif } } @@ -368,7 +367,8 @@ static void check_for_preemptions(void) &per_cpu(gsnedf_cpu_entries, task_cpu(task))); if (affinity) last = affinity; - else if (requeue_preempted_job(last->linked)) + + if (requeue_preempted_job(last->linked)) requeue(last->linked); } #else @@ -393,45 +393,11 @@ static noinline void gsnedf_job_arrival(struct task_struct* task) static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks) { unsigned long flags; - //struct bheap_node* node; raw_spin_lock_irqsave(&gsnedf_lock, flags); -#if 0 - node = tasks->head; - while(node) { - struct task_struct *task = bheap2task(node); - - if (tsk_rt(task)->has_aux_tasks) { - - TRACE_CUR("%s/%d is ready and has aux tasks.\n", task->comm, task->pid); - - /* allow it's prio inheritance to act on aux threads */ - enable_aux_task_owner(task); - } - - /* pre-order sub-tree traversal */ - if (node->child) { - /* go down */ - node = node->child; - } - else if(node->parent && node->parent->next) { - /* go up a level and across */ - node = node->parent->next; - } - else if(!node->parent && node->next) { - /* go to the next binomial tree */ - node = node->next; - } - else { - /* the end! */ - node = NULL; - } - } -#endif - __merge_ready(rt, tasks); - + check_for_preemptions(); raw_spin_unlock_irqrestore(&gsnedf_lock, flags); @@ -449,12 +415,12 @@ static noinline void job_completion(struct task_struct *t, int forced) #endif TRACE_TASK(t, "job_completion().\n"); - + /* set flags */ set_rt_flags(t, RT_F_SLEEP); /* prepare for next period */ prepare_for_next_period(t); - + if (is_released(t, litmus_clock())) sched_trace_task_release(t); /* unlink */ @@ -497,6 +463,10 @@ static void gsnedf_tick(struct task_struct* t) } } } + + if(is_realtime(t)) { + TRACE_TASK(t, "tick %llu\n", litmus_clock()); + } } @@ -838,6 +808,8 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) int out_of_time, signal_budget, sleep, preempt, np, exists, blocks; struct task_struct* next = NULL; + //int completion = 0; + #ifdef CONFIG_RELEASE_MASTER /* Bail out early if we are the release master. * The release master never schedules any real-time tasks. @@ -873,22 +845,22 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) TRACE_TASK(prev, "invoked gsnedf_schedule.\n"); #endif - /* - if (exists) + if (exists) { TRACE_TASK(prev, "blocks:%d out_of_time:%d signal_budget: %d np:%d sleep:%d preempt:%d " "state:%d sig:%d\n", blocks, out_of_time, signal_budget, np, sleep, preempt, prev->state, signal_pending(prev)); - */ + } if (entry->linked && preempt) TRACE_TASK(prev, "will be preempted by %s/%d\n", entry->linked->comm, entry->linked->pid); /* Send the signal that the budget has been exhausted */ - if (signal_budget) + if (signal_budget) { send_sigbudget(entry->scheduled); + } /* If a task blocks we have no choice but to reschedule. */ @@ -919,8 +891,10 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) * this. 
Don't do a job completion if we block (can't have timers running * for blocked jobs). */ - if (!np && (out_of_time || sleep) && !blocks) + if (!np && (out_of_time || sleep) && !blocks) { job_completion(entry->scheduled, !sleep); + //completion = 1; + } /* Link pending task if we became unlinked. */ @@ -953,8 +927,21 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) next = prev; } +#if 0 + if (completion) { + TRACE_CUR("switching away from a completion\n"); + } +#endif + sched_state_task_picked(); +#if 0 + if (next && is_realtime(next) && tsk_rt(next)->is_aux_task && !tsk_rt(next)->inh_task) { + TRACE_TASK(next, "is aux with no inheritance. preventing it from actually running.\n"); + next = NULL; + } +#endif + raw_spin_unlock(&gsnedf_lock); #ifdef WANT_ALL_SCHED_EVENTS @@ -965,7 +952,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) else if (exists && !next) TRACE("becomes idle at %llu.\n", litmus_clock()); #endif - + return next; } @@ -991,7 +978,7 @@ static void gsnedf_task_new(struct task_struct * t, int on_rq, int running) unsigned long flags; cpu_entry_t* entry; - TRACE("gsn edf: task new %d\n", t->pid); + TRACE("gsn edf: task new = %d on_rq = %d running = %d\n", t->pid, on_rq, running); raw_spin_lock_irqsave(&gsnedf_lock, flags); @@ -1060,11 +1047,11 @@ static void gsnedf_task_wake_up(struct task_struct *task) #endif if (tsk_rt(task)->has_aux_tasks) { - + TRACE_CUR("%s/%d is ready so aux tasks may not inherit.\n", task->comm, task->pid); disable_aux_task_owner(task); } - + gsnedf_job_arrival(task); raw_spin_unlock_irqrestore(&gsnedf_lock, flags); } @@ -1081,11 +1068,11 @@ static void gsnedf_task_block(struct task_struct *t) unlink(t); if (tsk_rt(t)->has_aux_tasks) { - + TRACE_CUR("%s/%d is blocked so aux tasks may inherit.\n", t->comm, t->pid); enable_aux_task_owner(t); } - + raw_spin_unlock_irqrestore(&gsnedf_lock, flags); BUG_ON(!is_realtime(t)); @@ -1105,17 +1092,17 @@ static void gsnedf_task_exit(struct task_struct * t) exit_aux_task(t); /* cannot be called with gsnedf_lock held */ } #endif - + /* unlink if necessary */ raw_spin_lock_irqsave(&gsnedf_lock, flags); - + #ifdef CONFIG_LITMUS_LOCKING /* make sure we clean up on our way out */ if(tsk_rt(t)->has_aux_tasks) { disable_aux_task_owner(t); /* must be called witl gsnedf_lock held */ } #endif - + unlink(t); if (tsk_rt(t)->scheduled_on != NO_CPU) { gsnedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL; @@ -1161,7 +1148,7 @@ static int __increase_priority_inheritance(struct task_struct* t, prio_inh->comm, prio_inh->pid); goto out; } - + #ifdef CONFIG_LITMUS_NESTED_LOCKING /* this sanity check allows for weaker locking in protocols */ /* TODO (klitirqd): Skip this check if 't' is a proxy thread (???) 
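/* The wake-up/block hooks above arm aux inheritance only while an owner is
 * suspended: gsnedf_task_block() enables the owner, gsnedf_task_wake_up()
 * disables it again.  A minimal sketch of the resulting rule, with the
 * kernel's binheap of owners reduced to a plain array scan and hypothetical
 * field names: */

#include <stdbool.h>
#include <stddef.h>

struct owner {
	unsigned long long deadline;	/* smaller = higher EDF priority */
	bool blocked;			/* i.e. currently enabled as an aux-task owner */
};

/* aux threads inherit from the highest-priority *blocked* owner in the
 * group, so they mostly run while the group's real-time work is stalled */
static const struct owner *aux_inheritance_source(const struct owner *o, size_t n)
{
	const struct owner *hp = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		if (!o[i].blocked)
			continue;
		if (!hp || o[i].deadline < hp->deadline)
			hp = &o[i];
	}
	return hp;	/* NULL: no donor, aux threads keep their base priority */
}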
*/ @@ -1221,8 +1208,8 @@ static int __increase_priority_inheritance(struct task_struct* t, &gsnedf.ready_queue); check_for_preemptions(); } - - + + /* propagate to aux tasks */ if (tsk_rt(t)->has_aux_tasks) { aux_task_owner_increase_priority(t); @@ -1242,7 +1229,7 @@ static int __increase_priority_inheritance(struct task_struct* t, success = 0; } #endif - + out: return success; } @@ -1251,7 +1238,7 @@ out: static void increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) { int success; - + raw_spin_lock(&gsnedf_lock); success = __increase_priority_inheritance(t, prio_inh); @@ -1267,7 +1254,7 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str #endif raw_spin_unlock(&gsnedf_lock); - + #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA) if(tsk_rt(t)->held_gpus) { int i; @@ -1286,7 +1273,7 @@ static int __decrease_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) { int success = 1; - + if (prio_inh == tsk_rt(t)->inh_task) { /* relationship already established. */ TRACE_TASK(t, "already inherits priority from %s/%d\n", @@ -1294,7 +1281,7 @@ static int __decrease_priority_inheritance(struct task_struct* t, (prio_inh) ? prio_inh->pid : 0); goto out; } - + #ifdef CONFIG_LITMUS_NESTED_LOCKING if(__edf_higher_prio(t, EFFECTIVE, prio_inh, BASE)) { #endif @@ -1331,7 +1318,7 @@ static int __decrease_priority_inheritance(struct task_struct* t, } raw_spin_unlock(&gsnedf.release_lock); } - + /* propagate to aux tasks */ if (tsk_rt(t)->has_aux_tasks) { aux_task_owner_decrease_priority(t); @@ -1349,7 +1336,7 @@ static int __decrease_priority_inheritance(struct task_struct* t, success = 0; } #endif - + out: return success; } @@ -1358,11 +1345,11 @@ static void decrease_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) { int success; - + raw_spin_lock(&gsnedf_lock); - + success = __decrease_priority_inheritance(t, prio_inh); - + #ifdef CONFIG_LITMUS_SOFTIRQD if(tsk_rt(t)->cur_klitirqd != NULL) { @@ -1374,7 +1361,7 @@ static void decrease_priority_inheritance(struct task_struct* t, #endif raw_spin_unlock(&gsnedf_lock); - + #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA) if(tsk_rt(t)->held_gpus) { int i; diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c index 9a6fe487718e..62854b576796 100644 --- a/litmus/sched_litmus.c +++ b/litmus/sched_litmus.c @@ -177,8 +177,10 @@ static void enqueue_task_litmus(struct rq *rq, struct task_struct *p, litmus->task_wake_up(p); rq->litmus.nr_running++; - } else + } else { TRACE_TASK(p, "ignoring an enqueue, not a wake up.\n"); + //WARN_ON(1); + } } static void dequeue_task_litmus(struct rq *rq, struct task_struct *p, diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c index d24c9167cff8..f9423861eb1f 100644 --- a/litmus/sched_plugin.c +++ b/litmus/sched_plugin.c @@ -31,11 +31,19 @@ void preempt_if_preemptable(struct task_struct* t, int cpu) int reschedule = 0; - if (!t) + TRACE_CUR("preempt_if_preemptable: %s/%d\n", + (t) ? t->comm : "(nil)", + (t) ? 
t->pid : 0);
+
+	if (!t) {
+		TRACE_CUR("unconditionally reschedule\n");
 		/* move non-real-time task out of the way */
 		reschedule = 1;
+	}
 	else {
 		if (smp_processor_id() == cpu) {
+			TRACE_CUR("preempt local cpu.\n");
+
 			/* local CPU case */
 			/* check if we need to poke userspace */
 			if (is_user_np(t))
@@ -47,14 +55,22 @@ void preempt_if_preemptable(struct task_struct* t, int cpu)
 				 * currently-executing task */
 				reschedule = 1;
 		} else {
+			int is_knp = is_kernel_np(t);
+			int reqexit = request_exit_np_atomic(t);
+			TRACE_CUR("preempt remote cpu: isknp = %d reqexit = %d\n", is_knp, reqexit);
+
 			/* Remote CPU case. Only notify if it's not a kernel
 			 * NP section and if we didn't set the userspace
 			 * flag. */
-			reschedule = !(is_kernel_np(t) || request_exit_np_atomic(t));
+			//reschedule = !(is_kernel_np(t) || request_exit_np_atomic(t));
+			reschedule = !(is_knp || reqexit);
 		}
 	}
-	if (likely(reschedule))
+
+	if (likely(reschedule)) {
+		TRACE_CUR("calling litmus_reschedule()\n");
 		litmus_reschedule(cpu);
+	}
 }
--
cgit v1.2.2
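/* The sched_plugin.c hunk above only adds tracing and splits the remote-case
 * expression into named locals; the preemption decision itself is unchanged.
 * Summarized as a sketch (np = non-preemptive section): */

#include <stdbool.h>

struct preempt_query {
	bool task_present;	/* a real-time task currently occupies the CPU */
	bool local_cpu;		/* smp_processor_id() == cpu                    */
	bool user_np;		/* userspace-signalled np section               */
	bool kernel_np;		/* kernel-level np section                      */
	bool exit_flagged;	/* request_exit_np_atomic() set the exit flag   */
};

static bool should_send_reschedule(const struct preempt_query *q)
{
	if (!q->task_present)
		return true;				/* move non-rt work out of the way */
	if (q->local_cpu)
		/* poke userspace if it asked to be non-preemptive; otherwise
		 * preempt unless the kernel itself is non-preemptive */
		return !q->user_np && !q->kernel_np;
	return !(q->kernel_np || q->exit_flagged);	/* remote CPU case */
}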