From d6ddecb0d2cee3880a2785c2b4345336855dc6e5 Mon Sep 17 00:00:00 2001 From: Glenn Elliott Date: Wed, 20 Feb 2013 09:59:45 -0500 Subject: Minor fixes and cleanup. --- include/litmus/locking.h | 6 ++-- litmus/aux_tasks.c | 16 +++++----- litmus/edf_common.c | 10 +++--- litmus/fifo_lock.c | 7 +++-- litmus/gpu_affinity.c | 79 +++++++----------------------------------------- litmus/jobs.c | 2 ++ litmus/litmus.c | 11 +++++-- litmus/locking.c | 79 +++++++++++++++++++++++++++--------------------- litmus/sched_cedf.c | 18 ++++++----- 9 files changed, 97 insertions(+), 131 deletions(-) diff --git a/include/litmus/locking.h b/include/litmus/locking.h index b9c6a2b1d01e..02cc9cf4bb55 100644 --- a/include/litmus/locking.h +++ b/include/litmus/locking.h @@ -153,11 +153,11 @@ struct litmus_lock_ops { /* all flags at the end */ #ifdef CONFIG_LITMUS_NESTED_LOCKING - int supports_nesting:1; + unsigned int supports_nesting:1; #endif #ifdef CONFIG_LITMUS_DGL_SUPPORT - int supports_dgl:1; - int requires_atomic_dgl:1; + unsigned int supports_dgl:1; + unsigned int requires_atomic_dgl:1; #endif }; diff --git a/litmus/aux_tasks.c b/litmus/aux_tasks.c index 5aa9f7634fbf..a9fa9df5ef8b 100644 --- a/litmus/aux_tasks.c +++ b/litmus/aux_tasks.c @@ -74,7 +74,7 @@ static int aux_tasks_increase_priority(struct task_struct *leader, struct task_s struct list_head *pos; - TRACE_CUR("Increasing priority of aux tasks in group %s/%d.\n", leader->comm, leader->pid); +// TRACE_CUR("Increasing priority of aux tasks in group %s/%d.\n", leader->comm, leader->pid); list_for_each(pos, &tsk_aux(leader)->aux_tasks) { struct task_struct *aux = @@ -90,7 +90,7 @@ static int aux_tasks_increase_priority(struct task_struct *leader, struct task_s } else { // aux tasks don't touch rt locks, so no nested call needed. 
- TRACE_CUR("increasing %s/%d.\n", aux->comm, aux->pid); +// TRACE_CUR("increasing %s/%d.\n", aux->comm, aux->pid); retval = litmus->__increase_prio(aux, hp); } } @@ -104,7 +104,7 @@ static int aux_tasks_decrease_priority(struct task_struct *leader, struct task_s struct list_head *pos; - TRACE_CUR("Decreasing priority of aux tasks in group %s/%d.\n", leader->comm, leader->pid); +// TRACE_CUR("Decreasing priority of aux tasks in group %s/%d.\n", leader->comm, leader->pid); list_for_each(pos, &tsk_aux(leader)->aux_tasks) { struct task_struct *aux = @@ -115,7 +115,7 @@ static int aux_tasks_decrease_priority(struct task_struct *leader, struct task_s TRACE_CUR("skipping non-real-time aux task %s/%d\n", aux->comm, aux->pid); } else { - TRACE_CUR("decreasing %s/%d.\n", aux->comm, aux->pid); +// TRACE_CUR("decreasing %s/%d.\n", aux->comm, aux->pid); retval = litmus->__decrease_prio(aux, hp); } } @@ -147,7 +147,7 @@ int aux_task_owner_increase_priority(struct task_struct *t) goto out; } - TRACE_CUR("task %s/%d in group %s/%d increasing priority.\n", t->comm, t->pid, leader->comm, leader->pid); +// TRACE_CUR("task %s/%d in group %s/%d increasing priority.\n", t->comm, t->pid, leader->comm, leader->pid); hp = container_of( binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), @@ -171,7 +171,7 @@ int aux_task_owner_increase_priority(struct task_struct *t) /* check if the eff. prio. 
of hp has changed */ if (increase_aux || (effective_priority(hp) != hp_eff)) { hp_eff = effective_priority(hp); - TRACE_CUR("%s/%d is new hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); +// TRACE_CUR("%s/%d is new hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); retval = aux_tasks_increase_priority(leader, hp_eff); } out: @@ -201,7 +201,7 @@ int aux_task_owner_decrease_priority(struct task_struct *t) goto out; } - TRACE_CUR("task %s/%d in group %s/%d decresing priority.\n", t->comm, t->pid, leader->comm, leader->pid); +// TRACE_CUR("task %s/%d in group %s/%d decresing priority.\n", t->comm, t->pid, leader->comm, leader->pid); hp = container_of( binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), @@ -219,7 +219,7 @@ int aux_task_owner_decrease_priority(struct task_struct *t) /* if the new_hp is still t, or if the effective priority has changed */ if ((new_hp == t) || (effective_priority(new_hp) != hp_eff)) { hp_eff = effective_priority(new_hp); - TRACE_CUR("%s/%d is no longer hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); +// TRACE_CUR("%s/%d is no longer hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); retval = aux_tasks_decrease_priority(leader, hp_eff); } } diff --git a/litmus/edf_common.c b/litmus/edf_common.c index 32ee5f464ef8..52ccac998142 100644 --- a/litmus/edf_common.c +++ b/litmus/edf_common.c @@ -108,12 +108,11 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) // one of these is an aux task without inheritance. if (first_lo_aux != second_lo_aux) { int temp = (first_lo_aux < second_lo_aux); // non-lo-aux has higher priority. - TRACE_CUR("%s/%d >> %s/%d --- %d\n", first->comm, first->pid, second->comm, second->pid, temp); return temp; } else { /* both MUST be lo_aux. tie-break. 
*/ - TRACE_CUR("aux tie break!\n"); + //TRACE_CUR("aux tie break!\n"); goto aux_tie_break; } } @@ -123,7 +122,7 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) // inh_task is !NULL for both tasks since neither was a lo_aux task. // Both aux tasks inherit from the same task, so tie-break // by base priority of the aux tasks. - TRACE_CUR("aux tie break!\n"); + //TRACE_CUR("aux tie break!\n"); goto aux_tie_break; } } @@ -139,12 +138,11 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) // one of these is an klmirqd thread without inheritance. if (first_lo_klmirqd != second_lo_klmirqd) { int temp = (first_lo_klmirqd < second_lo_klmirqd); // non-klmirqd has higher priority - TRACE_CUR("%s/%d >> %s/%d --- %d\n", first->comm, first->pid, second->comm, second->pid, temp); return temp; } else { /* both MUST be klmirqd. tie-break. */ - TRACE_CUR("klmirqd tie break!\n"); + //TRACE_CUR("klmirqd tie break!\n"); goto klmirqd_tie_break; } } @@ -154,7 +152,7 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) // inh_task is !NULL for both tasks since neither was a lo_klmirqd task. // Both klmirqd tasks inherit from the same task, so tie-break // by base priority of the klmirqd tasks. - TRACE_CUR("klmirqd tie break!\n"); + //TRACE_CUR("klmirqd tie break!\n"); goto klmirqd_tie_break; } } diff --git a/litmus/fifo_lock.c b/litmus/fifo_lock.c index dfe56bface6d..b3e956f5a93a 100644 --- a/litmus/fifo_lock.c +++ b/litmus/fifo_lock.c @@ -2,6 +2,7 @@ #include #include +#include #include #include @@ -416,6 +417,10 @@ int fifo_mutex_unlock(struct litmus_lock* l) #ifdef CONFIG_LITMUS_DGL_SUPPORT if(dgl_wait) { + // we normally do this tracing in locking.c, but that code + // doesn't have visibility into this hand-off. 
+ sched_trace_lock(dgl_wait->task, l->ident, 1); + select_next_lock_if_primary(l, dgl_wait); --(dgl_wait->nr_remaining); wake_up_task = (dgl_wait->nr_remaining == 0); @@ -504,8 +509,6 @@ out: #endif unlock_global_irqrestore(dgl_lock, flags); - TRACE_TASK(t, "-- Freed lock %d --\n", l->ident); - return err; } diff --git a/litmus/gpu_affinity.c b/litmus/gpu_affinity.c index 9e421ce9efc2..f4bfb1a67097 100644 --- a/litmus/gpu_affinity.c +++ b/litmus/gpu_affinity.c @@ -20,6 +20,7 @@ #define MIN(a, b) ((a < b) ? a : b) #if 0 +/* PID feedback controller */ static fp_t update_estimate(feedback_est_t* fb, fp_t a, fp_t b, lt_t observed) { fp_t relative_err; @@ -78,14 +79,22 @@ lt_t isqrt(lt_t n) void update_gpu_estimate(struct task_struct *t, lt_t observed) { - //feedback_est_t *fb = &(tsk_rt(t)->gpu_migration_est[tsk_rt(t)->gpu_migration]); avg_est_t *est; - struct migration_info mig_info; + BUG_ON(tsk_rt(t)->gpu_migration > MIG_LAST); est = &(tsk_rt(t)->gpu_migration_est[tsk_rt(t)->gpu_migration]); + { + /* log the migration event */ + struct migration_info mig_info; + mig_info.observed = observed; + mig_info.estimated = est->avg; + mig_info.distance = tsk_rt(t)->gpu_migration; + sched_trace_migration(t, &mig_info); + } + if (unlikely(observed > OBSERVATION_CAP)) { TRACE_TASK(t, "Crazy observation greater than was dropped: %llu > %llu\n", observed, @@ -93,22 +102,6 @@ void update_gpu_estimate(struct task_struct *t, lt_t observed) return; } -#if 0 - // filter out values that are HI_THRESHOLDx or (1/LO_THRESHOLD)x out - // of range of the average, but only filter if enough samples - // have been taken. 
- if (likely((est->count > MIN(10, AVG_EST_WINDOW_SIZE/2)))) { - if (unlikely(observed < est->avg/LO_THRESHOLD)) { - TRACE_TASK(t, "Observation is too small: %llu\n", - observed); - return; - } - else if (unlikely(observed > est->avg*HI_THRESHOLD)) { - TRACE_TASK(t, "Observation is too large: %llu\n", - observed); - return; - } -#endif // filter values outside NUM_STDEVx the standard deviation, // but only filter if enough samples have been taken. if (likely((est->count > MIN(10, AVG_EST_WINDOW_SIZE/2)))) { @@ -129,8 +122,6 @@ void update_gpu_estimate(struct task_struct *t, lt_t observed) } } - - if (unlikely(est->count < AVG_EST_WINDOW_SIZE)) { ++est->count; } @@ -138,60 +129,12 @@ void update_gpu_estimate(struct task_struct *t, lt_t observed) est->sum -= est->history[est->idx]; } - mig_info.observed = observed; - mig_info.estimated = est->avg; - mig_info.distance = tsk_rt(t)->gpu_migration; - sched_trace_migration(t, &mig_info); - est->history[est->idx] = observed; est->sum += observed; est->avg = est->sum/est->count; est->std = isqrt(varience(est->history, est->avg, est->count)); est->idx = (est->idx + 1) % AVG_EST_WINDOW_SIZE; - -#if 0 - if(unlikely(fb->est.val == 0)) { - // kludge-- cap observed values to prevent whacky estimations. - // whacky stuff happens during the first few jobs. - if(unlikely(observed > OBSERVATION_CAP)) { - TRACE_TASK(t, "Crazy observation was capped: %llu -> %llu\n", - observed, OBSERVATION_CAP); - observed = OBSERVATION_CAP; - } - - // take the first observation as our estimate - // (initial value of 0 was bogus anyhow) - fb->est = _integer_to_fp(observed); - fb->accum_err = _div(fb->est, _integer_to_fp(2)); // ...seems to work. - } - else { - fp_t rel_err = update_estimate(fb, - tsk_rt(t)->gpu_fb_param_a[tsk_rt(t)->gpu_migration], - tsk_rt(t)->gpu_fb_param_b[tsk_rt(t)->gpu_migration], - observed); - - if(unlikely(_fp_to_integer(fb->est) <= 0)) { - TRACE_TASK(t, "Invalid estimate. 
Patching.\n"); - fb->est = _integer_to_fp(observed); - fb->accum_err = _div(fb->est, _integer_to_fp(2)); // ...seems to work. - } - else { - struct migration_info mig_info; - - sched_trace_prediction_err(t, - &(tsk_rt(t)->gpu_migration), - &rel_err); - - mig_info.observed = observed; - mig_info.estimated = get_gpu_estimate(t, tsk_rt(t)->gpu_migration); - mig_info.distance = tsk_rt(t)->gpu_migration; - - sched_trace_migration(t, &mig_info); - } - } -#endif - TRACE_TASK(t, "GPU est update after (dist = %d, obs = %llu): %llu\n", tsk_rt(t)->gpu_migration, observed, diff --git a/litmus/jobs.c b/litmus/jobs.c index bdfc41004d38..659625433867 100644 --- a/litmus/jobs.c +++ b/litmus/jobs.c @@ -28,6 +28,8 @@ static inline void setup_release(struct task_struct *t, lt_t release) /* don't confuse Linux */ t->rt.time_slice = 1; + + TRACE_TASK(t, "preparing for next job: %d\n", t->rt_param.job_params.job_no); } void prepare_for_next_period(struct task_struct *t) diff --git a/litmus/litmus.c b/litmus/litmus.c index a69a3d0e9128..3e15ea432293 100644 --- a/litmus/litmus.c +++ b/litmus/litmus.c @@ -316,8 +316,8 @@ asmlinkage long sys_sched_trace_event(int event, struct st_inject_args __user *_ struct st_inject_args args; - if (is_realtime(t)) { - printk(KERN_WARNING "Only non-real-time tasks may inject sched_trace events.\n"); + if ((event != ST_INJECT_ACTION) && is_realtime(t)) { + printk(KERN_WARNING "Only non-real-time tasks may inject sched_trace events (except for ST_INJECT_ACTION).\n"); retval = -EINVAL; goto out; } @@ -368,6 +368,13 @@ asmlinkage long sys_sched_trace_event(int event, struct st_inject_args __user *_ sched_trace_task_release(t); break; + case ST_INJECT_ACTION: + if (!__args) { + retval = -EINVAL; + goto out; + } + sched_trace_action(t, args.action); + break; /**********************/ /* unsupported events */ diff --git a/litmus/locking.c b/litmus/locking.c index 8ba46f85f5c6..73ebde3e8957 100644 --- a/litmus/locking.c +++ b/litmus/locking.c @@ -10,6 +10,7 @@ 
#include #include #include +#include #ifdef CONFIG_LITMUS_DGL_SUPPORT #include @@ -124,6 +125,8 @@ asmlinkage long sys_litmus_lock(int lock_od) TRACE_CUR("Attempts to lock %d\n", l->ident); err = l->ops->lock(l); if (!err) { + sched_trace_lock(current, l->ident, 1); + TRACE_CUR("Got lock %d\n", l->ident); } } @@ -156,6 +159,8 @@ asmlinkage long sys_litmus_unlock(int lock_od) TRACE_CUR("Attempts to unlock %d\n", l->ident); err = l->ops->unlock(l); if (!err) { + sched_trace_lock(current, l->ident, 0); + TRACE_CUR("Unlocked %d\n", l->ident); } } @@ -376,6 +381,9 @@ int __attempt_atomic_dgl_acquire(struct litmus_lock *cur_lock, dgl_wait_state_t for(i = 0; i < dgl_wait->size; ++i) { struct litmus_lock *l = dgl_wait->locks[i]; l->ops->dgl_quick_lock(l, cur_lock, dgl_wait->task, &dgl_wait->wq_nodes[i]); + + sched_trace_lock(dgl_wait->task, l->ident, 1); + BUG_ON(!(l->ops->is_owner(l, dgl_wait->task))); } @@ -390,9 +398,11 @@ static long do_litmus_dgl_lock(dgl_wait_state_t *dgl_wait) raw_spinlock_t *dgl_lock; #ifdef CONFIG_SCHED_DEBUG_TRACE - char dglstr[CONFIG_LITMUS_MAX_DGL_SIZE*5]; - snprintf_dgl(dglstr, sizeof(dglstr), dgl_wait->locks, dgl_wait->size); - TRACE_CUR("Locking DGL with size %d: %s\n", dgl_wait->size, dglstr); + { + char dglstr[MAX_DGL_SIZE*5]; + snprintf_dgl(dglstr, sizeof(dglstr), dgl_wait->locks, dgl_wait->size); + TRACE_CUR("Locking DGL with size %d: %s\n", dgl_wait->size, dglstr); + } #endif BUG_ON(dgl_wait->task != current); @@ -409,6 +419,7 @@ static long do_litmus_dgl_lock(dgl_wait_state_t *dgl_wait) // dgl_lock() must set task state to TASK_UNINTERRUPTIBLE if task blocks.
if(tmp->ops->dgl_lock(tmp, dgl_wait, &dgl_wait->wq_nodes[i])) { + sched_trace_lock(dgl_wait->task, tmp->ident, 1); --(dgl_wait->nr_remaining); TRACE_CUR("Acquired lock %d immediatly.\n", tmp->ident); } @@ -446,11 +457,11 @@ static long do_litmus_dgl_lock(dgl_wait_state_t *dgl_wait) TRACE_CUR("Woken up from DGL suspension.\n"); } - // FOR SANITY CHECK FOR TESTING - for(i = 0; i < dgl_wait->size; ++i) { - struct litmus_lock *tmp = dgl_wait->locks[i]; - BUG_ON(!tmp->ops->is_owner(tmp, dgl_wait->task)); - } +// // FOR SANITY CHECK FOR TESTING +// for(i = 0; i < dgl_wait->size; ++i) { +// struct litmus_lock *tmp = dgl_wait->locks[i]; +// BUG_ON(!tmp->ops->is_owner(tmp, dgl_wait->task)); +// } TRACE_CUR("Acquired entire DGL\n"); @@ -467,9 +478,11 @@ static long do_litmus_dgl_atomic_lock(dgl_wait_state_t *dgl_wait) struct task_struct *t = current; #ifdef CONFIG_SCHED_DEBUG_TRACE - char dglstr[CONFIG_LITMUS_MAX_DGL_SIZE*5]; - snprintf_dgl(dglstr, sizeof(dglstr), dgl_wait->locks, dgl_wait->size); - TRACE_CUR("Atomic locking DGL with size %d: %s\n", dgl_wait->size, dglstr); + { + char dglstr[MAX_DGL_SIZE*5]; + snprintf_dgl(dglstr, sizeof(dglstr), dgl_wait->locks, dgl_wait->size); + TRACE_CUR("Atomic locking DGL with size %d: %s\n", dgl_wait->size, dglstr); + } #endif dgl_lock = litmus->get_dgl_spinlock(dgl_wait->task); @@ -523,11 +536,11 @@ all_acquired: dgl_wait->nr_remaining = 0; - // SANITY CHECK FOR TESTING - for(i = 0; i < dgl_wait->size; ++i) { - struct litmus_lock *tmp = dgl_wait->locks[i]; - BUG_ON(!tmp->ops->is_owner(tmp, dgl_wait->task)); - } +// // SANITY CHECK FOR TESTING +// for(i = 0; i < dgl_wait->size; ++i) { +// struct litmus_lock *tmp = dgl_wait->locks[i]; +// BUG_ON(!tmp->ops->is_owner(tmp, dgl_wait->task)); +// } TRACE_CUR("Acquired entire DGL\n"); @@ -540,19 +553,14 @@ asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size) { struct task_struct *t = current; long err = -EINVAL; int dgl_ods[MAX_DGL_SIZE]; - int i; - - int
num_need_atomic = 0; - - dgl_wait_state_t dgl_wait_state; // lives on the stack until all resources in DGL are held. if(dgl_size > MAX_DGL_SIZE || dgl_size < 1) goto out; - if(!access_ok(VERIFY_READ, usr_dgl_ods, dgl_size*(sizeof(int)))) + if(!access_ok(VERIFY_READ, usr_dgl_ods, dgl_size*(sizeof(*dgl_ods)))) goto out; - if(__copy_from_user(&dgl_ods, usr_dgl_ods, dgl_size*(sizeof(int)))) + if(__copy_from_user(&dgl_ods, usr_dgl_ods, dgl_size*(sizeof(*dgl_ods)))) goto out; if (!is_realtime(t)) { @@ -566,6 +574,10 @@ asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size) err = sys_litmus_lock(dgl_ods[0]); } else { + int i; + int num_need_atomic = 0; + dgl_wait_state_t dgl_wait_state; // lives on the stack until all resources in DGL are held. + init_dgl_wait_state(&dgl_wait_state); for(i = 0; i < dgl_size; ++i) { @@ -618,11 +630,9 @@ static long do_litmus_dgl_unlock(struct litmus_lock* dgl_locks[], int dgl_size) #ifdef CONFIG_SCHED_DEBUG_TRACE { - char dglstr[CONFIG_LITMUS_MAX_DGL_SIZE*5]; - snprintf_dgl(dglstr, sizeof(dglstr), dgl_locks, dgl_size); - TRACE_CUR("Unlocking a DGL with size %d: %s\n", - dgl_size, - dglstr); + char dglstr[MAX_DGL_SIZE*5]; + snprintf_dgl(dglstr, sizeof(dglstr), dgl_locks, dgl_size); + TRACE_CUR("Unlocking a DGL with size %d: %s\n", dgl_size, dglstr); } #endif @@ -634,6 +644,7 @@ static long do_litmus_dgl_unlock(struct litmus_lock* dgl_locks[], int dgl_size) TRACE_CUR("Unlocking lock %d of DGL.\n", l->ident); tmp_err = l->ops->unlock(l); + sched_trace_lock(current, l->ident, 0); if(tmp_err) { TRACE_CUR("There was an error unlocking %d: %d.\n", l->ident, tmp_err); @@ -650,18 +661,14 @@ asmlinkage long sys_litmus_dgl_unlock(void* __user usr_dgl_ods, int dgl_size) { long err = -EINVAL; int dgl_ods[MAX_DGL_SIZE]; - struct od_table_entry* entry; - int i; - - struct litmus_lock* dgl_locks[MAX_DGL_SIZE]; if(dgl_size > MAX_DGL_SIZE || dgl_size < 1) goto out; - if(!access_ok(VERIFY_READ, usr_dgl_ods,
dgl_size*(sizeof(int)))) + if(!access_ok(VERIFY_READ, usr_dgl_ods, dgl_size*(sizeof(*dgl_ods)))) goto out; - if(__copy_from_user(&dgl_ods, usr_dgl_ods, dgl_size*(sizeof(int)))) + if(__copy_from_user(&dgl_ods, usr_dgl_ods, dgl_size*(sizeof(*dgl_ods)))) goto out; @@ -671,8 +678,10 @@ asmlinkage long sys_litmus_dgl_unlock(void* __user usr_dgl_ods, int dgl_size) err = sys_litmus_unlock(dgl_ods[0]); } else { + struct litmus_lock *dgl_locks[MAX_DGL_SIZE]; + int i; for(i = 0; i < dgl_size; ++i) { - entry = get_entry_for_od(dgl_ods[i]); + struct od_table_entry *entry = get_entry_for_od(dgl_ods[i]); if(entry && is_lock(entry)) { dgl_locks[i] = get_lock(entry); if(!dgl_locks[i]->ops->supports_dgl) { diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c index 69f30188f3ba..fc8f277a1958 100644 --- a/litmus/sched_cedf.c +++ b/litmus/sched_cedf.c @@ -1161,6 +1161,13 @@ static int __increase_priority_inheritance(struct task_struct* t, int check_preempt = 0; cedf_domain_t* cluster; + if (prio_inh && prio_inh == effective_priority(t)) { + /* relationship already established. */ + TRACE_TASK(t, "already has effective priority of %s/%d\n", + prio_inh->comm, prio_inh->pid); + goto out; + } + if (prio_inh && (effective_priority(prio_inh) != prio_inh)) { TRACE_TASK(t, "Inheriting from %s/%d instead of the eff_prio = %s/%d!\n", prio_inh->comm, prio_inh->pid, @@ -1182,13 +1189,6 @@ static int __increase_priority_inheritance(struct task_struct* t, #endif } - if (prio_inh && prio_inh == effective_priority(t)) { - /* relationship already established. */ - TRACE_TASK(t, "already has effective priority of %s/%d\n", - prio_inh->comm, prio_inh->pid); - goto out; - } - cluster = task_cpu_cluster(t); #ifdef CONFIG_LITMUS_NESTED_LOCKING @@ -1196,6 +1196,8 @@ static int __increase_priority_inheritance(struct task_struct* t, /* TODO (klmirqd): Skip this check if 't' is a proxy thread (???) 
*/ if(__edf_higher_prio(prio_inh, BASE, t, EFFECTIVE)) { #endif + sched_trace_eff_prio_change(t, prio_inh); + TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid); tsk_rt(t)->inh_task = prio_inh; @@ -1348,6 +1350,8 @@ static int __decrease_priority_inheritance(struct task_struct* t, #ifdef CONFIG_LITMUS_NESTED_LOCKING if(__edf_higher_prio(t, EFFECTIVE, prio_inh, BASE)) { #endif + sched_trace_eff_prio_change(t, prio_inh); + /* A job only stops inheriting a priority when it releases a * resource. Thus we can make the following assumption.*/ if(prio_inh) -- cgit v1.2.2