From c063e088be8e1bcbb6a76b8cd087f8dc8b6923b2 Mon Sep 17 00:00:00 2001
From: Glenn Elliott
Date: Mon, 11 Feb 2013 22:48:51 -0500
Subject: BUG FIX: Support DGLs with PRIOQ_MUTEX

First working implementation of DGLs with PRIOQ_MUTEX. (All prior code was
work-in-progress.)

General approach: Because of priority-queue ordering, the locks of a
PRIOQ_MUTEX DGL must be *acquired* atomically. This means that a task cannot
acquire an available PRIOQ_MUTEX if another PRIOQ_MUTEX in the same DGL is
unavailable at the same time. Requests are instead buffered in the PRIOQ_MUTEX
and the resource 'idles'--that is, the mutex owner is NULL even though tasks
are waiting for the resource.

Several notes/side-effects:

1) A high-priority task that idles a resource can effectively block
lower-priority tasks from acquiring that resource, because a low-priority
task cannot skip ahead of the high-priority task in the priority queue.

2) Priority inheritance from nesting can cause the low-priority task in #1 to
jump over the high-priority task and acquire the resource. This means that
any task that receives a priority increase while blocked on a DGL must
trigger a re-evaluation of the locks it can take. If the resources can be
acquired, then the task needs to be woken up! <<<<< Lock acquisition via
inheritance is entirely new and weird! >>>>>

3) A case similar to #2 exists for priority decreases (for example, when a
task loses a donor) while the task is blocked on a PRIOQ_MUTEX. The
high-priority task described in #1 can change and become a lower-priority
task. Every idle lock (mutex owner is NULL) on which the task losing priority
is blocked must be re-evaluated---it is possible that the new head of the
priority queue can now take the lock. Note: This affects BOTH singular and
DGL resource requests, while the case described in #2 only affects DGL
requests (because a singular request at the head of the priority queue will
never idle a resource).
--- include/litmus/locking.h | 16 +++-- include/litmus/prioq_lock.h | 1 + litmus/ikglp_lock.c | 4 +- litmus/locking.c | 92 +++++++++------------------ litmus/prioq_lock.c | 151 ++++++++++++++++++++++---------------------- litmus/sched_cedf.c | 40 +++++++++--- 6 files changed, 152 insertions(+), 152 deletions(-) diff --git a/include/litmus/locking.h b/include/litmus/locking.h index b1024e397f67..fc437811d2b6 100644 --- a/include/litmus/locking.h +++ b/include/litmus/locking.h @@ -11,7 +11,6 @@ struct nested_info struct litmus_lock *lock; struct task_struct *hp_waiter_eff_prio; struct task_struct **hp_waiter_ptr; -// struct task_struct **owner_ptr; struct binheap_node hp_binheap_node; }; @@ -134,24 +133,31 @@ struct litmus_lock_ops { /* The lock is no longer being referenced (mandatory method). 
*/ lock_free_t deallocate; - + #ifdef CONFIG_LITMUS_NESTED_LOCKING void (*propagate_increase_inheritance)(struct litmus_lock* l, struct task_struct* t, raw_spinlock_t* to_unlock, unsigned long irqflags); void (*propagate_decrease_inheritance)(struct litmus_lock* l, struct task_struct* t, raw_spinlock_t* to_unlock, unsigned long irqflags); #endif - - #ifdef CONFIG_LITMUS_DGL_SUPPORT raw_spinlock_t* (*get_dgl_spin_lock)(struct litmus_lock *l); int (*dgl_lock)(struct litmus_lock *l, dgl_wait_state_t* dgl_wait, wait_queue_t* wq_node); int (*is_owner)(struct litmus_lock *l, struct task_struct *t); struct task_struct* (*get_owner)(struct litmus_lock *l); void (*enable_priority)(struct litmus_lock *l, dgl_wait_state_t* dgl_wait); - + int (*dgl_can_quick_lock)(struct litmus_lock *l, struct task_struct *t); void (*dgl_quick_lock)(struct litmus_lock *l, struct litmus_lock *cur_lock, struct task_struct* t, wait_queue_t *q); #endif + + /* all flags at the end */ +#ifdef CONFIG_LITMUS_NESTED_LOCKING + int supports_nesting:1; +#endif +#ifdef CONFIG_LITMUS_DGL_SUPPORT + int supports_dgl:1; + int requires_atomic_dgl:1; +#endif }; diff --git a/include/litmus/prioq_lock.h b/include/litmus/prioq_lock.h index 5c135ef0bdc6..1b0a591ef1a6 100644 --- a/include/litmus/prioq_lock.h +++ b/include/litmus/prioq_lock.h @@ -34,6 +34,7 @@ static inline struct prioq_mutex* prioq_mutex_from_lock(struct litmus_lock* lock int prioq_mutex_is_owner(struct litmus_lock *l, struct task_struct *t); struct task_struct* prioq_mutex_get_owner(struct litmus_lock *l); int prioq_mutex_dgl_lock(struct litmus_lock *l, dgl_wait_state_t* dgl_wait, wait_queue_t* wq_node); +int prioq_mutex_dgl_unlock(struct litmus_lock *l); void prioq_mutex_enable_priority(struct litmus_lock *l, dgl_wait_state_t* dgl_wait); void prioq_mutex_dgl_quick_lock(struct litmus_lock *l, struct litmus_lock *cur_lock, struct task_struct* t, wait_queue_t *q); diff --git a/litmus/ikglp_lock.c b/litmus/ikglp_lock.c index 3d79e41b42df..3fd760799a75 100644 --- a/litmus/ikglp_lock.c +++ b/litmus/ikglp_lock.c @@ -1401,7 +1401,9 @@ int ikglp_unlock(struct litmus_lock* l) struct nested_info, hp_binheap_node); ++count; } - litmus->decrease_prio(t, NULL); + if (count) { + litmus->decrease_prio(t, NULL); + } WARN_ON(count > 2); // should not be greater than 2. only local fq inh and donation can be possible. } raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); diff --git a/litmus/locking.c b/litmus/locking.c index 4b8382cd77d1..eddc67a4d36a 100644 --- a/litmus/locking.c +++ b/litmus/locking.c @@ -365,18 +365,6 @@ static void snprintf_dgl(char* buf, size_t bsz, struct litmus_lock* dgl_locks[], #endif -static int failed_owner(struct litmus_lock *cur_lock, struct task_struct *t) -{ - struct task_struct *cur_owner = cur_lock->ops->get_owner(cur_lock); - printk(KERN_EMERG "lock %d expected owner %s/%d but got %s/%d.\n", - cur_lock->ident, - (t) ? t->comm : "null", - (t) ? t->pid : 0, - (cur_owner) ? cur_owner->comm : "null", - (cur_owner) ? cur_owner->pid : 0); - BUG(); -} - /* only valid when locks are prioq locks!!! * THE BIG DGL LOCK MUST BE HELD! 
*/ int __attempt_atomic_dgl_acquire(struct litmus_lock *cur_lock, dgl_wait_state_t *dgl_wait) @@ -395,12 +383,8 @@ int __attempt_atomic_dgl_acquire(struct litmus_lock *cur_lock, dgl_wait_state_t /* take the locks */ for(i = 0; i < dgl_wait->size; ++i) { struct litmus_lock *l = dgl_wait->locks[i]; - l->ops->dgl_quick_lock(l, cur_lock, dgl_wait->task, &dgl_wait->wq_nodes[i]); - - if(!(l->ops->is_owner(l, dgl_wait->task))) - failed_owner(l, dgl_wait->task); - //BUG_ON(!(l->ops->is_owner(l, dgl_wait->task))); + BUG_ON(!(l->ops->is_owner(l, dgl_wait->task))); } return 0; /* success */ @@ -510,6 +494,7 @@ static long do_litmus_dgl_atomic_lock(dgl_wait_state_t *dgl_wait) unsigned long irqflags; //, dummyflags; raw_spinlock_t *dgl_lock; struct litmus_lock *l; + struct task_struct *t = current; #ifdef CONFIG_SCHED_DEBUG_TRACE char dglstr[CONFIG_LITMUS_MAX_DGL_SIZE*5]; @@ -519,7 +504,7 @@ static long do_litmus_dgl_atomic_lock(dgl_wait_state_t *dgl_wait) dgl_lock = litmus->get_dgl_spinlock(dgl_wait->task); - BUG_ON(dgl_wait->task != current); + BUG_ON(dgl_wait->task != t); raw_spin_lock_irqsave(dgl_lock, irqflags); @@ -528,7 +513,8 @@ static long do_litmus_dgl_atomic_lock(dgl_wait_state_t *dgl_wait) for(i = 0; i < dgl_wait->size; ++i) { struct litmus_lock *l = dgl_wait->locks[i]; - l->ops->dgl_lock(l, dgl_wait, &dgl_wait->wq_nodes[i]); // this should be a forced enqueue if atomic DGLs are needed. + // this should be a forced enqueue if atomic DGLs are needed. + l->ops->dgl_lock(l, dgl_wait, &dgl_wait->wq_nodes[i]); } if(__attempt_atomic_dgl_acquire(NULL, dgl_wait)) { @@ -536,27 +522,26 @@ static long do_litmus_dgl_atomic_lock(dgl_wait_state_t *dgl_wait) * Pick a lock to push on and suspend. */ TRACE_CUR("Could not atomically acquire all locks.\n"); + /* we set the uninterruptible state here since + * __attempt_atomic_dgl_acquire() may actually succeed. */ + set_task_state(t, TASK_UNINTERRUPTIBLE); #if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA) // KLUDGE: don't count this suspension as time in the critical gpu // critical section - if(tsk_rt(dgl_wait->task)->held_gpus) { - tsk_rt(dgl_wait->task)->suspend_gpu_tracker_on_block = 1; + if(tsk_rt(t)->held_gpus) { + tsk_rt(t)->suspend_gpu_tracker_on_block = 1; } #endif - // we are not the owner of any lock, so push on the last one in the DGL - // by default. 
- l = dgl_wait->locks[dgl_wait->size - 1]; + // select a lock to push priority on + dgl_wait->last_primary = 0; // default + select_next_lock(dgl_wait); // may change value of last_primary - TRACE_CUR("Activating priority inheritance on lock %d\n", - l->ident); + l = dgl_wait->locks[dgl_wait->last_primary]; TS_DGL_LOCK_SUSPEND; - l->ops->enable_priority(l, dgl_wait); - dgl_wait->last_primary = dgl_wait->size - 1; - TRACE_CUR("Suspending for lock %d\n", l->ident); raw_spin_unlock_irqrestore(dgl_lock, irqflags); // free dgl_lock before suspending @@ -578,9 +563,7 @@ all_acquired: // SANITY CHECK FOR TESTING for(i = 0; i < dgl_wait->size; ++i) { struct litmus_lock *l = dgl_wait->locks[i]; - if(!(l->ops->is_owner(l, dgl_wait->task))) - failed_owner(l, dgl_wait->task); - //BUG_ON(!l->ops->is_owner(l, dgl_wait->task)); + BUG_ON(!l->ops->is_owner(l, dgl_wait->task)); } TRACE_CUR("Acquired entire DGL\n"); @@ -589,23 +572,6 @@ all_acquired: } - - -static int supports_dgl(struct litmus_lock *l) -{ - struct litmus_lock_ops* ops = l->ops; - return (ops->dgl_lock && - ops->is_owner && - ops->get_owner && - ops->enable_priority); -} - -static int needs_atomic_dgl(struct litmus_lock *l) -{ - struct litmus_lock_ops* ops = l->ops; - return (ops->dgl_quick_lock != NULL); -} - asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size) { struct task_struct *t = current; @@ -641,13 +607,13 @@ asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size) struct od_table_entry *entry = get_entry_for_od(dgl_ods[i]); if(entry && is_lock(entry)) { dgl_wait_state.locks[i] = get_lock(entry); - if(!supports_dgl(dgl_wait_state.locks[i])) { + if(!dgl_wait_state.locks[i]->ops->supports_dgl) { TRACE_CUR("Lock %d does not support all required DGL operations.\n", dgl_wait_state.locks[i]->ident); goto out; } - if (needs_atomic_dgl(dgl_wait_state.locks[i])) { + if(dgl_wait_state.locks[i]->ops->requires_atomic_dgl) { ++num_need_atomic; } } @@ -686,9 +652,13 @@ static long do_litmus_dgl_unlock(struct litmus_lock* dgl_locks[], int dgl_size) long err = 0; #ifdef CONFIG_SCHED_DEBUG_TRACE - char dglstr[CONFIG_LITMUS_MAX_DGL_SIZE*5]; - snprintf_dgl(dglstr, sizeof(dglstr), dgl_locks, dgl_size); - TRACE_CUR("Unlocking a DGL with size %d: %s\n", dgl_size, dglstr); + { + char dglstr[CONFIG_LITMUS_MAX_DGL_SIZE*5]; + snprintf_dgl(dglstr, sizeof(dglstr), dgl_locks, dgl_size); + TRACE_CUR("Unlocking a DGL with size %d: %s\n", + dgl_size, + dglstr); + } #endif for(i = dgl_size - 1; i >= 0; --i) { // unlock in reverse order @@ -740,7 +710,7 @@ asmlinkage long sys_litmus_dgl_unlock(void* __user usr_dgl_ods, int dgl_size) entry = get_entry_for_od(dgl_ods[i]); if(entry && is_lock(entry)) { dgl_locks[i] = get_lock(entry); - if(!supports_dgl(dgl_locks[i])) { + if(!dgl_locks[i]->ops->supports_dgl) { TRACE_CUR("Lock %d does not support all required DGL operations.\n", dgl_locks[i]->ident); goto out; @@ -852,19 +822,19 @@ void suspend_for_lock(void) tsk_rt(t)->suspend_gpu_tracker_on_block = 1; } #endif - + schedule(); - + /* TODO: Move the following to wake_up_for_lock()? 
*/ - + #if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA) // re-enable tracking if(tsk_rt(t)->held_gpus) { tsk_rt(t)->suspend_gpu_tracker_on_block = 0; } #endif - + #ifdef CONFIG_LITMUS_NVIDIA if (gpu_restore) { /* restore our state */ @@ -883,9 +853,9 @@ void suspend_for_lock(void) int wake_up_for_lock(struct task_struct* t) { int ret; - + ret = wake_up_process(t); - + return ret; } diff --git a/litmus/prioq_lock.c b/litmus/prioq_lock.c index faf8c15df542..142f56fe9099 100644 --- a/litmus/prioq_lock.c +++ b/litmus/prioq_lock.c @@ -165,12 +165,12 @@ static struct task_struct* __prioq_mutex_find_hp_waiter(struct prioq_mutex *mute wait_queue_t *q; struct list_head *pos; struct task_struct *queued = NULL, *found = NULL; - + /* list in sorted order. higher-prio tasks likely at the front. */ list_for_each(pos, &mutex->wait.task_list) { q = list_entry(pos, wait_queue_t, task_list); queued = get_queued_task(q); - + /* Compare task prios, find high prio task. */ if (queued && (queued != skip) && @@ -195,12 +195,12 @@ static int ___prioq_dgl_acquire_via_inheritance(struct prioq_mutex *mutex, struc struct litmus_lock *l; BUG_ON(mutex->owner != NULL); BUG_ON(list_empty(&mutex->wait.task_list)); - + l = &mutex->litmus_lock; - + if (dgl_wait) { BUG_ON(t != dgl_wait->task); - + /* we're a part of a DGL */ if(__attempt_atomic_dgl_acquire(NULL, dgl_wait)) { TRACE_CUR("%s/%d cannot take entire DGL via inheritance.\n", @@ -218,50 +218,50 @@ static int ___prioq_dgl_acquire_via_inheritance(struct prioq_mutex *mutex, struc /* we're a regular singular request. we can always take the lock if * there is no mutex owner. */ wait_queue_t *first; - + TRACE_CUR("%s/%d can take it's singular lock via inheritance!\n", t->comm, t->pid); - + first = list_entry(mutex->wait.task_list.next, wait_queue_t, task_list); - + BUG_ON(get_queued_task(first) != t); - + __remove_wait_queue(&mutex->wait, first); /* remove the blocked task */ - + /* update/cleanup the state of the lock */ - + mutex->owner = t; /* take ownership!!! */ - + mutex->hp_waiter = __prioq_mutex_find_hp_waiter(mutex, t); l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ? effective_priority(mutex->hp_waiter) : NULL; - + if (mutex->hp_waiter) TRACE_CUR("%s/%d is new highest-prio waiter\n", mutex->hp_waiter->comm, mutex->hp_waiter->pid); else TRACE_CUR("no further waiters\n"); - + raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); - + binheap_add(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks, struct nested_info, hp_binheap_node); - + raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); } - + if (t) { BUG_ON(mutex->owner != t); - + TRACE_CUR("%s/%d waking up since it is no longer blocked.\n", t->comm, t->pid); - + tsk_rt(t)->blocked_lock = NULL; mb(); - + wake_up_for_lock(t); } - + return (t != NULL); } @@ -276,7 +276,7 @@ static int __prioq_dgl_increase_pos(struct prioq_mutex *mutex, struct task_struc // (1) Increase position for 't' for all locks it is waiting. // (2) Check to see if 't' can take the lock, DGL or singular lock. // (3) If it can, do so and wake up 't'. 
- + struct list_head *pos; struct task_struct *new_head; struct task_struct *cur_head = NULL; @@ -284,32 +284,32 @@ static int __prioq_dgl_increase_pos(struct prioq_mutex *mutex, struct task_struc int woke_up = 0; int found = 0; - + BUG_ON(list_empty(&mutex->wait.task_list)); - + /* note the task at the head of the queue */ if(mutex->owner == NULL) { cur_head = get_head_task(mutex); } - + list_for_each(pos, &mutex->wait.task_list) { dgl_wait_state_t *temp_dgl_state; wait_queue_t *q = list_entry(pos, wait_queue_t, task_list); struct task_struct *queued = get_queued_task_and_dgl_wait(q, &temp_dgl_state); - + if (queued == t) { - + TRACE_CUR("found %s/%d in prioq of lock %d\n", t->comm, t->pid, mutex->litmus_lock.ident); - + if(temp_dgl_state) { /* it's a DGL request */ int i; dgl_wait = temp_dgl_state; - + TRACE_CUR("found request for %s/%d is a DGL request of size %d.\n", t->comm, t->pid, dgl_wait->size); - + // reposition on the other mutexes for(i = 0; i < dgl_wait->size; ++i) { // assume they're all PRIOQ_MUTEX @@ -318,7 +318,7 @@ static int __prioq_dgl_increase_pos(struct prioq_mutex *mutex, struct task_struc __prioq_increase_pos(pm, t); } } - + // reposition on this mutex __remove_wait_queue(&mutex->wait, q); __add_wait_queue_sorted(&mutex->wait, q); @@ -326,24 +326,24 @@ static int __prioq_dgl_increase_pos(struct prioq_mutex *mutex, struct task_struc break; } } - + BUG_ON(!found); - + if (mutex->owner == NULL) { /* who is the new head? */ new_head = get_head_task(mutex); - + /* is the prioq mutex idle? */ if(cur_head != new_head) { /* the new head might be able to take the lock */ - + BUG_ON(new_head != t); /* the new head must be this task since our prio increased */ - + TRACE_CUR("Change in prioq head on idle prioq mutex %d: old = %s/%d new = %s/%d\n", mutex->litmus_lock.ident, cur_head->comm, cur_head->pid, new_head->comm, new_head->pid); - + woke_up = ___prioq_dgl_acquire_via_inheritance(mutex, t, dgl_wait); } } @@ -358,9 +358,9 @@ static int ___prioq_dgl_decrease_pos_and_check_acquire(struct prioq_mutex *mutex struct task_struct *cur_head = NULL; int woke_up = 0; int found = 1; - + BUG_ON(list_empty(&mutex->wait.task_list)); - + /* find the position of t in mutex's wait q if it's not provided */ if (q == NULL) { found = 0; @@ -375,21 +375,21 @@ static int ___prioq_dgl_decrease_pos_and_check_acquire(struct prioq_mutex *mutex BUG_ON(!q); BUG_ON(!found); - + if(mutex->owner == NULL) { cur_head = get_head_task(mutex); } - + // update the position __remove_wait_queue(&mutex->wait, q); __add_wait_queue_sorted(&mutex->wait, q); - + if(mutex->owner == NULL) { // get a reference to dgl_wait of the new head is a DGL request dgl_wait_state_t *dgl_wait; q = list_entry(mutex->wait.task_list.next, wait_queue_t, task_list); new_head = get_queued_task_and_dgl_wait(q, &dgl_wait); - + /* is the prioq mutex idle and did the head change? */ if(cur_head != new_head) { /* the new head might be able to take the lock */ @@ -397,7 +397,7 @@ static int ___prioq_dgl_decrease_pos_and_check_acquire(struct prioq_mutex *mutex mutex->litmus_lock.ident, cur_head->comm, cur_head->pid, new_head->comm, new_head->pid); - + woke_up = ___prioq_dgl_acquire_via_inheritance(mutex, new_head, dgl_wait); } } @@ -410,28 +410,28 @@ static void __prioq_dgl_decrease_pos(struct prioq_mutex *mutex, struct task_stru // (2) For every lock upon which 't' was the head AND that lock is idle: // (3) Can the new head take the lock? // (4) If it can, do so and wake up the new head. 
- + struct list_head *pos; - + BUG_ON(list_empty(&mutex->wait.task_list)); - + list_for_each(pos, &mutex->wait.task_list) { dgl_wait_state_t *dgl_wait; wait_queue_t *q = list_entry(pos, wait_queue_t, task_list); struct task_struct *queued = get_queued_task_and_dgl_wait(q, &dgl_wait); - + if (queued == t) { TRACE_CUR("found %s/%d in prioq of lock %d\n", t->comm, t->pid, - mutex->litmus_lock.ident); - + mutex->litmus_lock.ident); + if (dgl_wait) { // reposition on all mutexes and check for wakeup int i; - + TRACE_CUR("found request for %s/%d is a DGL request of size %d.\n", t->comm, t->pid, dgl_wait->size); - + for(i = 0; i < dgl_wait->size; ++i) { // assume they're all PRIOQ_MUTEX struct prioq_mutex *pm = (struct prioq_mutex *) dgl_wait->locks[i]; @@ -442,12 +442,12 @@ static void __prioq_dgl_decrease_pos(struct prioq_mutex *mutex, struct task_stru } } else { - ___prioq_dgl_decrease_pos_and_check_acquire(mutex, t, q); + ___prioq_dgl_decrease_pos_and_check_acquire(mutex, t, q); } return; } } - + BUG(); } @@ -481,7 +481,7 @@ int prioq_mutex_dgl_lock(struct litmus_lock *l, dgl_wait_state_t* dgl_wait, init_dgl_waitqueue_entry(wq_node, dgl_wait); - set_task_state(t, TASK_UNINTERRUPTIBLE); + //set_task_state(t, TASK_UNINTERRUPTIBLE); /* done in do_litmus_dgl_atomic_lock() only if needed */ __add_wait_queue_sorted_exclusive(&mutex->wait, wq_node); return acquired_immediatly; @@ -494,7 +494,8 @@ void prioq_mutex_enable_priority(struct litmus_lock *l, struct prioq_mutex *mutex = prioq_mutex_from_lock(l); struct task_struct *t = dgl_wait->task; struct task_struct *owner = mutex->owner; - unsigned long flags = 0; // these are unused under DGL coarse-grain locking + unsigned long flags; + local_save_flags(flags); // needed for coarse-grain DGLs? /************************************** * This code looks like it supports fine-grain locking, but it does not! 
@@ -597,7 +598,7 @@ static void select_next_lock_if_primary(struct litmus_lock *l, effective_priority(mutex->hp_waiter) : NULL; - + if (mutex->hp_waiter) TRACE_CUR("%s/%d is new highest-prio waiter\n", mutex->hp_waiter->comm, mutex->hp_waiter->pid); @@ -822,30 +823,32 @@ int prioq_mutex_lock(struct litmus_lock* l) } - int prioq_mutex_unlock(struct litmus_lock* l) { + int err = 0; struct task_struct *t = current, *next = NULL; + struct task_struct *old_max_eff_prio; struct prioq_mutex *mutex = prioq_mutex_from_lock(l); unsigned long flags; - struct task_struct *old_max_eff_prio; - #ifdef CONFIG_LITMUS_DGL_SUPPORT + raw_spinlock_t *dgl_lock; dgl_wait_state_t *dgl_wait = NULL; - raw_spinlock_t *dgl_lock = litmus->get_dgl_spinlock(t); #endif - int err = 0; - if (mutex->owner != t) { err = -EINVAL; return err; } +#ifdef CONFIG_LITMUS_DGL_SUPPORT + dgl_lock = litmus->get_dgl_spinlock(current); +#endif + lock_global_irqsave(dgl_lock, flags); lock_fine_irqsave(&mutex->lock, flags); + raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); TRACE_TASK(t, "Freeing lock %d\n", l->ident); @@ -855,13 +858,13 @@ int prioq_mutex_unlock(struct litmus_lock* l) if(tsk_rt(t)->inh_task){ struct task_struct *new_max_eff_prio = - top_priority(&tsk_rt(t)->hp_blocked_tasks); + top_priority(&tsk_rt(t)->hp_blocked_tasks); if((new_max_eff_prio == NULL) || - /* there was a change in eff prio */ + /* there was a change in eff prio */ ( (new_max_eff_prio != old_max_eff_prio) && /* and owner had the old eff prio */ - (effective_priority(t) == old_max_eff_prio)) ) + (effective_priority(t) == old_max_eff_prio)) ) { // old_max_eff_prio > new_max_eff_prio @@ -888,8 +891,6 @@ int prioq_mutex_unlock(struct litmus_lock* l) raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); - - mutex->owner = NULL; #ifdef CONFIG_LITMUS_DGL_SUPPORT @@ -900,11 +901,11 @@ int prioq_mutex_unlock(struct litmus_lock* l) */ wait_queue_t *q = list_entry(mutex->wait.task_list.next, wait_queue_t, task_list); get_queued_task_and_dgl_wait(q, &dgl_wait); - + if (dgl_wait) { TRACE_CUR("Checking to see if DGL waiter %s/%d can take its locks\n", dgl_wait->task->comm, dgl_wait->task->pid); - + if(__attempt_atomic_dgl_acquire(l, dgl_wait)) { /* failed. can't take this lock yet. we remain at head of prioq * allow hp requests in the future to go ahead of us. */ @@ -919,7 +920,7 @@ int prioq_mutex_unlock(struct litmus_lock* l) /* remove the first */ next = __waitqueue_dgl_remove_first(&mutex->wait, &dgl_wait); - + BUG_ON(dgl_wait && (next != dgl_wait->task)); } #else @@ -935,7 +936,7 @@ int prioq_mutex_unlock(struct litmus_lock* l) if (next == mutex->hp_waiter) { TRACE_CUR("%s/%d was highest-prio waiter\n", next->comm, next->pid); - + /* next has the highest priority --- it doesn't need to * inherit. 
However, we need to make sure that the * next-highest priority in the queue is reflected in @@ -945,13 +946,13 @@ int prioq_mutex_unlock(struct litmus_lock* l) effective_priority(mutex->hp_waiter) : NULL; - + if (mutex->hp_waiter) TRACE_CUR("%s/%d is new highest-prio waiter\n", mutex->hp_waiter->comm, mutex->hp_waiter->pid); else TRACE_CUR("no further waiters\n"); - + raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock); @@ -1019,8 +1020,8 @@ int prioq_mutex_unlock(struct litmus_lock* l) #ifdef CONFIG_LITMUS_DGL_SUPPORT out: #endif - unlock_global_irqrestore(dgl_lock, flags); + unlock_global_irqrestore(dgl_lock, flags); TRACE_TASK(t, "-- Freed lock %d --\n", l->ident); return err; diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c index 40daf8e16d74..69f30188f3ba 100644 --- a/litmus/sched_cedf.c +++ b/litmus/sched_cedf.c @@ -1316,6 +1316,14 @@ static int __decrease_priority_inheritance(struct task_struct* t, { int success = 1; + if (prio_inh == tsk_rt(t)->inh_task) { + /* relationship already established. */ + TRACE_TASK(t, "already inherits priority from %s/%d\n", + (prio_inh) ? prio_inh->comm : "(null)", + (prio_inh) ? prio_inh->pid : 0); + goto out; + } + if (prio_inh && (effective_priority(prio_inh) != prio_inh)) { TRACE_TASK(t, "Inheriting from %s/%d instead of the eff_prio = %s/%d!\n", prio_inh->comm, prio_inh->pid, @@ -1337,14 +1345,6 @@ static int __decrease_priority_inheritance(struct task_struct* t, #endif } - if (prio_inh == tsk_rt(t)->inh_task) { - /* relationship already established. */ - TRACE_TASK(t, "already inherits priority from %s/%d\n", - (prio_inh) ? prio_inh->comm : "(null)", - (prio_inh) ? prio_inh->pid : 0); - goto out; - } - #ifdef CONFIG_LITMUS_NESTED_LOCKING if(__edf_higher_prio(t, EFFECTIVE, prio_inh, BASE)) { #endif @@ -1469,7 +1469,7 @@ static void nested_increase_priority_inheritance(struct task_struct* t, if(blocked_lock) { - if(blocked_lock->ops->propagate_increase_inheritance) { + if(blocked_lock->ops->supports_nesting) { TRACE_TASK(t, "Inheritor is blocked (...perhaps). Checking lock %d.\n", blocked_lock->ident); @@ -1506,7 +1506,7 @@ static void nested_decrease_priority_inheritance(struct task_struct* t, raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); // unlock the t's heap. if(blocked_lock) { - if(blocked_lock->ops->propagate_decrease_inheritance) { + if(blocked_lock->ops->supports_nesting) { TRACE_TASK(t, "Inheritor is blocked (...perhaps). Checking lock %d.\n", blocked_lock->ident); @@ -1547,7 +1547,11 @@ static struct litmus_lock_ops cedf_fifo_mutex_lock_ops = { .dgl_can_quick_lock = NULL, .dgl_quick_lock = NULL, + + .supports_dgl = 1, + .requires_atomic_dgl = 0, #endif + .supports_nesting = 1, }; static struct litmus_lock* cedf_new_fifo_mutex(void) @@ -1574,7 +1578,11 @@ static struct litmus_lock_ops cedf_prioq_mutex_lock_ops = { .dgl_can_quick_lock = prioq_mutex_dgl_can_quick_lock, .dgl_quick_lock = prioq_mutex_dgl_quick_lock, + + .supports_dgl = 1, + .requires_atomic_dgl = 1, #endif + .supports_nesting = 1, }; static struct litmus_lock* cedf_new_prioq_mutex(void) @@ -1593,6 +1601,12 @@ static struct litmus_lock_ops cedf_ikglp_lock_ops = { // ikglp can only be an outer-most lock. 
.propagate_increase_inheritance = NULL, .propagate_decrease_inheritance = NULL, + +#ifdef CONFIG_LITMUS_DGL_SUPPORT + .supports_dgl = 0, + .requires_atomic_dgl = 0, +#endif + .supports_nesting = 0, }; static struct litmus_lock* cedf_new_ikglp(void* __user arg) @@ -1617,6 +1631,12 @@ static struct litmus_lock_ops cedf_kfmlp_lock_ops = { // kfmlp can only be an outer-most lock. .propagate_increase_inheritance = NULL, .propagate_decrease_inheritance = NULL, + +#ifdef CONFIG_LITMUS_DGL_SUPPORT + .supports_dgl = 0, + .requires_atomic_dgl = 0, +#endif + .supports_nesting = 0, }; -- cgit v1.2.2
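
Appendix (editorial, not part of the patch): a minimal userspace C sketch of
the all-or-nothing acquisition rule described in the commit message -- a task
may take its group of PRIOQ_MUTEXes only if every lock in the group is idle
(owner == NULL) and the task heads every lock's priority queue; otherwise it
takes none of them and its request stays buffered. All types and names below
are hypothetical simplifications for illustration; they are not the LITMUS^RT
implementation (which also handles inheritance, wake-ups, and locking of the
global DGL spinlock).

/* toy model of atomic DGL acquisition over priority-ordered mutexes */
#include <stdio.h>
#include <stddef.h>

#define MAX_WAITERS 8

struct task {
	const char *name;
	int prio;            /* lower value = higher priority */
};

struct prioq_mutex {
	struct task *owner;                  /* NULL => the lock "idles"  */
	struct task *waiters[MAX_WAITERS];   /* kept in priority order    */
	int nr_waiters;
};

/* head of the priority queue, or NULL if nobody is waiting */
static struct task *prioq_head(struct prioq_mutex *m)
{
	return m->nr_waiters ? m->waiters[0] : NULL;
}

/*
 * All-or-nothing check: succeed only if every lock in the DGL is idle and
 * 't' heads every queue; then take all of them (conceptually under one big
 * DGL spinlock). On failure, take none and leave the requests buffered.
 */
static int attempt_atomic_dgl_acquire(struct prioq_mutex **dgl, int size,
				      struct task *t)
{
	int i;

	for (i = 0; i < size; ++i)
		if (dgl[i]->owner != NULL || prioq_head(dgl[i]) != t)
			return -1;  /* at least one lock unavailable: take none */

	for (i = 0; i < size; ++i) {
		/* dequeue 't' (it is the head) and take ownership */
		int j;
		for (j = 1; j < dgl[i]->nr_waiters; ++j)
			dgl[i]->waiters[j - 1] = dgl[i]->waiters[j];
		dgl[i]->nr_waiters--;
		dgl[i]->owner = t;
	}
	return 0;
}

int main(void)
{
	struct task hi = { "hi", 1 }, lo = { "lo", 9 };
	struct prioq_mutex a = { .owner = NULL }, b = { .owner = NULL };
	struct prioq_mutex *dgl[2] = { &a, &b };

	/* 'hi' waits on both locks; 'lo' waits on 'b' behind 'hi' */
	a.waiters[a.nr_waiters++] = &hi;
	b.waiters[b.nr_waiters++] = &hi;
	b.waiters[b.nr_waiters++] = &lo;

	/* 'lo' cannot take 'b' even though 'b' idles: 'hi' heads the queue */
	printf("lo on {b}:    %s\n",
	       attempt_atomic_dgl_acquire(&dgl[1], 1, &lo) ? "blocked" : "acquired");

	/* 'hi' heads both queues, so the whole group is taken at once */
	printf("hi on {a, b}: %s\n",
	       attempt_atomic_dgl_acquire(dgl, 2, &hi) ? "blocked" : "acquired");
	return 0;
}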