From 09939a5991fedc0d9f95e0ec9f26aa75e9c2da23 Mon Sep 17 00:00:00 2001 From: Glenn Elliott Date: Fri, 8 Feb 2013 13:06:54 -0500 Subject: Add PRIOQ_MUTEX semaphore --- include/litmus/fdso.h | 6 +- include/litmus/locking.h | 36 +- include/litmus/prioq_lock.h | 61 +++ include/litmus/rt_param.h | 9 +- litmus/Makefile | 2 +- litmus/fdso.c | 1 + litmus/locking.c | 35 +- litmus/prioq_lock.c | 1162 +++++++++++++++++++++++++++++++++++++++++++ 8 files changed, 1302 insertions(+), 10 deletions(-) create mode 100644 include/litmus/prioq_lock.h create mode 100644 litmus/prioq_lock.c diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h index e1a0ac24b8a2..f7887288d8f5 100644 --- a/include/litmus/fdso.h +++ b/include/litmus/fdso.h @@ -35,7 +35,9 @@ typedef enum { KFMLP_SIMPLE_GPU_AFF_OBS = 11, KFMLP_GPU_AFF_OBS = 12, - MAX_OBJ_TYPE = 12 + PRIOQ_MUTEX = 13, + + MAX_OBJ_TYPE = 13 } obj_type_t; struct inode_obj_id { @@ -84,6 +86,6 @@ static inline void* od_lookup(int od, obj_type_t type) #define lookup_ics(od) ((struct ics*) od_lookup(od, ICS_ID)) #define lookup_fifo_mutex(od)((struct litmus_lock*) od_lookup(od, FIFO_MUTEX)) - +#define lookup_prioq_mutex(od)((struct litmus_lock*) od_lookup(od, PRIOQ_MUTEX)) #endif diff --git a/include/litmus/locking.h b/include/litmus/locking.h index cc62fa0cb044..660bfc7f8174 100644 --- a/include/litmus/locking.h +++ b/include/litmus/locking.h @@ -11,6 +11,7 @@ struct nested_info struct litmus_lock *lock; struct task_struct *hp_waiter_eff_prio; struct task_struct **hp_waiter_ptr; + struct task_struct **owner_ptr; struct binheap_node hp_binheap_node; }; @@ -30,6 +31,8 @@ struct litmus_lock_proc_ops { void (*remove)(struct litmus_lock *l); }; + + /* Generic base struct for LITMUS^RT userspace semaphores. * This structure should be embedded in protocol-specific semaphores. */ @@ -41,10 +44,8 @@ struct litmus_lock { #ifdef CONFIG_LITMUS_NESTED_LOCKING struct nested_info nest; -//#ifdef CONFIG_DEBUG_SPINLOCK char cheat_lockdep[2]; struct lock_class_key key; -//#endif #endif struct litmus_lock_proc_ops *proc; @@ -71,7 +72,32 @@ void select_next_lock(dgl_wait_state_t* dgl_wait /*, struct litmus_lock* prev_lo void init_dgl_waitqueue_entry(wait_queue_t *wq_node, dgl_wait_state_t* dgl_wait); int dgl_wake_up(wait_queue_t *wq_node, unsigned mode, int sync, void *key); void __waitqueue_dgl_remove_first(wait_queue_head_t *wq, dgl_wait_state_t** dgl_wait, struct task_struct **task); + + +int __attempt_atomic_dgl_acquire(struct litmus_lock *cur_lock, dgl_wait_state_t *dgl_wait); +#endif + + + + +static inline struct task_struct* get_queued_task(wait_queue_t* q) +{ + struct task_struct *queued; +#ifdef CONFIG_LITMUS_DGL_SUPPORT + if(q->func == dgl_wake_up) { + dgl_wait_state_t *dgl_wait = (dgl_wait_state_t*) q->private; + queued = dgl_wait->task; + } + else { + queued = (struct task_struct*) q->private; + } +#else + queued = (struct task_struct*) q->private; #endif + return queued; +} + + typedef int (*lock_op_t)(struct litmus_lock *l); typedef lock_op_t lock_close_t; @@ -94,16 +120,22 @@ struct litmus_lock_ops { /* The lock is no longer being referenced (mandatory method). 
*/ lock_free_t deallocate; + #ifdef CONFIG_LITMUS_NESTED_LOCKING void (*propagate_increase_inheritance)(struct litmus_lock* l, struct task_struct* t, raw_spinlock_t* to_unlock, unsigned long irqflags); void (*propagate_decrease_inheritance)(struct litmus_lock* l, struct task_struct* t, raw_spinlock_t* to_unlock, unsigned long irqflags); #endif + #ifdef CONFIG_LITMUS_DGL_SUPPORT raw_spinlock_t* (*get_dgl_spin_lock)(struct litmus_lock *l); int (*dgl_lock)(struct litmus_lock *l, dgl_wait_state_t* dgl_wait, wait_queue_t* wq_node); int (*is_owner)(struct litmus_lock *l, struct task_struct *t); void (*enable_priority)(struct litmus_lock *l, dgl_wait_state_t* dgl_wait); + + int (*dgl_can_quick_lock)(struct litmus_lock *l, struct task_struct *t); + void (*dgl_quick_lock)(struct litmus_lock *l, struct litmus_lock *cur_lock, + struct task_struct* t, wait_queue_t *q); #endif }; diff --git a/include/litmus/prioq_lock.h b/include/litmus/prioq_lock.h new file mode 100644 index 000000000000..f3c11d241991 --- /dev/null +++ b/include/litmus/prioq_lock.h @@ -0,0 +1,61 @@ +#ifndef LITMUS_PRIOQ_H +#define LITMUS_PRIOQ_H + +#include +#include +#include + +/* struct for semaphore with priority inheritance */ +struct prioq_mutex { + struct litmus_lock litmus_lock; + + /* current resource holder */ + struct task_struct *owner; + + /* highest-priority waiter */ + struct task_struct *hp_waiter; + + /* priority-ordered queue of waiting tasks. + * Ironically, we don't use a binheap because that would make DGL + * implementation a LOT harder. */ + wait_queue_head_t wait; + + /* we do some nesting within spinlocks, so we can't use the normal + sleeplocks found in wait_queue_head_t. */ + raw_spinlock_t lock; +}; + +static inline struct prioq_mutex* prioq_mutex_from_lock(struct litmus_lock* lock) +{ + return container_of(lock, struct prioq_mutex, litmus_lock); +} + +#ifdef CONFIG_LITMUS_DGL_SUPPORT +int prioq_mutex_is_owner(struct litmus_lock *l, struct task_struct *t); +int prioq_mutex_dgl_lock(struct litmus_lock *l, dgl_wait_state_t* dgl_wait, wait_queue_t* wq_node); +void prioq_mutex_enable_priority(struct litmus_lock *l, dgl_wait_state_t* dgl_wait); +void prioq_mutex_dgl_quick_lock(struct litmus_lock *l, struct litmus_lock *cur_lock, + struct task_struct* t, wait_queue_t *q); +int prioq_mutex_dgl_can_quick_lock(struct litmus_lock *l, struct task_struct *t); +#endif + +void prioq_mutex_propagate_increase_inheritance(struct litmus_lock* l, + struct task_struct* t, + raw_spinlock_t* to_unlock, + unsigned long irqflags); + +void prioq_mutex_propagate_decrease_inheritance(struct litmus_lock* l, + struct task_struct* t, + raw_spinlock_t* to_unlock, + unsigned long irqflags); + + +int prioq_mutex_lock(struct litmus_lock* l); +int prioq_mutex_unlock(struct litmus_lock* l); +int prioq_mutex_close(struct litmus_lock* l); +void prioq_mutex_free(struct litmus_lock* l); +struct litmus_lock* prioq_mutex_new(struct litmus_lock_ops*); + + +#endif + diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h index 144be3b6ee3d..716fc034c5f4 100644 --- a/include/litmus/rt_param.h +++ b/include/litmus/rt_param.h @@ -312,10 +312,6 @@ struct rt_param { gpu_migration_dist_t gpu_migration; int last_gpu; - - notify_rsrc_exit_t rsrc_exit_cb; - void* rsrc_exit_cb_args; - lt_t accum_gpu_time; lt_t gpu_time_stamp; @@ -323,6 +319,11 @@ struct rt_param { #endif #endif +#ifdef CONFIG_LITMUS_AFFINITY_LOCKING + notify_rsrc_exit_t rsrc_exit_cb; + void* rsrc_exit_cb_args; +#endif + #ifdef CONFIG_LITMUS_LOCKING /* Is the task being 
priority-boosted by a locking protocol? */ unsigned int priority_boosted:1; diff --git a/litmus/Makefile b/litmus/Makefile index 6ad94f69a347..6b7acf0bbf2c 100644 --- a/litmus/Makefile +++ b/litmus/Makefile @@ -33,7 +33,7 @@ obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o obj-$(CONFIG_SCHED_OVERHEAD_TRACE) += trace.o obj-$(CONFIG_LITMUS_LOCKING) += kfmlp_lock.o -obj-$(CONFIG_LITMUS_NESTED_LOCKING) += fifo_lock.o ikglp_lock.o +obj-$(CONFIG_LITMUS_NESTED_LOCKING) += fifo_lock.o prioq_lock.o ikglp_lock.o obj-$(CONFIG_LITMUS_SOFTIRQD) += litmus_softirq.o obj-$(CONFIG_LITMUS_PAI_SOFTIRQD) += litmus_pai_softirq.o obj-$(CONFIG_LITMUS_NVIDIA) += nvidia_info.o sched_trace_external.o diff --git a/litmus/fdso.c b/litmus/fdso.c index 1fcc47a6a62b..e23e7d5a5daa 100644 --- a/litmus/fdso.c +++ b/litmus/fdso.c @@ -42,6 +42,7 @@ static const struct fdso_ops* fdso_ops[] = { &generic_affinity_ops, /* KFMLP_SIMPLE_GPU_AFF_OBS */ &generic_affinity_ops, /* KFMLP_GPU_AFF_OBS */ #endif + &generic_lock_ops, /* PRIOQ_MUTEX */ }; static int fdso_create(void** obj_ref, obj_type_t type, void* __user config) diff --git a/litmus/locking.c b/litmus/locking.c index 58b5edd9df32..f7d33156cf49 100644 --- a/litmus/locking.c +++ b/litmus/locking.c @@ -246,6 +246,11 @@ void select_next_lock(dgl_wait_state_t* dgl_wait /*, struct litmus_lock* prev_lo //WARN_ON(dgl_wait->locks[dgl_wait->last_primary] != prev_lock); + if (dgl_wait->last_primary == 0) { + /* loop around */ + dgl_wait->last_primary = dgl_wait->size; + } + // note reverse order for(dgl_wait->last_primary = dgl_wait->last_primary - 1; dgl_wait->last_primary >= 0; @@ -327,6 +332,35 @@ static void snprintf_dgl(char* buf, size_t bsz, struct litmus_lock* dgl_locks[], } #endif + +/* only valid when locks are prioq locks!!! + * THE BIG DGL LOCK MUST BE HELD! */ +int __attempt_atomic_dgl_acquire(struct litmus_lock *cur_lock, dgl_wait_state_t *dgl_wait) +{ + int i; + + /* check to see if we can take all the locks */ + for(i = 0; i < dgl_wait->size; ++i) { + struct litmus_lock *l = dgl_wait->locks[i]; + if(!l->ops->dgl_can_quick_lock(l, dgl_wait->task)) + { + return -1; + } + } + + /* take the locks */ + for(i = 0; i < dgl_wait->size; ++i) { + struct litmus_lock *l = dgl_wait->locks[i]; + + l->ops->dgl_quick_lock(l, cur_lock, dgl_wait->task, &dgl_wait->wq_nodes[i]); + + BUG_ON(dgl_wait->task != *(l->nest.owner_ptr)); + } + + return 0; /* success */ +} + + static long do_litmus_dgl_lock(dgl_wait_state_t *dgl_wait) { int i; @@ -394,7 +428,6 @@ static long do_litmus_dgl_lock(dgl_wait_state_t *dgl_wait) raw_spin_unlock_irqrestore(dgl_lock, irqflags); // free dgl_lock before suspending suspend_for_lock(); // suspend!!! - //schedule(); // suspend!!! TS_DGL_LOCK_RESUME; diff --git a/litmus/prioq_lock.c b/litmus/prioq_lock.c new file mode 100644 index 000000000000..0091e4c1901e --- /dev/null +++ b/litmus/prioq_lock.c @@ -0,0 +1,1162 @@ +#include +#include + +#include +#include +#include + +#include + + +#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA) +#include +#endif + +static void __attribute__((unused)) +__dump_lock_info(struct prioq_mutex *mutex) +{ +#ifdef CONFIG_SCHED_DEBUG_TRACE + TRACE_CUR("%s (mutex: %p):\n", mutex->litmus_lock.name, mutex); + TRACE_CUR("owner: %s/%d (inh: %s/%d)\n", + (mutex->owner) ? + mutex->owner->comm : "null", + (mutex->owner) ? + mutex->owner->pid : 0, + (mutex->owner && tsk_rt(mutex->owner)->inh_task) ? + tsk_rt(mutex->owner)->inh_task->comm : "null", + (mutex->owner && tsk_rt(mutex->owner)->inh_task) ? 
+ tsk_rt(mutex->owner)->inh_task->pid : 0); + TRACE_CUR("hp waiter: %s/%d (inh: %s/%d)\n", + (mutex->hp_waiter) ? + mutex->hp_waiter->comm : "null", + (mutex->hp_waiter) ? + mutex->hp_waiter->pid : 0, + (mutex->hp_waiter && tsk_rt(mutex->hp_waiter)->inh_task) ? + tsk_rt(mutex->hp_waiter)->inh_task->comm : "null", + (mutex->hp_waiter && tsk_rt(mutex->hp_waiter)->inh_task) ? + tsk_rt(mutex->hp_waiter)->inh_task->pid : 0); + TRACE_CUR("blocked tasks, front to back:\n"); + if (waitqueue_active(&mutex->wait)) { + wait_queue_t *q; + struct list_head *pos; +#ifdef CONFIG_LITMUS_DGL_SUPPORT + dgl_wait_state_t *dgl_wait = NULL; +#endif + list_for_each(pos, &mutex->wait.task_list) { + struct task_struct *blocked_task; +#ifdef CONFIG_LITMUS_DGL_SUPPORT + int enabled = 1; +#endif + q = list_entry(pos, wait_queue_t, task_list); + +#ifdef CONFIG_LITMUS_DGL_SUPPORT + if(q->func == dgl_wake_up) { + dgl_wait = (dgl_wait_state_t*) q->private; + blocked_task = dgl_wait->task; + + if(tsk_rt(blocked_task)->blocked_lock != &mutex->litmus_lock) + enabled = 0; + } + else { + blocked_task = (struct task_struct*) q->private; + } +#else + blocked_task = (struct task_struct*) q->private; +#endif + TRACE_CUR("\t%s/%d (inh: %s/%d)" +#ifdef CONFIG_LITMUS_DGL_SUPPORT + " DGL enabled: %d" +#endif + "\n", + blocked_task->comm, blocked_task->pid, + (tsk_rt(blocked_task)->inh_task) ? + tsk_rt(blocked_task)->inh_task->comm : "null", + (tsk_rt(blocked_task)->inh_task) ? + tsk_rt(blocked_task)->inh_task->pid : 0 +#ifdef CONFIG_LITMUS_DGL_SUPPORT + , enabled +#endif + ); + } + } + else { + TRACE_CUR("\t\n"); + } +#endif +} + +static void __add_wait_queue_sorted(wait_queue_head_t *q, wait_queue_t *add_node) +{ + struct list_head *pq = &(q->task_list); + wait_queue_t *q_node; + struct task_struct *queued_task; + struct task_struct *add_task; + struct list_head *pos; + + if (list_empty(pq)) { + list_add_tail(&add_node->task_list, pq); + return; + } + + add_task = get_queued_task(add_node); + + /* less priority than tail? 
if so, go to tail */ + q_node = list_entry(pq->prev, wait_queue_t, task_list); + queued_task = get_queued_task(q_node); + if (litmus->compare(queued_task, add_task)) { + list_add_tail(&add_node->task_list, pq); + return; + } + + /* belongs at head or between nodes */ + list_for_each(pos, pq) { + q_node = list_entry(pos, wait_queue_t, task_list); + queued_task = get_queued_task(q_node); + if(litmus->compare(add_task, queued_task)) { + list_add(&add_node->task_list, pos->prev); + return; + } + } + + BUG(); +} + +static inline void __add_wait_queue_sorted_exclusive(wait_queue_head_t *q, wait_queue_t *wait) +{ + wait->flags |= WQ_FLAG_EXCLUSIVE; + __add_wait_queue_sorted(q, wait); +} + +static void __prioq_increase_pos(struct prioq_mutex *mutex, struct task_struct *t) +{ + wait_queue_t *q; + struct list_head *pos; + struct task_struct *queued; + + /* TODO: Make this efficient instead of remove/add */ + list_for_each(pos, &mutex->wait.task_list) { + q = list_entry(pos, wait_queue_t, task_list); + queued = get_queued_task(q); + if (queued == t) { + __remove_wait_queue(&mutex->wait, q); + __add_wait_queue_sorted(&mutex->wait, q); + return; + } + } + + BUG(); +} + +static void __prioq_decrease_pos(struct prioq_mutex *mutex, struct task_struct *t) +{ + wait_queue_t *q; + struct list_head *pos; + struct task_struct *queued; + + /* TODO: Make this efficient instead of remove/add */ + list_for_each(pos, &mutex->wait.task_list) { + q = list_entry(pos, wait_queue_t, task_list); + queued = get_queued_task(q); + if (queued == t) { + __remove_wait_queue(&mutex->wait, q); + __add_wait_queue_sorted(&mutex->wait, q); + return; + } + } + + BUG(); +} + + +/* caller is responsible for locking */ +static struct task_struct* __prioq_mutex_find_hp_waiter(struct prioq_mutex *mutex, + struct task_struct* skip) +{ + wait_queue_t *q; + struct list_head *pos; + struct task_struct *queued = NULL, *found = NULL; + + /* list in sorted order. higher-prio tasks likely at the front. */ + list_for_each(pos, &mutex->wait.task_list) { + q = list_entry(pos, wait_queue_t, task_list); + queued = get_queued_task(q); + + /* Compare task prios, find high prio task. */ + if (queued && queued != skip && litmus->compare(queued, found)) { + found = queued; + } + } + return found; +} + + +#ifdef CONFIG_LITMUS_DGL_SUPPORT + +int prioq_mutex_is_owner(struct litmus_lock *l, struct task_struct *t) +{ + struct prioq_mutex *mutex = prioq_mutex_from_lock(l); + return(mutex->owner == t); +} + +// return 1 if resource was immediatly acquired. +// Assumes mutex->lock is held. +// Must set task state to TASK_UNINTERRUPTIBLE if task blocks. 
+int prioq_mutex_dgl_lock(struct litmus_lock *l, dgl_wait_state_t* dgl_wait, + wait_queue_t* wq_node) +{ + struct prioq_mutex *mutex = prioq_mutex_from_lock(l); + struct task_struct *t = dgl_wait->task; + + int acquired_immediatly = 0; + + BUG_ON(t != current); + + if (mutex->owner) { + TRACE_TASK(t, "Enqueuing on lock %d (held by %s/%d).\n", + l->ident, mutex->owner->comm, mutex->owner->pid); + + init_dgl_waitqueue_entry(wq_node, dgl_wait); + + set_task_state(t, TASK_UNINTERRUPTIBLE); + __add_wait_queue_sorted_exclusive(&mutex->wait, wq_node); + } else { + TRACE_TASK(t, "Acquired lock %d with no blocking.\n", l->ident); + + /* it's ours now */ + mutex->owner = t; + + raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); + binheap_add(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks, + struct nested_info, hp_binheap_node); + raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); + + acquired_immediatly = 1; + } + + return acquired_immediatly; +} + +void prioq_mutex_enable_priority(struct litmus_lock *l, + dgl_wait_state_t* dgl_wait) +{ + struct prioq_mutex *mutex = prioq_mutex_from_lock(l); + struct task_struct *t = dgl_wait->task; + struct task_struct *owner = mutex->owner; + unsigned long flags = 0; // these are unused under DGL coarse-grain locking + + BUG_ON(owner == t); + + tsk_rt(t)->blocked_lock = l; + mb(); + + if (litmus->compare(t, mutex->hp_waiter)) { + struct task_struct *old_max_eff_prio; + struct task_struct *new_max_eff_prio; + struct task_struct *new_prio = NULL; + + if(mutex->hp_waiter) + TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n", + mutex->hp_waiter->comm, mutex->hp_waiter->pid); + else + TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n"); + + raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); + + old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); + mutex->hp_waiter = t; + l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter); + binheap_decrease(&l->nest.hp_binheap_node, + &tsk_rt(owner)->hp_blocked_tasks); + new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); + + if(new_max_eff_prio != old_max_eff_prio) { + TRACE_TASK(t, "is new hp_waiter.\n"); + + if ((effective_priority(owner) == old_max_eff_prio) || + (litmus->__compare(new_max_eff_prio, BASE, owner, EFFECTIVE))){ + new_prio = new_max_eff_prio; + } + } + else { + TRACE_TASK(t, "no change in max_eff_prio of heap.\n"); + } + + if(new_prio) { + litmus->nested_increase_prio(owner, new_prio, + &mutex->lock, flags); // unlocks lock. + } + else { + raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); + unlock_fine_irqrestore(&mutex->lock, flags); + } + } + else { + TRACE_TASK(t, "no change in hp_waiter.\n"); + unlock_fine_irqrestore(&mutex->lock, flags); + } +} + +static void select_next_lock_if_primary(struct litmus_lock *l, + dgl_wait_state_t *dgl_wait) +{ + if(tsk_rt(dgl_wait->task)->blocked_lock == l) { + TRACE_CUR("Lock %d in DGL was primary for %s/%d.\n", + l->ident, dgl_wait->task->comm, dgl_wait->task->pid); + tsk_rt(dgl_wait->task)->blocked_lock = NULL; + mb(); + select_next_lock(dgl_wait /*, l*/); // pick the next lock to be blocked on + } + else { + TRACE_CUR("Got lock early! 
Lock %d in DGL was NOT primary for %s/%d.\n", + l->ident, dgl_wait->task->comm, dgl_wait->task->pid); + } +} +#endif + + + + +#ifdef CONFIG_LITMUS_DGL_SUPPORT + +int prioq_mutex_dgl_can_quick_lock(struct litmus_lock *l, struct task_struct *t) +{ + struct prioq_mutex *mutex = prioq_mutex_from_lock(l); + + if(!mutex->owner && mutex->hp_waiter == t) { + wait_queue_t *front = list_entry(mutex->wait.task_list.next, wait_queue_t, task_list); + struct task_struct *at_front = get_queued_task(front); + if(t == at_front) { + return 1; + } + } + return 0; +} + +void prioq_mutex_dgl_quick_lock(struct litmus_lock *l, struct litmus_lock *cur_lock, + struct task_struct* t, wait_queue_t *q) +{ + struct prioq_mutex *mutex = prioq_mutex_from_lock(l); + + BUG_ON(mutex->owner); + BUG_ON(mutex->hp_waiter != t); + BUG_ON(t != get_queued_task(list_entry(mutex->wait.task_list.next, wait_queue_t, task_list))); + + + mutex->owner = t; + + if (l != cur_lock) { + /* we have to update the state of the other lock for it */ + __remove_wait_queue(&mutex->wait, q); + + mutex->hp_waiter = __prioq_mutex_find_hp_waiter(mutex, t); + l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ? + effective_priority(mutex->hp_waiter) : + NULL; + + if (mutex->hp_waiter) + TRACE_TASK(mutex->hp_waiter, "is new highest-prio waiter\n"); + else + TRACE("no further waiters\n"); + + raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); + + binheap_add(&l->nest.hp_binheap_node, + &tsk_rt(t)->hp_blocked_tasks, + struct nested_info, hp_binheap_node); + + raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); + } +} +#endif + + +int prioq_mutex_lock(struct litmus_lock* l) +{ + struct task_struct *t = current; + struct task_struct *owner; + struct prioq_mutex *mutex = prioq_mutex_from_lock(l); + wait_queue_t wait; + unsigned long flags; + +#ifdef CONFIG_LITMUS_DGL_SUPPORT + raw_spinlock_t *dgl_lock; +#endif + + if (!is_realtime(t)) + return -EPERM; + +#ifdef CONFIG_LITMUS_DGL_SUPPORT + dgl_lock = litmus->get_dgl_spinlock(t); +#endif + + lock_global_irqsave(dgl_lock, flags); + lock_fine_irqsave(&mutex->lock, flags); + + /* block if there is an owner, or if hp_waiter is blocked for DGL and + * prio(t) < prio(hp_waiter) */ + if (mutex->owner) { + TRACE_TASK(t, "Blocking on lock %d (held by %s/%d).\n", + l->ident, mutex->owner->comm, mutex->owner->pid); + +#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA) + // KLUDGE: don't count this suspension as time in the critical gpu + // critical section + if(tsk_rt(t)->held_gpus) { + tsk_rt(t)->suspend_gpu_tracker_on_block = 1; + } +#endif + + /* resource is not free => must suspend and wait */ + + owner = mutex->owner; + + init_waitqueue_entry(&wait, t); + + tsk_rt(t)->blocked_lock = l; /* record where we are blocked */ + mb(); // needed? 
+ + /* FIXME: interruptible would be nice some day */ + set_task_state(t, TASK_UNINTERRUPTIBLE); + + __add_wait_queue_sorted_exclusive(&mutex->wait, &wait); + + /* check if we need to activate priority inheritance */ + if (litmus->compare(t, mutex->hp_waiter)) { + + struct task_struct *old_max_eff_prio; + struct task_struct *new_max_eff_prio; + struct task_struct *new_prio = NULL; + + if(mutex->hp_waiter) + TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n", + mutex->hp_waiter->comm, mutex->hp_waiter->pid); + else + TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n"); + + raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); + + old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); + mutex->hp_waiter = t; + + TRACE_TASK(t, "prioq_mutex %d state after enqeue in priority queue\n", l->ident); + __dump_lock_info(mutex); + + l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter); + binheap_decrease(&l->nest.hp_binheap_node, + &tsk_rt(owner)->hp_blocked_tasks); + new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); + + if(new_max_eff_prio != old_max_eff_prio) { + TRACE_TASK(t, "is new hp_waiter.\n"); + + if ((effective_priority(owner) == old_max_eff_prio) || + (litmus->__compare(new_max_eff_prio, BASE, owner, EFFECTIVE))){ + new_prio = new_max_eff_prio; + } + } + else { + TRACE_TASK(t, "no change in max_eff_prio of heap.\n"); + } + + if(new_prio) { + litmus->nested_increase_prio(owner, new_prio, &mutex->lock, + flags); // unlocks lock. + } + else { + raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); + unlock_fine_irqrestore(&mutex->lock, flags); + } + } + else { + TRACE_TASK(t, "no change in hp_waiter.\n"); + + TRACE_TASK(t, "prioq_mutex %d state after enqeue in priority queue\n", l->ident); + __dump_lock_info(mutex); + + unlock_fine_irqrestore(&mutex->lock, flags); + } + + unlock_global_irqrestore(dgl_lock, flags); + + TS_LOCK_SUSPEND; + + /* We depend on the FIFO order. Thus, we don't need to recheck + * when we wake up; we are guaranteed to have the lock since + * there is only one wake up per release. + */ + + suspend_for_lock(); + + TS_LOCK_RESUME; + + /* Since we hold the lock, no other task will change + * ->owner. We can thus check it without acquiring the spin + * lock. 
*/ + BUG_ON(mutex->owner != t); + + TRACE_TASK(t, "Acquired lock %d.\n", l->ident); + + } else { + TRACE_TASK(t, "Acquired lock %d with no blocking.\n", l->ident); + + /* it's ours now */ + mutex->owner = t; + + raw_spin_lock(&tsk_rt(mutex->owner)->hp_blocked_tasks_lock); + binheap_add(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks, + struct nested_info, hp_binheap_node); + raw_spin_unlock(&tsk_rt(mutex->owner)->hp_blocked_tasks_lock); + + + unlock_fine_irqrestore(&mutex->lock, flags); + unlock_global_irqrestore(dgl_lock, flags); + } + + return 0; +} + + + +int prioq_mutex_unlock(struct litmus_lock* l) +{ + struct task_struct *t = current, *next = NULL; + struct prioq_mutex *mutex = prioq_mutex_from_lock(l); + unsigned long flags; + + struct task_struct *old_max_eff_prio; + + int wake_up_task = 1; + +#ifdef CONFIG_LITMUS_DGL_SUPPORT + dgl_wait_state_t *dgl_wait = NULL; + raw_spinlock_t *dgl_lock = litmus->get_dgl_spinlock(t); +#endif + + int err = 0; + + if (mutex->owner != t) { + err = -EINVAL; + return err; + } + + lock_global_irqsave(dgl_lock, flags); + lock_fine_irqsave(&mutex->lock, flags); + + raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); + + TRACE_TASK(t, "Freeing lock %d\n", l->ident); + + old_max_eff_prio = top_priority(&tsk_rt(t)->hp_blocked_tasks); + binheap_delete(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks); + + if(tsk_rt(t)->inh_task){ + struct task_struct *new_max_eff_prio = + top_priority(&tsk_rt(t)->hp_blocked_tasks); + + if((new_max_eff_prio == NULL) || + /* there was a change in eff prio */ + ( (new_max_eff_prio != old_max_eff_prio) && + /* and owner had the old eff prio */ + (effective_priority(t) == old_max_eff_prio)) ) + { + // old_max_eff_prio > new_max_eff_prio + + if(litmus->__compare(new_max_eff_prio, BASE, t, EFFECTIVE)) { + TRACE_TASK(t, "new_max_eff_prio > task's eff_prio-- new_max_eff_prio: %s/%d task: %s/%d [%s/%d]\n", + new_max_eff_prio->comm, new_max_eff_prio->pid, + t->comm, t->pid, tsk_rt(t)->inh_task->comm, + tsk_rt(t)->inh_task->pid); + WARN_ON(1); + } + + litmus->decrease_prio(t, new_max_eff_prio); + } + } + + if(binheap_empty(&tsk_rt(t)->hp_blocked_tasks) && + tsk_rt(t)->inh_task != NULL) + { + WARN_ON(tsk_rt(t)->inh_task != NULL); + TRACE_TASK(t, "No more locks are held, but eff_prio = %s/%d\n", + tsk_rt(t)->inh_task->comm, tsk_rt(t)->inh_task->pid); + } + + raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); + + + /* check if there are jobs waiting for this resource */ +#ifdef CONFIG_LITMUS_DGL_SUPPORT + __waitqueue_dgl_remove_first(&mutex->wait, &dgl_wait, &next); + if(dgl_wait) { + next = dgl_wait->task; + } +#else + + next = __waitqueue_remove_first(&mutex->wait); +#endif + if (next) { + /* next becomes the resouce holder */ + mutex->owner = next; + TRACE_CUR("lock %d ownership passed to %s/%d\n", l->ident, next->comm, next->pid); + + /* determine new hp_waiter if necessary */ + if (next == mutex->hp_waiter) { + + TRACE_TASK(next, "was highest-prio waiter\n"); + /* next has the highest priority --- it doesn't need to + * inherit. However, we need to make sure that the + * next-highest priority in the queue is reflected in + * hp_waiter. */ + mutex->hp_waiter = __prioq_mutex_find_hp_waiter(mutex, next); + l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ? 
+ effective_priority(mutex->hp_waiter) : + NULL; + + if (mutex->hp_waiter) + TRACE_TASK(mutex->hp_waiter, "is new highest-prio waiter\n"); + else + TRACE("no further waiters\n"); + + raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock); + + binheap_add(&l->nest.hp_binheap_node, + &tsk_rt(next)->hp_blocked_tasks, + struct nested_info, hp_binheap_node); + +#ifdef CONFIG_LITMUS_DGL_SUPPORT + if(dgl_wait) { + select_next_lock_if_primary(l, dgl_wait); + --(dgl_wait->nr_remaining); + wake_up_task = (dgl_wait->nr_remaining == 0); + } +#endif + + raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); + } + else { + /* Well, if 'next' is not the highest-priority waiter, + * then it (probably) ought to inherit the highest-priority + * waiter's priority. */ + TRACE_TASK(next, "is not hp_waiter of lock %d.\n", l->ident); + + raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock); + + binheap_add(&l->nest.hp_binheap_node, + &tsk_rt(next)->hp_blocked_tasks, + struct nested_info, hp_binheap_node); + + /* It is possible that 'next' *should* be the hp_waiter, but isn't + * because that update hasn't yet executed (update operation is + * probably blocked on mutex->lock). So only inherit if the top of + * 'next's top heap node is indeed the effective prio. of hp_waiter. + * (We use l->hp_waiter_eff_prio instead of effective_priority(hp_waiter) + * since the effective priority of hp_waiter can change (and the + * update has not made it to this lock).) + */ +#ifdef CONFIG_LITMUS_DGL_SUPPORT + if((l->nest.hp_waiter_eff_prio != NULL) && + (top_priority(&tsk_rt(next)->hp_blocked_tasks) == l->nest.hp_waiter_eff_prio)) + { + if(dgl_wait && tsk_rt(next)->blocked_lock) { + if(litmus->__compare(l->nest.hp_waiter_eff_prio, BASE, next, EFFECTIVE)) { + litmus->nested_increase_prio(next, l->nest.hp_waiter_eff_prio, &mutex->lock, flags); // unlocks lock && hp_blocked_tasks_lock. + goto out; // all spinlocks are released. bail out now. 
+ } + } + else { + litmus->increase_prio(next, l->nest.hp_waiter_eff_prio); + } + } + + raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); +#else + if(likely(top_priority(&tsk_rt(next)->hp_blocked_tasks) == l->nest.hp_waiter_eff_prio)) + { + litmus->increase_prio(next, l->nest.hp_waiter_eff_prio); + } + raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); +#endif + } + + if(wake_up_task) { + TRACE_TASK(next, "waking up since it is no longer blocked.\n"); + + tsk_rt(next)->blocked_lock = NULL; + mb(); + +#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA) + // re-enable tracking + if(tsk_rt(next)->held_gpus) { + tsk_rt(next)->suspend_gpu_tracker_on_block = 0; + } +#endif + + wake_up_process(next); + } + else { + TRACE_TASK(next, "is still blocked.\n"); + } + } + else { + /* becomes available */ + mutex->owner = NULL; + } + + unlock_fine_irqrestore(&mutex->lock, flags); + +#ifdef CONFIG_LITMUS_DGL_SUPPORT +out: +#endif + unlock_global_irqrestore(dgl_lock, flags); + + TRACE_TASK(t, "-- Freed lock %d --\n", l->ident); + + return err; +} + + +void prioq_mutex_propagate_increase_inheritance(struct litmus_lock* l, + struct task_struct* t, + raw_spinlock_t* to_unlock, + unsigned long irqflags) +{ + struct prioq_mutex *mutex = prioq_mutex_from_lock(l); + + // relay-style locking + lock_fine(&mutex->lock); + unlock_fine(to_unlock); + + __prioq_increase_pos(mutex, t); + + if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked + struct task_struct *owner = mutex->owner; + + struct task_struct *old_max_eff_prio; + struct task_struct *new_max_eff_prio; + + raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); + + old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); + + if((t != mutex->hp_waiter) && litmus->compare(t, mutex->hp_waiter)) { + TRACE_TASK(t, "is new highest-prio waiter by propagation.\n"); + mutex->hp_waiter = t; + + TRACE_TASK(t, "prioq_mutex %d state after prio increase in priority queue\n", l->ident); + __dump_lock_info(mutex); + } + else { + TRACE_TASK(t, "prioq_mutex %d state after prio increase in priority queue\n", l->ident); + __dump_lock_info(mutex); + } + + if(t == mutex->hp_waiter) { + // reflect the decreased priority in the heap node. + l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter); + + BUG_ON(!binheap_is_in_heap(&l->nest.hp_binheap_node)); + BUG_ON(!binheap_is_in_this_heap(&l->nest.hp_binheap_node, + &tsk_rt(owner)->hp_blocked_tasks)); + + binheap_decrease(&l->nest.hp_binheap_node, + &tsk_rt(owner)->hp_blocked_tasks); + } + + new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); + + + if(new_max_eff_prio != old_max_eff_prio) { + // new_max_eff_prio > old_max_eff_prio holds. + if ((effective_priority(owner) == old_max_eff_prio) || + (litmus->__compare(new_max_eff_prio, BASE, owner, EFFECTIVE))) { + TRACE_CUR("Propagating inheritance to holder of lock %d.\n", + l->ident); + + // beware: recursion + litmus->nested_increase_prio(owner, new_max_eff_prio, + &mutex->lock, irqflags); // unlocks mutex->lock + } + else { + TRACE_CUR("Lower priority than holder %s/%d. 
No propagation.\n", + owner->comm, owner->pid); + raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); + unlock_fine_irqrestore(&mutex->lock, irqflags); + } + } + else { + TRACE_TASK(mutex->owner, "No change in maxiumum effective priority.\n"); + raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); + unlock_fine_irqrestore(&mutex->lock, irqflags); + } + } + else { + struct litmus_lock *still_blocked; + + TRACE_TASK(t, "prioq_mutex %d state after prio increase in priority queue\n", l->ident); + __dump_lock_info(mutex); + + still_blocked = tsk_rt(t)->blocked_lock; + + TRACE_TASK(t, "is not blocked on lock %d.\n", l->ident); + if(still_blocked) { + TRACE_TASK(t, "is still blocked on a lock though (lock %d).\n", + still_blocked->ident); + if(still_blocked->ops->propagate_increase_inheritance) { + /* due to relay-style nesting of spinlocks (acq. A, acq. B, free A, free B) + we know that task 't' has not released any locks behind us in this + chain. Propagation just needs to catch up with task 't'. */ + still_blocked->ops->propagate_increase_inheritance(still_blocked, + t, + &mutex->lock, + irqflags); + } + else { + TRACE_TASK(t, + "Inheritor is blocked on lock (%p) that does not " + "support nesting!\n", + still_blocked); + unlock_fine_irqrestore(&mutex->lock, irqflags); + } + } + else { + unlock_fine_irqrestore(&mutex->lock, irqflags); + } + } +} + + +void prioq_mutex_propagate_decrease_inheritance(struct litmus_lock* l, + struct task_struct* t, + raw_spinlock_t* to_unlock, + unsigned long irqflags) +{ + struct prioq_mutex *mutex = prioq_mutex_from_lock(l); + + // relay-style locking + lock_fine(&mutex->lock); + unlock_fine(to_unlock); + + __prioq_decrease_pos(mutex, t); + + if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked + if(t == mutex->hp_waiter) { + struct task_struct *owner = mutex->owner; + + struct task_struct *old_max_eff_prio; + struct task_struct *new_max_eff_prio; + + raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); + + old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); + + binheap_delete(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); + mutex->hp_waiter = __prioq_mutex_find_hp_waiter(mutex, NULL); + + TRACE_TASK(t, "prioq_mutex %d state after prio decrease in priority queue\n", l->ident); + __dump_lock_info(mutex); + + l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ? + effective_priority(mutex->hp_waiter) : NULL; + binheap_add(&l->nest.hp_binheap_node, + &tsk_rt(owner)->hp_blocked_tasks, + struct nested_info, hp_binheap_node); + + new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); + + if((old_max_eff_prio != new_max_eff_prio) && + (effective_priority(owner) == old_max_eff_prio)) + { + // Need to set new effective_priority for owner + + struct task_struct *decreased_prio; + + TRACE_CUR("Propagating decreased inheritance to holder of lock %d.\n", + l->ident); + + if(litmus->__compare(new_max_eff_prio, BASE, owner, BASE)) { + TRACE_CUR("%s/%d has greater base priority than base priority of owner (%s/%d) of lock %d.\n", + (new_max_eff_prio) ? new_max_eff_prio->comm : "null", + (new_max_eff_prio) ? new_max_eff_prio->pid : 0, + owner->comm, + owner->pid, + l->ident); + + decreased_prio = new_max_eff_prio; + } + else { + TRACE_CUR("%s/%d has lesser base priority than base priority of owner (%s/%d) of lock %d.\n", + (new_max_eff_prio) ? new_max_eff_prio->comm : "null", + (new_max_eff_prio) ? 
new_max_eff_prio->pid : 0, + owner->comm, + owner->pid, + l->ident); + + decreased_prio = NULL; + } + + // beware: recursion + litmus->nested_decrease_prio(owner, decreased_prio, &mutex->lock, irqflags); // will unlock mutex->lock + } + else { + raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); + unlock_fine_irqrestore(&mutex->lock, irqflags); + } + } + else { + TRACE_TASK(t, "prioq_mutex %d state after prio decrease in priority queue\n", l->ident); + __dump_lock_info(mutex); + + TRACE_TASK(t, "is not hp_waiter. No propagation.\n"); + unlock_fine_irqrestore(&mutex->lock, irqflags); + } + } + else { + struct litmus_lock *still_blocked; + + TRACE_TASK(t, "prioq_mutex %d state after prio decrease in priority queue\n", l->ident); + __dump_lock_info(mutex); + + still_blocked = tsk_rt(t)->blocked_lock; + + TRACE_TASK(t, "is not blocked on lock %d.\n", l->ident); + if(still_blocked) { + TRACE_TASK(t, "is still blocked on a lock though (lock %d).\n", + still_blocked->ident); + if(still_blocked->ops->propagate_decrease_inheritance) { + /* due to linked nesting of spinlocks (acq. A, acq. B, free A, free B) + we know that task 't' has not released any locks behind us in this + chain. propagation just needs to catch up with task 't' */ + still_blocked->ops->propagate_decrease_inheritance(still_blocked, + t, + &mutex->lock, + irqflags); + } + else { + TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", + still_blocked); + unlock_fine_irqrestore(&mutex->lock, irqflags); + } + } + else { + unlock_fine_irqrestore(&mutex->lock, irqflags); + } + } +} + + +int prioq_mutex_close(struct litmus_lock* l) +{ + struct task_struct *t = current; + struct prioq_mutex *mutex = prioq_mutex_from_lock(l); + unsigned long flags; + + int owner; + +#ifdef CONFIG_LITMUS_DGL_SUPPORT + raw_spinlock_t *dgl_lock = litmus->get_dgl_spinlock(t); +#endif + + lock_global_irqsave(dgl_lock, flags); + lock_fine_irqsave(&mutex->lock, flags); + + owner = (mutex->owner == t); + + unlock_fine_irqrestore(&mutex->lock, flags); + unlock_global_irqrestore(dgl_lock, flags); + + /* + TODO: Currently panic. FIX THIS! + if (owner) + prioq_mutex_unlock(l); + */ + + return 0; +} + +void prioq_mutex_free(struct litmus_lock* lock) +{ + kfree(prioq_mutex_from_lock(lock)); +} + + +/* The following may race if DGLs are enabled. Only examine /proc if things + appear to be locked up. TODO: FIX THIS! Must find an elegant way to transmit + DGL lock to function. */ +static int prioq_proc_print(char *page, char **start, off_t off, int count, int *eof, void *data) +{ + struct prioq_mutex *mutex = prioq_mutex_from_lock((struct litmus_lock*)data); + + int attempts = 0; + const int max_attempts = 10; + int locked = 0; + unsigned long flags; + + int size = count; + char *next = page; + int w; + + while(attempts < max_attempts) + { + locked = raw_spin_trylock_irqsave(&mutex->lock, flags); + + if (unlikely(!locked)) { + ++attempts; + cpu_relax(); + } + else { + break; + } + } + + if (locked) { + w = scnprintf(next, size, "%s (mutex: %p, data: %p):\n", mutex->litmus_lock.name, mutex, data); + size -= w; + next += w; + + w = scnprintf(next, size, + "owner: %s/%d (inh: %s/%d)\n", + (mutex->owner) ? + mutex->owner->comm : "null", + (mutex->owner) ? + mutex->owner->pid : 0, + (mutex->owner && tsk_rt(mutex->owner)->inh_task) ? + tsk_rt(mutex->owner)->inh_task->comm : "null", + (mutex->owner && tsk_rt(mutex->owner)->inh_task) ? 
+ tsk_rt(mutex->owner)->inh_task->pid : 0); + size -= w; + next += w; + + w = scnprintf(next, size, + "hp waiter: %s/%d (inh: %s/%d)\n", + (mutex->hp_waiter) ? + mutex->hp_waiter->comm : "null", + (mutex->hp_waiter) ? + mutex->hp_waiter->pid : 0, + (mutex->hp_waiter && tsk_rt(mutex->hp_waiter)->inh_task) ? + tsk_rt(mutex->hp_waiter)->inh_task->comm : "null", + (mutex->hp_waiter && tsk_rt(mutex->hp_waiter)->inh_task) ? + tsk_rt(mutex->hp_waiter)->inh_task->pid : 0); + size -= w; + next += w; + + w = scnprintf(next, size, "\nblocked tasks, front to back:\n"); + size -= w; + next += w; + + if (waitqueue_active(&mutex->wait)) { + wait_queue_t *q; + struct list_head *pos; +#ifdef CONFIG_LITMUS_DGL_SUPPORT + dgl_wait_state_t *dgl_wait = NULL; +#endif + list_for_each(pos, &mutex->wait.task_list) { + struct task_struct *blocked_task; +#ifdef CONFIG_LITMUS_DGL_SUPPORT + int enabled = 1; +#endif + q = list_entry(pos, wait_queue_t, task_list); + + blocked_task = get_queued_task(q); +#ifdef CONFIG_LITMUS_DGL_SUPPORT + if(q->func == dgl_wake_up) { + dgl_wait = (dgl_wait_state_t*) q->private; + blocked_task = dgl_wait->task; + + if(tsk_rt(blocked_task)->blocked_lock != &mutex->litmus_lock) + enabled = 0; + } + else { + blocked_task = (struct task_struct*) q->private; + } +#else + blocked_task = (struct task_struct*) q->private; +#endif + + w = scnprintf(next, size, + "\t%s/%d (inh: %s/%d)" +#ifdef CONFIG_LITMUS_DGL_SUPPORT + " DGL enabled: %d" +#endif + "\n", + blocked_task->comm, blocked_task->pid, + (tsk_rt(blocked_task)->inh_task) ? + tsk_rt(blocked_task)->inh_task->comm : "null", + (tsk_rt(blocked_task)->inh_task) ? + tsk_rt(blocked_task)->inh_task->pid : 0 +#ifdef CONFIG_LITMUS_DGL_SUPPORT + , enabled +#endif + ); + size -= w; + next += w; + } + } + else { + w = scnprintf(next, size, "\t\n"); + size -= w; + next += w; + } + + raw_spin_unlock_irqrestore(&mutex->lock, flags); + } + else { + w = scnprintf(next, size, "%s is busy.\n", mutex->litmus_lock.name); + size -= w; + next += w; + } + + return count - size; +} + +static void prioq_proc_add(struct litmus_lock* l) +{ + snprintf(l->name, LOCK_NAME_LEN, "prioq-%d", l->ident); + + l->proc_entry = litmus_add_proc_lock(l, prioq_proc_print); +} + +static void prioq_proc_remove(struct litmus_lock* l) +{ + litmus_remove_proc_lock(l); +} + +static struct litmus_lock_proc_ops prioq_proc_ops = +{ + .add = prioq_proc_add, + .remove = prioq_proc_remove +}; + + +struct litmus_lock* prioq_mutex_new(struct litmus_lock_ops* ops) +{ + struct prioq_mutex* mutex; + + mutex = kmalloc(sizeof(*mutex), GFP_KERNEL); + if (!mutex) + return NULL; + memset(mutex, 0, sizeof(*mutex)); + + mutex->litmus_lock.ops = ops; + mutex->owner = NULL; + mutex->hp_waiter = NULL; + init_waitqueue_head(&mutex->wait); + + +#ifdef CONFIG_DEBUG_SPINLOCK + { + __raw_spin_lock_init(&mutex->lock, + ((struct litmus_lock*)mutex)->cheat_lockdep, + &((struct litmus_lock*)mutex)->key); + } +#else + raw_spin_lock_init(&mutex->lock); +#endif + + ((struct litmus_lock*)mutex)->nest.hp_waiter_ptr = &mutex->hp_waiter; + ((struct litmus_lock*)mutex)->nest.owner_ptr = &mutex->owner; + + ((struct litmus_lock*)mutex)->proc = &prioq_proc_ops; + + return &mutex->litmus_lock; +} + -- cgit v1.2.2
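
Note (not part of the patch): this change adds the PRIOQ_MUTEX object type and its entry points, but the wiring into a scheduler plugin is done elsewhere. As a rough sketch of how a plugin's allocate_lock() hook could instantiate the new lock type, the fragment below fills a litmus_lock_ops table with the functions declared in prioq_lock.h. The names my_prioq_mutex_lock_ops and my_plugin_allocate_lock are placeholders, the omitted get_dgl_spin_lock callback would point at the plugin's own DGL spinlock getter, and the -ENXIO/-ENOMEM returns follow common LITMUS^RT convention rather than anything this patch mandates.

#include <linux/errno.h>
#include <litmus/fdso.h>
#include <litmus/prioq_lock.h>

/* ops table built from the entry points exported by prioq_lock.h */
static struct litmus_lock_ops my_prioq_mutex_lock_ops = {
	.close      = prioq_mutex_close,
	.lock       = prioq_mutex_lock,
	.unlock     = prioq_mutex_unlock,
	.deallocate = prioq_mutex_free,
#ifdef CONFIG_LITMUS_NESTED_LOCKING
	.propagate_increase_inheritance = prioq_mutex_propagate_increase_inheritance,
	.propagate_decrease_inheritance = prioq_mutex_propagate_decrease_inheritance,
#endif
#ifdef CONFIG_LITMUS_DGL_SUPPORT
	.dgl_lock           = prioq_mutex_dgl_lock,
	.is_owner           = prioq_mutex_is_owner,
	.enable_priority    = prioq_mutex_enable_priority,
	.dgl_can_quick_lock = prioq_mutex_dgl_can_quick_lock,
	.dgl_quick_lock     = prioq_mutex_dgl_quick_lock,
	/* .get_dgl_spin_lock = <plugin's DGL spinlock getter> */
#endif
};

/* hypothetical plugin hook: allocate a PRIOQ_MUTEX when requested via fdso */
static long my_plugin_allocate_lock(struct litmus_lock **lock, int type,
				    void* __user config)
{
	if (type == PRIOQ_MUTEX) {
		*lock = prioq_mutex_new(&my_prioq_mutex_lock_ops);
		return (*lock) ? 0 : -ENOMEM;
	}
	return -ENXIO;
}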