From c0667dc4894e913048cf8904f0ce9a79b481b556 Mon Sep 17 00:00:00 2001 From: Glenn Elliott Date: Fri, 13 Apr 2012 16:18:03 -0400 Subject: Move RSM and IKGLP imp. to own .c files Also reformatted the code to be slightly more compliant with standard coding practice. --- litmus/Makefile | 2 + litmus/edf_common.c | 16 +- litmus/ikglp_lock.c | 1587 +++++++++++++++++++++++++++ litmus/litmus.c | 2 + litmus/locking.c | 276 ++--- litmus/rsm_lock.c | 774 +++++++++++++ litmus/sched_gsn_edf.c | 2851 ++++-------------------------------------------- litmus/sched_plugin.c | 34 +- 8 files changed, 2763 insertions(+), 2779 deletions(-) create mode 100644 litmus/ikglp_lock.c create mode 100644 litmus/rsm_lock.c (limited to 'litmus') diff --git a/litmus/Makefile b/litmus/Makefile index b05f7982d823..c2449a761ea4 100644 --- a/litmus/Makefile +++ b/litmus/Makefile @@ -28,3 +28,5 @@ obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o obj-$(CONFIG_SCHED_OVERHEAD_TRACE) += trace.o + +obj-$(CONFIG_LITMUS_NESTED_LOCKING) += rsm_lock.o ikglp_lock.o diff --git a/litmus/edf_common.c b/litmus/edf_common.c index 5ea6b1bc7f24..4b65be7302be 100644 --- a/litmus/edf_common.c +++ b/litmus/edf_common.c @@ -60,7 +60,7 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) first_task = first->rt_param.inh_task; } if (unlikely(second->rt_param.inh_task) -#ifdef CONFIG_LITMUS_NESTED_LOCKING +#ifdef CONFIG_LITMUS_NESTED_LOCKING && (second_mode == EFFECTIVE) #endif ) { @@ -85,24 +85,24 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) // // rate-monotonic for testing // return !is_realtime(second_task) || -// +// // /* is the deadline of the first task earlier? // * Then it has higher priority. // */ // shorter_period(first_task, second_task) || -// +// // /* Do we have a deadline tie? // * Then break by PID. // */ // (get_period(first_task) == get_period(second_task) && // (first_task->pid < second_task->pid || -// +// // /* If the PIDs are the same then the task with the EFFECTIVE // * priority wins. // */ // (first_task->pid == second_task->pid && -// !second->rt_param.inh_task))); - + return !is_realtime(second_task) || /* is the deadline of the first task earlier? 
@@ -134,7 +134,7 @@ int edf_max_heap_order(struct binheap_node *a, struct binheap_node *b) { struct nested_info *l_a = (struct nested_info *)binheap_entry(a, struct nested_info, hp_binheap_node); struct nested_info *l_b = (struct nested_info *)binheap_entry(b, struct nested_info, hp_binheap_node); - + return __edf_higher_prio(l_a->hp_waiter_eff_prio, EFFECTIVE, l_b->hp_waiter_eff_prio, EFFECTIVE); } @@ -147,7 +147,7 @@ int edf_max_heap_base_priority_order(struct binheap_node *a, struct binheap_node { struct nested_info *l_a = (struct nested_info *)binheap_entry(a, struct nested_info, hp_binheap_node); struct nested_info *l_b = (struct nested_info *)binheap_entry(b, struct nested_info, hp_binheap_node); - + return __edf_higher_prio(l_a->hp_waiter_eff_prio, BASE, l_b->hp_waiter_eff_prio, BASE); } diff --git a/litmus/ikglp_lock.c b/litmus/ikglp_lock.c new file mode 100644 index 000000000000..0ae9994111fb --- /dev/null +++ b/litmus/ikglp_lock.c @@ -0,0 +1,1587 @@ +#include +#include + +#include +#include +#include + +#include + +int ikglp_edf_max_heap_base_priority_order(struct binheap_node *a, + struct binheap_node *b) +{ + ikglp_heap_node_t *d_a = binheap_entry(a, ikglp_heap_node_t, node); + ikglp_heap_node_t *d_b = binheap_entry(b, ikglp_heap_node_t, node); + + BUG_ON(!d_a); + BUG_ON(!d_b); + + return __edf_higher_prio(d_a->task, BASE, d_b->task, BASE); +} + +int ikglp_edf_min_heap_base_priority_order(struct binheap_node *a, + struct binheap_node *b) +{ + ikglp_heap_node_t *d_a = binheap_entry(a, ikglp_heap_node_t, node); + ikglp_heap_node_t *d_b = binheap_entry(b, ikglp_heap_node_t, node); + + return __edf_higher_prio(d_b->task, BASE, d_a->task, BASE); +} + +int ikglp_donor_edf_max_heap_base_priority_order(struct binheap_node *a, + struct binheap_node *b) +{ + ikglp_wait_state_t *d_a = binheap_entry(a, ikglp_wait_state_t, node); + ikglp_wait_state_t *d_b = binheap_entry(b, ikglp_wait_state_t, node); + + return __edf_higher_prio(d_a->task, BASE, d_b->task, BASE); +} + + +int ikglp_edf_min_heap_donee_order(struct binheap_node *a, + struct binheap_node *b) +{ + struct task_struct *prio_a, *prio_b; + + ikglp_donee_heap_node_t *d_a = + binheap_entry(a, ikglp_donee_heap_node_t, node); + ikglp_donee_heap_node_t *d_b = + binheap_entry(b, ikglp_donee_heap_node_t, node); + + if(!d_a->donor_info) { + prio_a = d_a->task; + } + else { + prio_a = d_a->donor_info->task; + BUG_ON(d_a->task != d_a->donor_info->donee_info->task); + } + + if(!d_b->donor_info) { + prio_b = d_b->task; + } + else { + prio_b = d_b->donor_info->task; + BUG_ON(d_b->task != d_b->donor_info->donee_info->task); + } + + // note reversed order + return __edf_higher_prio(prio_b, BASE, prio_a, BASE); +} + + + +static inline int ikglp_get_idx(struct ikglp_semaphore *sem, + struct fifo_queue *queue) +{ + return (queue - &sem->fifo_queues[0]); +} + +static inline struct fifo_queue* ikglp_get_queue(struct ikglp_semaphore *sem, + struct task_struct *holder) +{ + int i; + for(i = 0; i < sem->nr_replicas; ++i) + if(sem->fifo_queues[i].owner == holder) + return(&sem->fifo_queues[i]); + return(NULL); +} + + + +static struct task_struct* ikglp_find_hp_waiter(struct fifo_queue *kqueue, + struct task_struct *skip) +{ + struct list_head *pos; + struct task_struct *queued, *found = NULL; + + list_for_each(pos, &kqueue->wait.task_list) { + queued = (struct task_struct*) list_entry(pos, + wait_queue_t, task_list)->private; + + /* Compare task prios, find high prio task. 
*/ + if (queued != skip && edf_higher_prio(queued, found)) + found = queued; + } + return found; +} + +static struct fifo_queue* ikglp_find_shortest(struct ikglp_semaphore *sem, + struct fifo_queue *search_start) +{ + // we start our search at search_start instead of at the beginning of the + // queue list to load-balance across all resources. + struct fifo_queue* step = search_start; + struct fifo_queue* shortest = sem->shortest_fifo_queue; + + do { + step = (step+1 != &sem->fifo_queues[sem->nr_replicas]) ? + step+1 : &sem->fifo_queues[0]; + + if(step->count < shortest->count) { + shortest = step; + if(step->count == 0) + break; /* can't get any shorter */ + } + + }while(step != search_start); + + return(shortest); +} + +static inline struct task_struct* ikglp_mth_highest(struct ikglp_semaphore *sem) +{ + return binheap_top_entry(&sem->top_m, ikglp_heap_node_t, node)->task; +} + + + +#if 0 +static void print_global_list(struct binheap_node* n, int depth) +{ + ikglp_heap_node_t *global_heap_node; + char padding[81] = " "; + + if(n == NULL) { + TRACE_CUR("+-> %p\n", NULL); + return; + } + + global_heap_node = binheap_entry(n, ikglp_heap_node_t, node); + + if(depth*2 <= 80) + padding[depth*2] = '\0'; + + TRACE_CUR("%s+-> %s/%d\n", + padding, + global_heap_node->task->comm, + global_heap_node->task->pid); + + if(n->left) print_global_list(n->left, depth+1); + if(n->right) print_global_list(n->right, depth+1); +} + +static void print_donees(struct ikglp_semaphore *sem, struct binheap_node *n, int depth) +{ + ikglp_donee_heap_node_t *donee_node; + char padding[81] = " "; + struct task_struct* donor = NULL; + + if(n == NULL) { + TRACE_CUR("+-> %p\n", NULL); + return; + } + + donee_node = binheap_entry(n, ikglp_donee_heap_node_t, node); + + if(depth*2 <= 80) + padding[depth*2] = '\0'; + + if(donee_node->donor_info) { + donor = donee_node->donor_info->task; + } + + TRACE_CUR("%s+-> %s/%d (d: %s/%d) (fq: %d)\n", + padding, + donee_node->task->comm, + donee_node->task->pid, + (donor) ? donor->comm : "nil", + (donor) ? 
donor->pid : -1, + ikglp_get_idx(sem, donee_node->fq)); + + if(n->left) print_donees(sem, n->left, depth+1); + if(n->right) print_donees(sem, n->right, depth+1); +} + +static void print_donors(struct binheap_node *n, int depth) +{ + ikglp_wait_state_t *donor_node; + char padding[81] = " "; + + if(n == NULL) { + TRACE_CUR("+-> %p\n", NULL); + return; + } + + donor_node = binheap_entry(n, ikglp_wait_state_t, node); + + if(depth*2 <= 80) + padding[depth*2] = '\0'; + + + TRACE_CUR("%s+-> %s/%d (donee: %s/%d)\n", + padding, + donor_node->task->comm, + donor_node->task->pid, + donor_node->donee_info->task->comm, + donor_node->donee_info->task->pid); + + if(n->left) print_donors(n->left, depth+1); + if(n->right) print_donors(n->right, depth+1); +} +#endif + +static void ikglp_add_global_list(struct ikglp_semaphore *sem, + struct task_struct *t, + ikglp_heap_node_t *node) +{ + + + node->task = t; + INIT_BINHEAP_NODE(&node->node); + + if(sem->top_m_size < sem->m) { + TRACE_CUR("Trivially adding %s/%d to top-m global list.\n", + t->comm, t->pid); +// TRACE_CUR("Top-M Before (size = %d):\n", sem->top_m_size); +// print_global_list(sem->top_m.root, 1); + + binheap_add(&node->node, &sem->top_m, ikglp_heap_node_t, node); + ++(sem->top_m_size); + +// TRACE_CUR("Top-M After (size = %d):\n", sem->top_m_size); +// print_global_list(sem->top_m.root, 1); + } + else if(__edf_higher_prio(t, BASE, ikglp_mth_highest(sem), BASE)) { + ikglp_heap_node_t *evicted = + binheap_top_entry(&sem->top_m, ikglp_heap_node_t, node); + + TRACE_CUR("Adding %s/%d to top-m and evicting %s/%d.\n", + t->comm, t->pid, + evicted->task->comm, evicted->task->pid); + +// TRACE_CUR("Not-Top-M Before:\n"); +// print_global_list(sem->not_top_m.root, 1); +// TRACE_CUR("Top-M Before (size = %d):\n", sem->top_m_size); +// print_global_list(sem->top_m.root, 1); + + + binheap_delete_root(&sem->top_m, ikglp_heap_node_t, node); + INIT_BINHEAP_NODE(&evicted->node); + binheap_add(&evicted->node, &sem->not_top_m, ikglp_heap_node_t, node); + + binheap_add(&node->node, &sem->top_m, ikglp_heap_node_t, node); + +// TRACE_CUR("Top-M After (size = %d):\n", sem->top_m_size); +// print_global_list(sem->top_m.root, 1); +// TRACE_CUR("Not-Top-M After:\n"); +// print_global_list(sem->not_top_m.root, 1); + } + else { + TRACE_CUR("Trivially adding %s/%d to not-top-m global list.\n", + t->comm, t->pid); +// TRACE_CUR("Not-Top-M Before:\n"); +// print_global_list(sem->not_top_m.root, 1); + + binheap_add(&node->node, &sem->not_top_m, ikglp_heap_node_t, node); + +// TRACE_CUR("Not-Top-M After:\n"); +// print_global_list(sem->not_top_m.root, 1); + } +} + + +static void ikglp_del_global_list(struct ikglp_semaphore *sem, + struct task_struct *t, + ikglp_heap_node_t *node) +{ + BUG_ON(!binheap_is_in_heap(&node->node)); + + TRACE_CUR("Removing %s/%d from global list.\n", t->comm, t->pid); + + if(binheap_is_in_this_heap(&node->node, &sem->top_m)) { + TRACE_CUR("%s/%d is in top-m\n", t->comm, t->pid); + +// TRACE_CUR("Not-Top-M Before:\n"); +// print_global_list(sem->not_top_m.root, 1); +// TRACE_CUR("Top-M Before (size = %d):\n", sem->top_m_size); +// print_global_list(sem->top_m.root, 1); + + + binheap_delete(&node->node, &sem->top_m); + + if(!binheap_empty(&sem->not_top_m)) { + ikglp_heap_node_t *promoted = + binheap_top_entry(&sem->not_top_m, ikglp_heap_node_t, node); + + TRACE_CUR("Promoting %s/%d to top-m\n", + promoted->task->comm, promoted->task->pid); + + binheap_delete_root(&sem->not_top_m, ikglp_heap_node_t, node); + INIT_BINHEAP_NODE(&promoted->node); + + 
binheap_add(&promoted->node, &sem->top_m, ikglp_heap_node_t, node); + } + else { + TRACE_CUR("No one to promote to top-m.\n"); + --(sem->top_m_size); + } + +// TRACE_CUR("Top-M After (size = %d):\n", sem->top_m_size); +// print_global_list(sem->top_m.root, 1); +// TRACE_CUR("Not-Top-M After:\n"); +// print_global_list(sem->not_top_m.root, 1); + } + else { +// TRACE_CUR("%s/%d is in not-top-m\n", t->comm, t->pid); +// TRACE_CUR("Not-Top-M Before:\n"); +// print_global_list(sem->not_top_m.root, 1); + + binheap_delete(&node->node, &sem->not_top_m); + +// TRACE_CUR("Not-Top-M After:\n"); +// print_global_list(sem->not_top_m.root, 1); + } +} + + +static void ikglp_add_donees(struct ikglp_semaphore *sem, + struct fifo_queue *fq, + struct task_struct *t, + ikglp_donee_heap_node_t* node) +{ +// TRACE_CUR("Adding %s/%d to donee list.\n", t->comm, t->pid); +// TRACE_CUR("donees Before:\n"); +// print_donees(sem, sem->donees.root, 1); + + node->task = t; + node->donor_info = NULL; + node->fq = fq; + INIT_BINHEAP_NODE(&node->node); + + binheap_add(&node->node, &sem->donees, ikglp_donee_heap_node_t, node); + +// TRACE_CUR("donees After:\n"); +// print_donees(sem, sem->donees.root, 1); +} + + +static void ikglp_refresh_owners_prio_increase(struct task_struct *t, + struct fifo_queue *fq, + struct ikglp_semaphore *sem, + unsigned long flags) +{ + // priority of 't' has increased (note: 't' might already be hp_waiter). + if ((t == fq->hp_waiter) || edf_higher_prio(t, fq->hp_waiter)) { + struct task_struct *old_max_eff_prio; + struct task_struct *new_max_eff_prio; + struct task_struct *new_prio = NULL; + struct task_struct *owner = fq->owner; + + if(fq->hp_waiter) + TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n", + fq->hp_waiter->comm, fq->hp_waiter->pid); + else + TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n"); + + if(owner) + { + raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); + +// TRACE_TASK(owner, "Heap Before:\n"); +// print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0); + + old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); + + fq->hp_waiter = t; + fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter); + + binheap_decrease(&fq->nest.hp_binheap_node, + &tsk_rt(owner)->hp_blocked_tasks); + +// TRACE_TASK(owner, "Heap After:\n"); +// print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0); + + new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); + + if(new_max_eff_prio != old_max_eff_prio) { + TRACE_TASK(t, "is new hp_waiter.\n"); + + if ((effective_priority(owner) == old_max_eff_prio) || + (__edf_higher_prio(new_max_eff_prio, BASE, + owner, EFFECTIVE))){ + new_prio = new_max_eff_prio; + } + } + else { + TRACE_TASK(t, "no change in max_eff_prio of heap.\n"); + } + + if(new_prio) { + // set new inheritance and propagate + TRACE_TASK(t, "Effective priority changed for owner %s/%d to %s/%d\n", + owner->comm, owner->pid, + new_prio->comm, new_prio->pid); + litmus->nested_increase_prio(owner, new_prio, &sem->lock, + flags); // unlocks lock. + } + else { + TRACE_TASK(t, "No change in effective priority (is %s/%d). 
Propagation halted.\n", + new_max_eff_prio->comm, new_max_eff_prio->pid); + raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); + unlock_fine_irqrestore(&sem->lock, flags); + } + } + else { + fq->hp_waiter = t; + fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter); + + TRACE_TASK(t, "no owner??\n"); + unlock_fine_irqrestore(&sem->lock, flags); + } + } + else { + TRACE_TASK(t, "hp_waiter is unaffected.\n"); + unlock_fine_irqrestore(&sem->lock, flags); + } +} + +// hp_waiter has decreased +static void ikglp_refresh_owners_prio_decrease(struct fifo_queue *fq, + struct ikglp_semaphore *sem, + unsigned long flags) +{ + struct task_struct *owner = fq->owner; + + struct task_struct *old_max_eff_prio; + struct task_struct *new_max_eff_prio; + + if(!owner) { + TRACE_CUR("No owner. Returning.\n"); + unlock_fine_irqrestore(&sem->lock, flags); + return; + } + + raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); + + old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); + + binheap_delete(&fq->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); + fq->nest.hp_waiter_eff_prio = fq->hp_waiter; + binheap_add(&fq->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks, + struct nested_info, hp_binheap_node); + + new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); + + if((old_max_eff_prio != new_max_eff_prio) && + (effective_priority(owner) == old_max_eff_prio)) + { + // Need to set new effective_priority for owner + struct task_struct *decreased_prio; + + TRACE_CUR("Propagating decreased inheritance to holder of fq %d.\n", + ikglp_get_idx(sem, fq)); + + if(__edf_higher_prio(new_max_eff_prio, BASE, owner, BASE)) { + TRACE_CUR("%s/%d has greater base priority than base priority of owner (%s/%d) of fq %d.\n", + (new_max_eff_prio) ? new_max_eff_prio->comm : "nil", + (new_max_eff_prio) ? new_max_eff_prio->pid : -1, + owner->comm, + owner->pid, + ikglp_get_idx(sem, fq)); + + decreased_prio = new_max_eff_prio; + } + else { + TRACE_CUR("%s/%d has lesser base priority than base priority of owner (%s/%d) of fq %d.\n", + (new_max_eff_prio) ? new_max_eff_prio->comm : "nil", + (new_max_eff_prio) ? 
new_max_eff_prio->pid : -1, + owner->comm, + owner->pid, + ikglp_get_idx(sem, fq)); + + decreased_prio = NULL; + } + + // beware: recursion + litmus->nested_decrease_prio(owner, decreased_prio, &sem->lock, flags); // will unlock mutex->lock + } + else { + TRACE_TASK(owner, "No need to propagate priority decrease forward.\n"); + raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); + unlock_fine_irqrestore(&sem->lock, flags); + } +} + + +static void ikglp_remove_donation_from_owner(struct binheap_node *n, + struct fifo_queue *fq, + struct ikglp_semaphore *sem, + unsigned long flags) +{ + struct task_struct *owner = fq->owner; + + struct task_struct *old_max_eff_prio; + struct task_struct *new_max_eff_prio; + + BUG_ON(!owner); + + raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); + + old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); + + binheap_delete(n, &tsk_rt(owner)->hp_blocked_tasks); + + new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); + + if((old_max_eff_prio != new_max_eff_prio) && + (effective_priority(owner) == old_max_eff_prio)) + { + // Need to set new effective_priority for owner + struct task_struct *decreased_prio; + + TRACE_CUR("Propagating decreased inheritance to holder of fq %d.\n", + ikglp_get_idx(sem, fq)); + + if(__edf_higher_prio(new_max_eff_prio, BASE, owner, BASE)) { + TRACE_CUR("has greater base priority than base priority of owner of fq %d.\n", + ikglp_get_idx(sem, fq)); + decreased_prio = new_max_eff_prio; + } + else { + TRACE_CUR("has lesser base priority than base priority of owner of fq %d.\n", + ikglp_get_idx(sem, fq)); + decreased_prio = NULL; + } + + // beware: recursion + litmus->nested_decrease_prio(owner, decreased_prio, &sem->lock, flags); // will unlock mutex->lock + } + else { + TRACE_TASK(owner, "No need to propagate priority decrease forward.\n"); + raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); + unlock_fine_irqrestore(&sem->lock, flags); + } +} + +static void ikglp_remove_donation_from_fq_waiter(struct task_struct *t, + struct binheap_node *n) +{ + struct task_struct *old_max_eff_prio; + struct task_struct *new_max_eff_prio; + + raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); + + old_max_eff_prio = top_priority(&tsk_rt(t)->hp_blocked_tasks); + + binheap_delete(n, &tsk_rt(t)->hp_blocked_tasks); + + new_max_eff_prio = top_priority(&tsk_rt(t)->hp_blocked_tasks); + + if((old_max_eff_prio != new_max_eff_prio) && + (effective_priority(t) == old_max_eff_prio)) + { + // Need to set new effective_priority for owner + struct task_struct *decreased_prio; + + if(__edf_higher_prio(new_max_eff_prio, BASE, t, BASE)) { + decreased_prio = new_max_eff_prio; + } + else { + decreased_prio = NULL; + } + + tsk_rt(t)->inh_task = decreased_prio; + } + + raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); +} + +static void ikglp_get_immediate(struct task_struct* t, + struct fifo_queue *fq, + struct ikglp_semaphore *sem, + unsigned long flags) +{ + // resource available now + TRACE_CUR("queue %d: acquired immediately\n", ikglp_get_idx(sem, fq)); + + fq->owner = t; + + raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); + binheap_add(&fq->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks, + struct nested_info, hp_binheap_node); + raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); + + ++(fq->count); + + ikglp_add_global_list(sem, t, &fq->global_heap_node); + ikglp_add_donees(sem, fq, t, &fq->donee_heap_node); + + sem->shortest_fifo_queue = ikglp_find_shortest(sem, sem->shortest_fifo_queue); + + 
unlock_fine_irqrestore(&sem->lock, flags); +} + + + + + +static void __ikglp_enqueue_on_fq(struct ikglp_semaphore *sem, + struct fifo_queue* fq, + struct task_struct* t, + wait_queue_t *wait, + ikglp_heap_node_t *global_heap_node, + ikglp_donee_heap_node_t *donee_heap_node) +{ + /* resource is not free => must suspend and wait */ + TRACE_TASK(t, "Enqueuing on fq %d.\n", + ikglp_get_idx(sem, fq)); + + init_waitqueue_entry(wait, t); + + __add_wait_queue_tail_exclusive(&fq->wait, wait); + + ++(fq->count); + + // update global list. + if(likely(global_heap_node)) { + if(binheap_is_in_heap(&global_heap_node->node)) { + WARN_ON(1); + ikglp_del_global_list(sem, t, global_heap_node); + } + ikglp_add_global_list(sem, t, global_heap_node); + } + // update donor eligiblity list. + if(likely(donee_heap_node)) { +// if(binheap_is_in_heap(&donee_heap_node->node)) { +// WARN_ON(1); +// } + ikglp_add_donees(sem, fq, t, donee_heap_node); + } + + if(likely(sem->shortest_fifo_queue == fq)) { + sem->shortest_fifo_queue = ikglp_find_shortest(sem, fq); + } + + TRACE_TASK(t, "shortest queue is now %d\n", ikglp_get_idx(sem, fq)); +} + + +static void ikglp_enqueue_on_fq( + struct ikglp_semaphore *sem, + struct fifo_queue *fq, + ikglp_wait_state_t *wait, + unsigned long flags) +{ + /* resource is not free => must suspend and wait */ + TRACE_TASK(wait->task, "queue %d: Resource is not free => must suspend and wait.\n", + ikglp_get_idx(sem, fq)); + + INIT_BINHEAP_NODE(&wait->global_heap_node.node); + INIT_BINHEAP_NODE(&wait->donee_heap_node.node); + + __ikglp_enqueue_on_fq(sem, fq, wait->task, &wait->fq_node, + &wait->global_heap_node, &wait->donee_heap_node); + + ikglp_refresh_owners_prio_increase(wait->task, fq, sem, flags); // unlocks sem->lock +} + + +static void __ikglp_enqueue_on_pq(struct ikglp_semaphore *sem, + ikglp_wait_state_t *wait) +{ + TRACE_TASK(wait->task, "goes to PQ.\n"); + + wait->pq_node.task = wait->task; // copy over task (little redundant...) + + binheap_add(&wait->pq_node.node, &sem->priority_queue, + ikglp_heap_node_t, node); +} + +static void ikglp_enqueue_on_pq(struct ikglp_semaphore *sem, + ikglp_wait_state_t *wait) +{ + INIT_BINHEAP_NODE(&wait->global_heap_node.node); + INIT_BINHEAP_NODE(&wait->donee_heap_node.node); + INIT_BINHEAP_NODE(&wait->pq_node.node); + + __ikglp_enqueue_on_pq(sem, wait); +} + +static void ikglp_enqueue_on_donor(struct ikglp_semaphore *sem, + ikglp_wait_state_t* wait, + unsigned long flags) +{ + struct task_struct *t = wait->task; + ikglp_donee_heap_node_t *donee_node = NULL; + struct task_struct *donee; + + struct task_struct *old_max_eff_prio; + struct task_struct *new_max_eff_prio; + struct task_struct *new_prio = NULL; + + INIT_BINHEAP_NODE(&wait->global_heap_node.node); + INIT_BINHEAP_NODE(&wait->donee_heap_node.node); + INIT_BINHEAP_NODE(&wait->pq_node.node); + INIT_BINHEAP_NODE(&wait->node); + +// TRACE_CUR("Adding %s/%d as donor.\n", t->comm, t->pid); +// TRACE_CUR("donors Before:\n"); +// print_donors(sem->donors.root, 1); + + // Add donor to the global list. 
+ ikglp_add_global_list(sem, t, &wait->global_heap_node); + + // Select a donee + donee_node = binheap_top_entry(&sem->donees, + ikglp_donee_heap_node_t, node); + donee = donee_node->task; + + TRACE_TASK(t, "Donee selected: %s/%d\n", donee->comm, donee->pid); + + TRACE_CUR("Temporarily removing %s/%d to donee list.\n", + donee->comm, donee->pid); +// TRACE_CUR("donees Before:\n"); +// print_donees(sem, sem->donees.root, 1); + + binheap_delete_root(&sem->donees, ikglp_donee_heap_node_t, node); // will re-add it shortly + +// TRACE_CUR("donees After:\n"); +// print_donees(sem, sem->donees.root, 1); + + + wait->donee_info = donee_node; + + // Add t to donor heap. + binheap_add(&wait->node, &sem->donors, ikglp_wait_state_t, node); + + // Now adjust the donee's priority. + + // Lock the donee's inheritance heap. + raw_spin_lock(&tsk_rt(donee)->hp_blocked_tasks_lock); + + old_max_eff_prio = top_priority(&tsk_rt(donee)->hp_blocked_tasks); + + if(donee_node->donor_info) { + // Steal donation relation. Evict old donor to PQ. + + // Remove old donor from donor heap + ikglp_wait_state_t *old_wait = donee_node->donor_info; + struct task_struct *old_donor = old_wait->task; + + TRACE_TASK(t, "Donee (%s/%d) had donor %s/%d. Moving old donor to PQ.\n", + donee->comm, donee->pid, old_donor->comm, old_donor->pid); + + binheap_delete(&old_wait->node, &sem->donors); + + // Remove donation from donee's inheritance heap. + binheap_delete(&old_wait->prio_donation.hp_binheap_node, + &tsk_rt(donee)->hp_blocked_tasks); + // WARNING: have not updated inh_prio! + + // Add old donor to PQ. + __ikglp_enqueue_on_pq(sem, old_wait); + + // Remove old donor from the global heap. + ikglp_del_global_list(sem, old_donor, &old_wait->global_heap_node); + } + + // Add back donee's node to the donees heap with increased prio + donee_node->donor_info = wait; + INIT_BINHEAP_NODE(&donee_node->node); + + + TRACE_CUR("Adding %s/%d back to donee list.\n", donee->comm, donee->pid); +// TRACE_CUR("donees Before:\n"); +// print_donees(sem, sem->donees.root, 1); + + binheap_add(&donee_node->node, &sem->donees, ikglp_donee_heap_node_t, node); + +// TRACE_CUR("donees After:\n"); +// print_donees(sem, sem->donees.root, 1); + + // Add an inheritance/donation to the donee's inheritance heap. + wait->prio_donation.lock = (struct litmus_lock*)sem; + wait->prio_donation.hp_waiter_eff_prio = t; + wait->prio_donation.hp_waiter_ptr = NULL; + INIT_BINHEAP_NODE(&wait->prio_donation.hp_binheap_node); + + binheap_add(&wait->prio_donation.hp_binheap_node, + &tsk_rt(donee)->hp_blocked_tasks, + struct nested_info, hp_binheap_node); + + new_max_eff_prio = top_priority(&tsk_rt(donee)->hp_blocked_tasks); + + if(new_max_eff_prio != old_max_eff_prio) { + if ((effective_priority(donee) == old_max_eff_prio) || + (__edf_higher_prio(new_max_eff_prio, BASE, donee, EFFECTIVE))){ + TRACE_TASK(t, "Donation increases %s/%d's effective priority\n", + donee->comm, donee->pid); + new_prio = new_max_eff_prio; + } +// else { +// // should be bug. donor would not be in top-m. +// TRACE_TASK(t, "Donation is not greater than base prio of %s/%d?\n", donee->comm, donee->pid); +// WARN_ON(1); +// } +// } +// else { +// // should be bug. donor would not be in top-m. +// TRACE_TASK(t, "No change in %s/%d's inheritance heap?\n", donee->comm, donee->pid); +// WARN_ON(1); + } + + if(new_prio) { + struct fifo_queue *donee_fq = donee_node->fq; + + if(donee != donee_fq->owner) { + TRACE_TASK(t, "%s/%d is not the owner. 
Propagating priority to owner %s/%d.\n", + donee->comm, donee->pid, + donee_fq->owner->comm, donee_fq->owner->pid); + + raw_spin_unlock(&tsk_rt(donee)->hp_blocked_tasks_lock); + ikglp_refresh_owners_prio_increase(donee, donee_fq, sem, flags); // unlocks sem->lock + } + else { + TRACE_TASK(t, "%s/%d is the owner. Progatating priority immediatly.\n", + donee->comm, donee->pid); + litmus->nested_increase_prio(donee, new_prio, &sem->lock, flags); // unlocks sem->lock and donee's heap lock + } + } + else { + TRACE_TASK(t, "No change in effective priority (it is %d/%s). BUG?\n", + new_max_eff_prio->comm, new_max_eff_prio->pid); + raw_spin_unlock(&tsk_rt(donee)->hp_blocked_tasks_lock); + unlock_fine_irqrestore(&sem->lock, flags); + } + + +// TRACE_CUR("donors After:\n"); +// print_donors(sem->donors.root, 1); +} + + +int ikglp_lock(struct litmus_lock* l) +{ + struct task_struct* t = current; + struct ikglp_semaphore *sem = ikglp_from_lock(l); + unsigned long flags = 0, real_flags; + struct fifo_queue *fq = NULL; + int replica = -EINVAL; + +#ifdef CONFIG_LITMUS_DGL_SUPPORT + raw_spinlock_t *dgl_lock; +#endif + + ikglp_wait_state_t wait; + + if (!is_realtime(t)) + return -EPERM; + +#ifdef CONFIG_LITMUS_DGL_SUPPORT + dgl_lock = litmus->get_dgl_spinlock(t); +#endif + + raw_spin_lock_irqsave(&sem->real_lock, real_flags); + + lock_global_irqsave(dgl_lock, flags); + lock_fine_irqsave(&sem->lock, flags); + + if(sem->shortest_fifo_queue->count == 0) { + // take available resource + replica = ikglp_get_idx(sem, sem->shortest_fifo_queue); + + ikglp_get_immediate(t, sem->shortest_fifo_queue, sem, flags); // unlocks sem->lock + + unlock_global_irqrestore(dgl_lock, flags); + raw_spin_unlock_irqrestore(&sem->real_lock, real_flags); + } + else + { + // we have to suspend. + + wait.task = t; // THIS IS CRITICALLY IMPORTANT!!! + + tsk_rt(t)->blocked_lock = (struct litmus_lock*)sem; // record where we are blocked + mb(); + + /* FIXME: interruptible would be nice some day */ + set_task_state(t, TASK_UNINTERRUPTIBLE); + + if(sem->shortest_fifo_queue->count < sem->max_fifo_len) { + // enqueue on fq + ikglp_enqueue_on_fq(sem, sem->shortest_fifo_queue, &wait, flags); // unlocks sem->lock + } + else { + + TRACE_CUR("IKGLP fifo queues are full.\n"); + + // no room in fifos. Go to PQ or donors. + + if(__edf_higher_prio(ikglp_mth_highest(sem), BASE, t, BASE)) { + // enqueue on PQ + ikglp_enqueue_on_pq(sem, &wait); + unlock_fine_irqrestore(&sem->lock, flags); + } + else { + // enqueue as donor + ikglp_enqueue_on_donor(sem, &wait, flags); // unlocks sem->lock + } + } + + unlock_global_irqrestore(dgl_lock, flags); + raw_spin_unlock_irqrestore(&sem->real_lock, real_flags); + + TS_LOCK_SUSPEND; + + schedule(); + + TS_LOCK_RESUME; + + fq = ikglp_get_queue(sem, t); + BUG_ON(!fq); + + replica = ikglp_get_idx(sem, fq); + } + + TRACE_CUR("Acquired lock %d, queue %d\n", + l->ident, replica); + + return replica; +} + +static void ikglp_move_donor_to_fq(struct ikglp_semaphore *sem, + struct fifo_queue *fq, + ikglp_wait_state_t *donor_info) +{ + struct task_struct *t = donor_info->task; + + TRACE_CUR("Donor %s/%d being moved to fq %d\n", + t->comm, + t->pid, + ikglp_get_idx(sem, fq)); + + binheap_delete(&donor_info->node, &sem->donors); + + __ikglp_enqueue_on_fq(sem, fq, t, + &donor_info->fq_node, + NULL, // already in global_list, so pass null to prevent adding 2nd time. + &donor_info->donee_heap_node); + + // warning: + // ikglp_update_owners_prio(t, fq, sem, flags) has not been called. 
+} + +static void ikglp_move_pq_to_fq(struct ikglp_semaphore *sem, + struct fifo_queue *fq, + ikglp_wait_state_t *wait) +{ + struct task_struct *t = wait->task; + + TRACE_CUR("PQ request %s/%d being moved to fq %d\n", + t->comm, + t->pid, + ikglp_get_idx(sem, fq)); + + binheap_delete(&wait->pq_node.node, &sem->priority_queue); + + __ikglp_enqueue_on_fq(sem, fq, t, + &wait->fq_node, + &wait->global_heap_node, + &wait->donee_heap_node); + // warning: + // ikglp_update_owners_prio(t, fq, sem, flags) has not been called. +} + +static ikglp_wait_state_t* ikglp_find_hp_waiter_to_steal( + struct ikglp_semaphore* sem) +{ + /* must hold sem->lock */ + + struct fifo_queue *fq = NULL; + struct list_head *pos; + struct task_struct *queued; + int i; + + for(i = 0; i < sem->nr_replicas; ++i) { + if( (sem->fifo_queues[i].count > 1) && + (!fq || edf_higher_prio(sem->fifo_queues[i].hp_waiter, fq->hp_waiter)) ) { + + TRACE_CUR("hp_waiter on fq %d (%s/%d) has higher prio than hp_waiter on fq %d (%s/%d)\n", + ikglp_get_idx(sem, &sem->fifo_queues[i]), + sem->fifo_queues[i].hp_waiter->comm, + sem->fifo_queues[i].hp_waiter->pid, + (fq) ? ikglp_get_idx(sem, fq) : -1, + (fq) ? ((fq->hp_waiter) ? fq->hp_waiter->comm : "nil") : "nilXX", + (fq) ? ((fq->hp_waiter) ? fq->hp_waiter->pid : -1) : -2); + + fq = &sem->fifo_queues[i]; + + WARN_ON(!(fq->hp_waiter)); + } + } + + if(fq) { + struct task_struct *max_hp = fq->hp_waiter; + ikglp_wait_state_t* ret = NULL; + + TRACE_CUR("Searching for %s/%d on fq %d\n", + max_hp->comm, + max_hp->pid, + ikglp_get_idx(sem, fq)); + + BUG_ON(!max_hp); + + list_for_each(pos, &fq->wait.task_list) { + wait_queue_t *wait = list_entry(pos, wait_queue_t, task_list); + + queued = (struct task_struct*) wait->private; + + TRACE_CUR("fq %d entry: %s/%d\n", + ikglp_get_idx(sem, fq), + queued->comm, + queued->pid); + + /* Compare task prios, find high prio task. */ + if (queued == max_hp) { + TRACE_CUR("Found it!\n"); + ret = container_of(wait, ikglp_wait_state_t, fq_node); + } + } + + WARN_ON(!ret); + return ret; + } + + return(NULL); +} + +static void ikglp_steal_to_fq(struct ikglp_semaphore *sem, + struct fifo_queue *fq, + ikglp_wait_state_t *fq_wait) +{ + struct task_struct *t = fq_wait->task; + struct fifo_queue *fq_steal = fq_wait->donee_heap_node.fq; + + WARN_ON(t != fq_steal->hp_waiter); + + TRACE_CUR("FQ request %s/%d being moved to fq %d\n", + t->comm, + t->pid, + ikglp_get_idx(sem, fq)); + + fq_wait->donee_heap_node.fq = fq; // just to be safe + + + __remove_wait_queue(&fq_steal->wait, &fq_wait->fq_node); + --(fq_steal->count); + + fq_steal->hp_waiter = ikglp_find_hp_waiter(fq_steal, NULL); + TRACE_TASK(t, "New hp_waiter for fq %d is %s/%d!\n", + ikglp_get_idx(sem, fq_steal), + (fq_steal->hp_waiter) ? fq_steal->hp_waiter->comm : "nil", + (fq_steal->hp_waiter) ? fq_steal->hp_waiter->pid : -1); + + + // Update shortest. + if(fq_steal->count < sem->shortest_fifo_queue->count) { + sem->shortest_fifo_queue = fq_steal; + } + + __ikglp_enqueue_on_fq(sem, fq, t, + &fq_wait->fq_node, + NULL, + NULL); + + // warning: We have not checked the priority inheritance of fq's owner yet. 
+} + + +static void ikglp_migrate_fq_to_owner_heap_nodes(struct ikglp_semaphore *sem, + struct fifo_queue *fq, + ikglp_wait_state_t *old_wait) +{ + struct task_struct *t = old_wait->task; + + BUG_ON(old_wait->donee_heap_node.fq != fq); + + TRACE_TASK(t, "Migrating wait_state to memory of queue %d.\n", + ikglp_get_idx(sem, fq)); + + // need to migrate global_heap_node and donee_heap_node off of the stack + // to the nodes allocated for the owner of this fq. + + // TODO: Enhance binheap() to perform this operation in place. + + ikglp_del_global_list(sem, t, &old_wait->global_heap_node); // remove + fq->global_heap_node = old_wait->global_heap_node; // copy + ikglp_add_global_list(sem, t, &fq->global_heap_node); // re-add + + binheap_delete(&old_wait->donee_heap_node.node, &sem->donees); // remove + fq->donee_heap_node = old_wait->donee_heap_node; // copy + + if(fq->donee_heap_node.donor_info) { + // let donor know that our location has changed + BUG_ON(fq->donee_heap_node.donor_info->donee_info->task != t); // validate cross-link + fq->donee_heap_node.donor_info->donee_info = &fq->donee_heap_node; + } + INIT_BINHEAP_NODE(&fq->donee_heap_node.node); + binheap_add(&fq->donee_heap_node.node, &sem->donees, + ikglp_donee_heap_node_t, node); // re-add +} + +int ikglp_unlock(struct litmus_lock* l) +{ + struct ikglp_semaphore *sem = ikglp_from_lock(l); + struct task_struct *t = current; + struct task_struct *donee = NULL; + struct task_struct *next = NULL; + struct task_struct *new_on_fq = NULL; + + ikglp_wait_state_t *other_donor_info = NULL; + struct fifo_queue *to_steal = NULL; + struct fifo_queue *fq; + +#ifdef CONFIG_LITMUS_DGL_SUPPORT + raw_spinlock_t *dgl_lock; +#endif + + unsigned long flags = 0, real_flags; + + int err = 0; + +#ifdef CONFIG_LITMUS_DGL_SUPPORT + dgl_lock = litmus->get_dgl_spinlock(t); +#endif + + raw_spin_lock_irqsave(&sem->real_lock, real_flags); + + lock_global_irqsave(dgl_lock, flags); // TODO: Push this deeper + lock_fine_irqsave(&sem->lock, flags); + + fq = ikglp_get_queue(sem, t); // returns NULL if 't' is not owner. + + if (!fq) { + err = -EINVAL; + goto out; + } + + TRACE_TASK(t, "Freeing replica %d.\n", ikglp_get_idx(sem, fq)); + + + // Remove 't' from the heaps, but data in nodes will still be good. + ikglp_del_global_list(sem, t, &fq->global_heap_node); + binheap_delete(&fq->donee_heap_node.node, &sem->donees); + + // Move the next request into the FQ and update heaps as needed. + // We defer re-evaluation of priorities to later in the function. + if(fq->donee_heap_node.donor_info) { // move my doner to FQ + ikglp_wait_state_t *donor_info = fq->donee_heap_node.donor_info; + + new_on_fq = donor_info->task; + + TRACE_TASK(t, "Moving MY donor (%s/%d) to fq %d.\n", + new_on_fq->comm, new_on_fq->pid, + ikglp_get_idx(sem, fq)); + // donor moved to FQ + donee = t; + ikglp_move_donor_to_fq(sem, fq, donor_info); + } + else if(!binheap_empty(&sem->donors)) { // No donor, so move any donor to FQ + // move other donor to FQ + other_donor_info = binheap_top_entry(&sem->donors, + ikglp_wait_state_t, node); + + new_on_fq = other_donor_info->task; + donee = other_donor_info->donee_info->task; + + // update the donee's heap position. 
+ other_donor_info->donee_info->donor_info = NULL; // clear the cross-link + binheap_decrease(&other_donor_info->donee_info->node, &sem->donees); + + + TRACE_TASK(t, "Moving a donor (%s/%d) to fq %d.\n", + new_on_fq->comm, new_on_fq->pid, + ikglp_get_idx(sem, fq)); + + ikglp_move_donor_to_fq(sem, fq, other_donor_info); + } + else if(!binheap_empty(&sem->priority_queue)) { // No donors, so move PQ + ikglp_heap_node_t *pq_node = binheap_top_entry(&sem->priority_queue, + ikglp_heap_node_t, node); + ikglp_wait_state_t *pq_wait = container_of(pq_node, ikglp_wait_state_t, + pq_node); + + new_on_fq = pq_wait->task; + + TRACE_TASK(t, "Moving a pq waiter (%s/%d) to fq %d.\n", + new_on_fq->comm, new_on_fq->pid, + ikglp_get_idx(sem, fq)); + + ikglp_move_pq_to_fq(sem, fq, pq_wait); + } + else if(fq->count == 1) { // No PQ and this queue is empty, so steal + // steal. + ikglp_wait_state_t *fq_wait; + + TRACE_TASK(t, "Looking to steal a request for fq %d...\n", + ikglp_get_idx(sem, fq)); + + fq_wait = ikglp_find_hp_waiter_to_steal(sem); + if(fq_wait) { + to_steal = fq_wait->donee_heap_node.fq; + + new_on_fq = fq_wait->task; + + TRACE_TASK(t, "Found %s/%d of fq %d to steal for fq %d...\n", + new_on_fq->comm, new_on_fq->pid, + ikglp_get_idx(sem, to_steal), + ikglp_get_idx(sem, fq)); + + ikglp_steal_to_fq(sem, fq, fq_wait); + } + else { + TRACE_TASK(t, "Found nothing to steal for fq %d.\n", + ikglp_get_idx(sem, fq)); + } + } + else { // move no one + } + + // 't' must drop all priority and clean up data structures before hand-off. + + // DROP ALL INHERITANCE. IKGLP MUST BE OUTER-MOST + raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); + { + int count = 0; + while(!binheap_empty(&tsk_rt(t)->hp_blocked_tasks)) { + binheap_delete_root(&tsk_rt(t)->hp_blocked_tasks, + struct nested_info, hp_binheap_node); + ++count; + } + litmus->decrease_prio(t, NULL); + WARN_ON(count > 2); // should not be greater than 2. only local fq inh and donation can be possible. + } + raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); + + + // Updating the owner and updating sem->shortest_fifo_queue + // could have been done sooner, but it is deffered, hoping + // that it will reduce thrashing of sem->shortest_fifo_queue + // assignment. + fq->owner = NULL; // no longer owned!! + --(fq->count); + if(fq->count < sem->shortest_fifo_queue->count) { + sem->shortest_fifo_queue = fq; + } + + // Now patch up other priorities. + // + // At most one of the following: + // if(donee && donee != t), decrease prio, propagate to owner, or onward + // if(to_steal), update owner's prio (hp_waiter has already been set) + // + + BUG_ON((other_donor_info != NULL) && (to_steal != NULL)); + + if(other_donor_info) { + struct fifo_queue *other_fq = other_donor_info->donee_info->fq; + + BUG_ON(!donee); + BUG_ON(donee == t); + + TRACE_TASK(t, "Terminating donation relation of donor %s/%d to donee %s/%d!\n", + other_donor_info->task->comm, other_donor_info->task->pid, + donee->comm, donee->pid); + + // need to terminate donation relation. + if(donee == other_fq->owner) { + TRACE_TASK(t, "Donee %s/%d is an owner of fq %d.\n", + donee->comm, donee->pid, + ikglp_get_idx(sem, other_fq)); + + ikglp_remove_donation_from_owner(&other_donor_info->prio_donation.hp_binheap_node, other_fq, sem, flags); + lock_fine_irqsave(&sem->lock, flags); // there should be no contention!!!! 
+ } + else { + TRACE_TASK(t, "Donee %s/%d is an blocked in of fq %d.\n", + donee->comm, donee->pid, + ikglp_get_idx(sem, other_fq)); + + ikglp_remove_donation_from_fq_waiter(donee, &other_donor_info->prio_donation.hp_binheap_node); + if(donee == other_fq->hp_waiter) { + TRACE_TASK(t, "Donee %s/%d was an hp_waiter of fq %d. Rechecking hp_waiter.\n", + donee->comm, donee->pid, + ikglp_get_idx(sem, other_fq)); + + other_fq->hp_waiter = ikglp_find_hp_waiter(other_fq, NULL); + TRACE_TASK(t, "New hp_waiter for fq %d is %s/%d!\n", + ikglp_get_idx(sem, other_fq), + (other_fq->hp_waiter) ? other_fq->hp_waiter->comm : "nil", + (other_fq->hp_waiter) ? other_fq->hp_waiter->pid : -1); + + ikglp_refresh_owners_prio_decrease(other_fq, sem, flags); // unlocks sem->lock. reacquire it. + lock_fine_irqsave(&sem->lock, flags); // there should be no contention!!!! + } + } + } + else if(to_steal) { + TRACE_TASK(t, "Rechecking priority inheritance of fq %d, triggered by stealing.\n", + ikglp_get_idx(sem, to_steal)); + + ikglp_refresh_owners_prio_decrease(to_steal, sem, flags); // unlocks sem->lock. reacquire it. + lock_fine_irqsave(&sem->lock, flags); // there should be no contention!!!! + } + + // check for new HP waiter. + if(new_on_fq) { + // fq->owner is null, so just update the hp_waiter without locking. + + if(new_on_fq == fq->hp_waiter) { + TRACE_TASK(t, "new_on_fq is already hp_waiter.\n", + fq->hp_waiter->comm, fq->hp_waiter->pid); + fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter); // set this just to be sure... + } + else if(edf_higher_prio(new_on_fq, fq->hp_waiter)) { + if(fq->hp_waiter) + TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n", + fq->hp_waiter->comm, fq->hp_waiter->pid); + else + TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n"); + + fq->hp_waiter = new_on_fq; + fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter); + + TRACE_TASK(t, "New hp_waiter for fq %d is %s/%d!\n", + ikglp_get_idx(sem, fq), + (fq->hp_waiter) ? fq->hp_waiter->comm : "nil", + (fq->hp_waiter) ? fq->hp_waiter->pid : -1); + } + } + + + if(waitqueue_active(&fq->wait)) + { + wait_queue_t *wait = list_entry(fq->wait.task_list.next, wait_queue_t, task_list); + ikglp_wait_state_t *fq_wait = container_of(wait, ikglp_wait_state_t, fq_node); + next = (struct task_struct*) wait->private; + + __remove_wait_queue(&fq->wait, wait); + + TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - next\n", + ikglp_get_idx(sem, fq), + next->comm, next->pid); + + // migrate wait-state to fifo-memory. + ikglp_migrate_fq_to_owner_heap_nodes(sem, fq, fq_wait); + + /* next becomes the resouce holder */ + fq->owner = next; + tsk_rt(next)->blocked_lock = NULL; + + + /* determine new hp_waiter if necessary */ + if (next == fq->hp_waiter) { + + TRACE_TASK(next, "was highest-prio waiter\n"); + /* next has the highest priority --- it doesn't need to + * inherit. However, we need to make sure that the + * next-highest priority in the queue is reflected in + * hp_waiter. */ + fq->hp_waiter = ikglp_find_hp_waiter(fq, NULL); + TRACE_TASK(next, "New hp_waiter for fq %d is %s/%d!\n", + ikglp_get_idx(sem, fq), + (fq->hp_waiter) ? fq->hp_waiter->comm : "nil", + (fq->hp_waiter) ? fq->hp_waiter->pid : -1); + + fq->nest.hp_waiter_eff_prio = (fq->hp_waiter) ? 
+ effective_priority(fq->hp_waiter) : NULL; + + if (fq->hp_waiter) + TRACE_TASK(fq->hp_waiter, "is new highest-prio waiter\n"); + else + TRACE("no further waiters\n"); + + raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock); + +// TRACE_TASK(next, "Heap Before:\n"); +// print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); + + binheap_add(&fq->nest.hp_binheap_node, + &tsk_rt(next)->hp_blocked_tasks, + struct nested_info, + hp_binheap_node); + +// TRACE_TASK(next, "Heap After:\n"); +// print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); + + raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); + } + else { + /* Well, if 'next' is not the highest-priority waiter, + * then it (probably) ought to inherit the highest-priority + * waiter's priority. */ + TRACE_TASK(next, "is not hp_waiter of replica %d. hp_waiter is %s/%d\n", + ikglp_get_idx(sem, fq), + (fq->hp_waiter) ? fq->hp_waiter->comm : "nil", + (fq->hp_waiter) ? fq->hp_waiter->pid : -1); + + raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock); + + binheap_add(&fq->nest.hp_binheap_node, + &tsk_rt(next)->hp_blocked_tasks, + struct nested_info, + hp_binheap_node); + + /* It is possible that 'next' *should* be the hp_waiter, but isn't + * because that update hasn't yet executed (update operation is + * probably blocked on mutex->lock). So only inherit if the top of + * 'next's top heap node is indeed the effective prio. of hp_waiter. + * (We use fq->hp_waiter_eff_prio instead of effective_priority(hp_waiter) + * since the effective priority of hp_waiter can change (and the + * update has not made it to this lock).) + */ + if(likely(top_priority(&tsk_rt(next)->hp_blocked_tasks) == + fq->nest.hp_waiter_eff_prio)) + { + if(fq->nest.hp_waiter_eff_prio) + litmus->increase_prio(next, fq->nest.hp_waiter_eff_prio); + else + WARN_ON(1); + } + + raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); + } + + + // wake up the new resource holder! 
+ wake_up_process(next); + } + +out: + unlock_fine_irqrestore(&sem->lock, flags); + unlock_global_irqrestore(dgl_lock, flags); + + raw_spin_unlock_irqrestore(&sem->real_lock, real_flags); + + return err; +} + + + +int ikglp_close(struct litmus_lock* l) +{ + struct task_struct *t = current; + struct ikglp_semaphore *sem = ikglp_from_lock(l); + unsigned long flags; + + int owner = 0; + int i; + + raw_spin_lock_irqsave(&sem->real_lock, flags); + + for(i = 0; i < sem->nr_replicas; ++i) { + if(sem->fifo_queues[i].owner == t) { + owner = 1; + break; + } + } + + raw_spin_unlock_irqrestore(&sem->real_lock, flags); + + if (owner) + ikglp_unlock(l); + + return 0; +} + +void ikglp_free(struct litmus_lock* l) +{ + struct ikglp_semaphore *sem = ikglp_from_lock(l); + + kfree(sem->fifo_queues); + kfree(sem); +} + + + +struct litmus_lock* ikglp_new(int m, + struct litmus_lock_ops* ops, + void* __user arg) +{ + struct ikglp_semaphore* sem; + int nr_replicas = 0; + int i; + + if(!access_ok(VERIFY_READ, arg, sizeof(nr_replicas))) + { + return(NULL); + } + if(__copy_from_user(&nr_replicas, arg, sizeof(nr_replicas))) + { + return(NULL); + } + if(nr_replicas < 1) + { + return(NULL); + } + + sem = kmalloc(sizeof(*sem), GFP_KERNEL); + if(!sem) + { + return NULL; + } + + sem->fifo_queues = kmalloc(sizeof(struct fifo_queue)*nr_replicas, GFP_KERNEL); + if(!sem->fifo_queues) + { + kfree(sem); + return NULL; + } + + sem->litmus_lock.ops = ops; + +#ifdef CONFIG_DEBUG_SPINLOCK + { + __raw_spin_lock_init(&sem->lock, ((struct litmus_lock*)sem)->cheat_lockdep, &((struct litmus_lock*)sem)->key); + } +#else + raw_spin_lock_init(&sem->lock); +#endif + + raw_spin_lock_init(&sem->real_lock); + + sem->nr_replicas = nr_replicas; + sem->m = m; + sem->max_fifo_len = (sem->m/nr_replicas) + ((sem->m%nr_replicas) != 0); + + TRACE("New IKGLP Sem: m = %d, k = %d, max fifo_len = %d\n", + sem->m, + sem->nr_replicas, + sem->max_fifo_len); + + for(i = 0; i < nr_replicas; ++i) + { + struct fifo_queue* q = &(sem->fifo_queues[i]); + + q->owner = NULL; + q->hp_waiter = NULL; + init_waitqueue_head(&q->wait); + q->count = 0; + + q->global_heap_node.task = NULL; + INIT_BINHEAP_NODE(&q->global_heap_node.node); + + q->donee_heap_node.task = NULL; + q->donee_heap_node.donor_info = NULL; + q->donee_heap_node.fq = NULL; + INIT_BINHEAP_NODE(&q->donee_heap_node.node); + + q->nest.lock = (struct litmus_lock*)sem; + q->nest.hp_waiter_eff_prio = NULL; + q->nest.hp_waiter_ptr = &q->hp_waiter; + INIT_BINHEAP_NODE(&q->nest.hp_binheap_node); + } + + sem->shortest_fifo_queue = &sem->fifo_queues[0]; + + sem->top_m_size = 0; + + // init heaps + INIT_BINHEAP_HANDLE(&sem->top_m, ikglp_edf_min_heap_base_priority_order); + INIT_BINHEAP_HANDLE(&sem->not_top_m, ikglp_edf_max_heap_base_priority_order); + INIT_BINHEAP_HANDLE(&sem->donees, ikglp_edf_min_heap_donee_order); + INIT_BINHEAP_HANDLE(&sem->priority_queue, ikglp_edf_max_heap_base_priority_order); + INIT_BINHEAP_HANDLE(&sem->donors, ikglp_donor_edf_max_heap_base_priority_order); + + return &sem->litmus_lock; +} diff --git a/litmus/litmus.c b/litmus/litmus.c index 68285b319160..7271af09a188 100644 --- a/litmus/litmus.c +++ b/litmus/litmus.c @@ -379,8 +379,10 @@ long litmus_admit_task(struct task_struct* tsk) bheap_node_init(&tsk_rt(tsk)->heap_node, tsk); } +#ifdef CONFIG_LITMUS_NESTED_LOCKING tsk_rt(tsk)->blocked_lock = NULL; raw_spin_lock_init(&tsk_rt(tsk)->hp_blocked_tasks_lock); +#endif retval = litmus->admit_task(tsk); diff --git a/litmus/locking.c b/litmus/locking.c index b2f4a205cd04..f78169dbbeef 100644 --- 
a/litmus/locking.c +++ b/litmus/locking.c @@ -22,6 +22,9 @@ struct fdso_ops generic_lock_ops = { .destroy = destroy_generic_lock }; +static atomic_t lock_id_gen = ATOMIC_INIT(0); + + static inline bool is_lock(struct od_table_entry* entry) { return entry->class == &generic_lock_ops; @@ -33,11 +36,6 @@ static inline struct litmus_lock* get_lock(struct od_table_entry* entry) return (struct litmus_lock*) entry->obj->obj; } - -atomic_t lock_id_gen = ATOMIC_INIT(0); -//raw_spinlock_t rsm_global_lock; - - static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg) { struct litmus_lock* lock; @@ -48,16 +46,11 @@ static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user ar #ifdef CONFIG_LITMUS_NESTED_LOCKING lock->nest.lock = lock; lock->nest.hp_waiter_eff_prio = NULL; - + INIT_BINHEAP_NODE(&lock->nest.hp_binheap_node); WARN_ON(!(lock->nest.hp_waiter_ptr)); - - lock->ident = atomic_inc_return(&lock_id_gen); - -// if(lock->ident == 1) { -// raw_spin_lock_init(&rsm_global_lock); -// } #endif + lock->ident = atomic_inc_return(&lock_id_gen); *obj_ref = lock; } return err; @@ -145,69 +138,86 @@ struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq) return(t); } +#ifdef CONFIG_LITMUS_NESTED_LOCKING + +void print_hp_waiters(struct binheap_node* n, int depth) +{ + struct litmus_lock *l; + struct nested_info *nest; + char padding[81] = " "; + struct task_struct *hp = NULL; + struct task_struct *hp_eff = NULL; + struct task_struct *node_prio = NULL; + + + if(n == NULL) { + TRACE("+-> %p\n", NULL); + return; + } + + nest = binheap_entry(n, struct nested_info, hp_binheap_node); + l = nest->lock; + + if(depth*2 <= 80) + padding[depth*2] = '\0'; + + if(nest->hp_waiter_ptr && *(nest->hp_waiter_ptr)) { + hp = *(nest->hp_waiter_ptr); + + if(tsk_rt(hp)->inh_task) { + hp_eff = tsk_rt(hp)->inh_task; + } + } + + node_prio = nest->hp_waiter_eff_prio; + + TRACE("%s+-> %s/%d [waiter = %s/%d] [waiter's inh = %s/%d] (lock = %d)\n", + padding, + (node_prio) ? node_prio->comm : "nil", + (node_prio) ? node_prio->pid : -1, + (hp) ? hp->comm : "nil", + (hp) ? hp->pid : -1, + (hp_eff) ? hp_eff->comm : "nil", + (hp_eff) ? hp_eff->pid : -1, + l->ident); + + if(n->left) print_hp_waiters(n->left, depth+1); + if(n->right) print_hp_waiters(n->right, depth+1); +} +#endif + #ifdef CONFIG_LITMUS_DGL_SUPPORT -void select_next_lock(dgl_wait_state_t* dgl_wait, struct litmus_lock* prev_lock) +void select_next_lock(dgl_wait_state_t* dgl_wait /*, struct litmus_lock* prev_lock*/) { -// int i = dgl_wait->size - 1; - - + /* + We pick the next lock in reverse order. This causes inheritance propagation + from locks received earlier to flow in the same direction as regular nested + locking. This might make fine-grain DGL easier in the future. + */ + BUG_ON(tsk_rt(dgl_wait->task)->blocked_lock); - - WARN_ON(dgl_wait->locks[dgl_wait->last_primary] != prev_lock); -// -// // since dgl_wait->task->blocked_lock, all locks after prev_lock -// // are already held. -// -// // find the lock after prev. 
-// if(prev_lock) { -// for(/**/; i >= 0; --i) { -// if(prev_lock == dgl_wait->locks[i]) { -// --i; -// break; -// } -// else { -// BUG_ON(!dgl_wait->locks[i]->ops->is_owner(dgl_wait->locks[i], dgl_wait->task)); -// } -// } -// } - + + //WARN_ON(dgl_wait->locks[dgl_wait->last_primary] != prev_lock); + + // note reverse order for(dgl_wait->last_primary = dgl_wait->last_primary - 1; dgl_wait->last_primary >= 0; --(dgl_wait->last_primary)){ - if(!dgl_wait->locks[dgl_wait->last_primary]->ops->is_owner(dgl_wait->locks[dgl_wait->last_primary], dgl_wait->task)) { - - tsk_rt(dgl_wait->task)->blocked_lock = dgl_wait->locks[dgl_wait->last_primary]; + if(!dgl_wait->locks[dgl_wait->last_primary]->ops->is_owner( + dgl_wait->locks[dgl_wait->last_primary], dgl_wait->task)) { + + tsk_rt(dgl_wait->task)->blocked_lock = + dgl_wait->locks[dgl_wait->last_primary]; mb(); - - TRACE_CUR("New blocked lock is %d\n", dgl_wait->locks[dgl_wait->last_primary]->ident); - + + TRACE_CUR("New blocked lock is %d\n", + dgl_wait->locks[dgl_wait->last_primary]->ident); + break; } } - -// for(/**/; i >= 0; --i) { -// struct litmus_lock *l = dgl_wait->locks[i]; -// if(!l->ops->is_owner(l, dgl_wait->task)) { -// -// tsk_rt(dgl_wait->task)->blocked_lock = l; -// mb(); -// -// TRACE_CUR("New blocked lock is %d\n", l->ident); -// -// if(dgl_wait->last_primary >= 0) -// { -// TRACE_CUR("old meth = %d; new meth = %d\n", l->ident, dgl_wait->locks[dgl_wait->last_primary]->ident); -// WARN_ON(dgl_wait->locks[dgl_wait->last_primary] != l); -// } -// -// break; -// } -// else { -// TRACE_CUR("Lock %d is actually held!\n", l->ident); -// } -// } } int dgl_wake_up(wait_queue_t *wq_node, unsigned mode, int sync, void *key) @@ -217,24 +227,26 @@ int dgl_wake_up(wait_queue_t *wq_node, unsigned mode, int sync, void *key) return 1; } -void __waitqueue_dgl_remove_first(wait_queue_head_t *wq, dgl_wait_state_t** dgl_wait, struct task_struct **task) +void __waitqueue_dgl_remove_first(wait_queue_head_t *wq, + dgl_wait_state_t** dgl_wait, + struct task_struct **task) { wait_queue_t *q; - + *dgl_wait = NULL; *task = NULL; - + if (waitqueue_active(wq)) { q = list_entry(wq->task_list.next, wait_queue_t, task_list); - + if(q->func == dgl_wake_up) { *dgl_wait = (dgl_wait_state_t*) q->private; } else { *task = (struct task_struct*) q->private; } - + __remove_wait_queue(wq, q); } } @@ -252,76 +264,76 @@ static long do_litmus_dgl_lock(dgl_wait_state_t *dgl_wait) int i; unsigned long irqflags; //, dummyflags; raw_spinlock_t *dgl_lock = litmus->get_dgl_spinlock(dgl_wait->task); - + BUG_ON(dgl_wait->task != current); - + raw_spin_lock_irqsave(dgl_lock, irqflags); - - + + dgl_wait->nr_remaining = dgl_wait->size; - //atomic_set(&dgl_wait->nr_remaining, dgl_wait->size); - + // try to acquire each lock. enqueue (non-blocking) if it is unavailable. for(i = 0; i < dgl_wait->size; ++i) { struct litmus_lock *l = dgl_wait->locks[i]; - + // dgl_lock() must set task state to TASK_UNINTERRUPTIBLE if task blocks. - + if(l->ops->dgl_lock(l, dgl_wait, &dgl_wait->wq_nodes[i])) { --(dgl_wait->nr_remaining); - //atomic_dec(&dgl_wait->nr_remaining); TRACE_CUR("Acquired lock %d immediatly.\n", l->ident); } } - //if(atomic_read(&dgl_wait->nr_remaining) == 0) { if(dgl_wait->nr_remaining == 0) { // acquired entire group immediatly TRACE_CUR("Acquired all locks in DGL immediatly!\n"); } - else { + else { + + TRACE_CUR("As many as %d locks in DGL are pending. Suspending.\n", + dgl_wait->nr_remaining); - TRACE_CUR("As many as %d locks in DGL are pending. 
Suspending.\n", dgl_wait->nr_remaining); //atomic_read(&dgl_wait->nr_remaining)); - + // note reverse order. see comments in select_next_lock for reason. for(i = dgl_wait->size - 1; i >= 0; --i) { struct litmus_lock *l = dgl_wait->locks[i]; if(!l->ops->is_owner(l, dgl_wait->task)) { // double-check to be thread safe - - TRACE_CUR("Activating priority inheritance on lock %d\n", l->ident); - + + TRACE_CUR("Activating priority inheritance on lock %d\n", + l->ident); + TS_DGL_LOCK_SUSPEND; - + l->ops->enable_priority(l, dgl_wait); dgl_wait->last_primary = i; - + TRACE_CUR("Suspending for lock %d\n", l->ident); - + raw_spin_unlock_irqrestore(dgl_lock, irqflags); // free dgl_lock before suspending - + schedule(); // suspend!!! - + TS_DGL_LOCK_RESUME; - + TRACE_CUR("Woken up from DGL suspension.\n"); - + goto all_acquired; // we should hold all locks when we wake up. } } - + TRACE_CUR("Didn't have to suspend after all, but calling schedule() anyway.\n"); BUG(); } - + raw_spin_unlock_irqrestore(dgl_lock, irqflags); - + all_acquired: - + // FOR SANITY CHECK FOR TESTING for(i = 0; i < dgl_wait->size; ++i) { struct litmus_lock *l = dgl_wait->locks[i]; BUG_ON(!l->ops->is_owner(l, dgl_wait->task)); } - + TRACE_CUR("Acquired entire DGL\n"); return 0; @@ -330,7 +342,7 @@ all_acquired: //static int supports_dgl(struct litmus_lock *l) //{ // struct litmus_lock_ops* ops = l->ops; -// +// // return (ops->dgl_lock && // ops->is_owner && // ops->enable_priority); @@ -342,23 +354,23 @@ asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size) long err = -EINVAL; int dgl_ods[MAX_DGL_SIZE]; int i; - + dgl_wait_state_t dgl_wait_state; // lives on the stack until all resources in DGL are held. - + if(dgl_size > MAX_DGL_SIZE || dgl_size < 1) goto out; - + if(!access_ok(VERIFY_READ, usr_dgl_ods, dgl_size*(sizeof(int)))) goto out; - + if(__copy_from_user(&dgl_ods, usr_dgl_ods, dgl_size*(sizeof(int)))) goto out; - + if (!is_realtime(t)) { err = -EPERM; goto out; } - + for(i = 0; i < dgl_size; ++i) { struct od_table_entry *entry = get_entry_for_od(dgl_ods[i]); if(entry && is_lock(entry)) { @@ -374,17 +386,17 @@ asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size) goto out; } } - + dgl_wait_state.task = t; dgl_wait_state.size = dgl_size; - + TS_DGL_LOCK_START; err = do_litmus_dgl_lock(&dgl_wait_state); - + /* Note: task my have been suspended or preempted in between! Take * this into account when computing overheads. */ - TS_DGL_LOCK_END; - + TS_DGL_LOCK_END; + out: return err; } @@ -393,26 +405,26 @@ static long do_litmus_dgl_unlock(struct litmus_lock* dgl_locks[], int dgl_size) { int i; long err = 0; - + TRACE_CUR("Unlocking a DGL of %d size\n", dgl_size); - + for(i = dgl_size - 1; i >= 0; --i) { // unlock in reverse order - + struct litmus_lock *l = dgl_locks[i]; long tmp_err; - + TRACE_CUR("Unlocking lock %d of DGL.\n", l->ident); - + tmp_err = l->ops->unlock(l); - + if(tmp_err) { TRACE_CUR("There was an error unlocking %d: %d.\n", l->ident, tmp_err); err = tmp_err; } } - + TRACE_CUR("DGL unlocked. 
err = %d\n", err); - + return err; } @@ -422,18 +434,18 @@ asmlinkage long sys_litmus_dgl_unlock(void* __user usr_dgl_ods, int dgl_size) int dgl_ods[MAX_DGL_SIZE]; struct od_table_entry* entry; int i; - + struct litmus_lock* dgl_locks[MAX_DGL_SIZE]; - + if(dgl_size > MAX_DGL_SIZE || dgl_size < 1) goto out; - + if(!access_ok(VERIFY_READ, usr_dgl_ods, dgl_size*(sizeof(int)))) goto out; - + if(__copy_from_user(&dgl_ods, usr_dgl_ods, dgl_size*(sizeof(int)))) goto out; - + for(i = 0; i < dgl_size; ++i) { entry = get_entry_for_od(dgl_ods[i]); if(entry && is_lock(entry)) { @@ -449,16 +461,28 @@ asmlinkage long sys_litmus_dgl_unlock(void* __user usr_dgl_ods, int dgl_size) goto out; } } - + TS_DGL_UNLOCK_START; err = do_litmus_dgl_unlock(dgl_locks, dgl_size); - + /* Note: task my have been suspended or preempted in between! Take * this into account when computing overheads. */ - TS_DGL_UNLOCK_END; - + TS_DGL_UNLOCK_END; + out: - return err; + return err; +} + +#else + +asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size) +{ + return -ENOSYS; +} + +asmlinkage long sys_litmus_dgl_unlock(void* __user usr_dgl_ods, int dgl_size) +{ + return -ENOSYS; } #endif diff --git a/litmus/rsm_lock.c b/litmus/rsm_lock.c new file mode 100644 index 000000000000..11d119210ef9 --- /dev/null +++ b/litmus/rsm_lock.c @@ -0,0 +1,774 @@ +#include +#include + +#include +#include +#include + +#include + + +/* caller is responsible for locking */ +static struct task_struct* rsm_mutex_find_hp_waiter(struct rsm_mutex *mutex, + struct task_struct* skip) +{ + wait_queue_t *q; + struct list_head *pos; + struct task_struct *queued = NULL, *found = NULL; + +#ifdef CONFIG_LITMUS_DGL_SUPPORT + dgl_wait_state_t *dgl_wait = NULL; +#endif + + list_for_each(pos, &mutex->wait.task_list) { + q = list_entry(pos, wait_queue_t, task_list); + +#ifdef CONFIG_LITMUS_DGL_SUPPORT + if(q->func == dgl_wake_up) { + dgl_wait = (dgl_wait_state_t*) q->private; + if(tsk_rt(dgl_wait->task)->blocked_lock == &mutex->litmus_lock) { + queued = dgl_wait->task; + } + else { + queued = NULL; // skip it. + } + } + else { + queued = (struct task_struct*) q->private; + } +#else + queued = (struct task_struct*) q->private; +#endif + + /* Compare task prios, find high prio task. */ + if (queued && queued != skip && edf_higher_prio(queued, found)) { + found = queued; + } + } + return found; +} + + +#ifdef CONFIG_LITMUS_DGL_SUPPORT + +int rsm_mutex_is_owner(struct litmus_lock *l, struct task_struct *t) +{ + struct rsm_mutex *mutex = rsm_mutex_from_lock(l); + return(mutex->owner == t); +} + +// return 1 if resource was immediatly acquired. +// Assumes mutex->lock is held. +// Must set task state to TASK_UNINTERRUPTIBLE if task blocks. 
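+//
+// A return value of 1 is only produced when the resource was free and
+// ownership was taken on this call; the caller, do_litmus_dgl_lock() in
+// litmus/locking.c, relies on that to keep its bookkeeping, roughly:
+//
+//	if (l->ops->dgl_lock(l, dgl_wait, &dgl_wait->wq_nodes[i]))
+//		--(dgl_wait->nr_remaining);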
+int rsm_mutex_dgl_lock(struct litmus_lock *l, dgl_wait_state_t* dgl_wait, + wait_queue_t* wq_node) +{ + struct rsm_mutex *mutex = rsm_mutex_from_lock(l); + struct task_struct *t = dgl_wait->task; + + int acquired_immediatly = 0; + + BUG_ON(t != current); + + if (mutex->owner) { + TRACE_TASK(t, "Enqueuing on lock %d.\n", l->ident); + + init_dgl_waitqueue_entry(wq_node, dgl_wait); + + set_task_state(t, TASK_UNINTERRUPTIBLE); + __add_wait_queue_tail_exclusive(&mutex->wait, wq_node); + } else { + TRACE_TASK(t, "Acquired lock %d with no blocking.\n", l->ident); + + /* it's ours now */ + mutex->owner = t; + + raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); + binheap_add(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks, + struct nested_info, hp_binheap_node); + raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); + + acquired_immediatly = 1; + } + + return acquired_immediatly; +} + +void rsm_mutex_enable_priority(struct litmus_lock *l, + dgl_wait_state_t* dgl_wait) +{ + struct rsm_mutex *mutex = rsm_mutex_from_lock(l); + struct task_struct *t = dgl_wait->task; + struct task_struct *owner = mutex->owner; + unsigned long flags = 0; // these are unused under DGL coarse-grain locking + + BUG_ON(owner == t); + + tsk_rt(t)->blocked_lock = l; + mb(); + + if (edf_higher_prio(t, mutex->hp_waiter)) { + + struct task_struct *old_max_eff_prio; + struct task_struct *new_max_eff_prio; + struct task_struct *new_prio = NULL; + + if(mutex->hp_waiter) + TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n", + mutex->hp_waiter->comm, mutex->hp_waiter->pid); + else + TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n"); + + raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); + + old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); + mutex->hp_waiter = t; + l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter); + binheap_decrease(&l->nest.hp_binheap_node, + &tsk_rt(owner)->hp_blocked_tasks); + new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); + + if(new_max_eff_prio != old_max_eff_prio) { + TRACE_TASK(t, "is new hp_waiter.\n"); + + if ((effective_priority(owner) == old_max_eff_prio) || + (__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))){ + new_prio = new_max_eff_prio; + } + } + else { + TRACE_TASK(t, "no change in max_eff_prio of heap.\n"); + } + + if(new_prio) { + litmus->nested_increase_prio(owner, new_prio, + &mutex->lock, flags); // unlocks lock. + } + else { + raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); + unlock_fine_irqrestore(&mutex->lock, flags); + } + } + else { + TRACE_TASK(t, "no change in hp_waiter.\n"); + unlock_fine_irqrestore(&mutex->lock, flags); + } +} + +static void select_next_lock_if_primary(struct litmus_lock *l, + dgl_wait_state_t *dgl_wait) +{ + if(tsk_rt(dgl_wait->task)->blocked_lock == l) { + TRACE_CUR("Lock %d in DGL was primary for %s/%d.\n", + l->ident, dgl_wait->task->comm, dgl_wait->task->pid); + tsk_rt(dgl_wait->task)->blocked_lock = NULL; + mb(); + select_next_lock(dgl_wait /*, l*/); // pick the next lock to be blocked on + } + else { + TRACE_CUR("Got lock early! 
Lock %d in DGL was NOT primary for %s/%d.\n", + l->ident, dgl_wait->task->comm, dgl_wait->task->pid); + } +} +#endif + + + + +int rsm_mutex_lock(struct litmus_lock* l) +{ + struct task_struct *t = current; + struct task_struct *owner; + struct rsm_mutex *mutex = rsm_mutex_from_lock(l); + wait_queue_t wait; + unsigned long flags; + +#ifdef CONFIG_LITMUS_DGL_SUPPORT + raw_spinlock_t *dgl_lock; +#endif + + if (!is_realtime(t)) + return -EPERM; + +#ifdef CONFIG_LITMUS_DGL_SUPPORT + dgl_lock = litmus->get_dgl_spinlock(t); +#endif + + lock_global_irqsave(dgl_lock, flags); + lock_fine_irqsave(&mutex->lock, flags); + + if (mutex->owner) { + TRACE_TASK(t, "Blocking on lock %d.\n", l->ident); + + /* resource is not free => must suspend and wait */ + + owner = mutex->owner; + + init_waitqueue_entry(&wait, t); + + tsk_rt(t)->blocked_lock = l; /* record where we are blocked */ + mb(); // needed? + + /* FIXME: interruptible would be nice some day */ + set_task_state(t, TASK_UNINTERRUPTIBLE); + + __add_wait_queue_tail_exclusive(&mutex->wait, &wait); + + /* check if we need to activate priority inheritance */ + if (edf_higher_prio(t, mutex->hp_waiter)) { + + struct task_struct *old_max_eff_prio; + struct task_struct *new_max_eff_prio; + struct task_struct *new_prio = NULL; + + if(mutex->hp_waiter) + TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n", + mutex->hp_waiter->comm, mutex->hp_waiter->pid); + else + TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n"); + + raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); + + old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); + mutex->hp_waiter = t; + l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter); + binheap_decrease(&l->nest.hp_binheap_node, + &tsk_rt(owner)->hp_blocked_tasks); + new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); + + if(new_max_eff_prio != old_max_eff_prio) { + TRACE_TASK(t, "is new hp_waiter.\n"); + + if ((effective_priority(owner) == old_max_eff_prio) || + (__edf_higher_prio(new_max_eff_prio, BASE, + owner, EFFECTIVE))){ + new_prio = new_max_eff_prio; + } + } + else { + TRACE_TASK(t, "no change in max_eff_prio of heap.\n"); + } + + if(new_prio) { + litmus->nested_increase_prio(owner, new_prio, &mutex->lock, + flags); // unlocks lock. + } + else { + raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); + unlock_fine_irqrestore(&mutex->lock, flags); + } + } + else { + TRACE_TASK(t, "no change in hp_waiter.\n"); + + unlock_fine_irqrestore(&mutex->lock, flags); + } + + unlock_global_irqrestore(dgl_lock, flags); + + TS_LOCK_SUSPEND; + + /* We depend on the FIFO order. Thus, we don't need to recheck + * when we wake up; we are guaranteed to have the lock since + * there is only one wake up per release. + */ + + schedule(); + + TS_LOCK_RESUME; + + /* Since we hold the lock, no other task will change + * ->owner. We can thus check it without acquiring the spin + * lock. 
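+	 * (rsm_mutex_unlock() assigns mutex->owner = next before it calls
+	 * wake_up_process(), so the owner field already names us by the time
+	 * we resume here and the BUG_ON below should never fire.)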
*/ + BUG_ON(mutex->owner != t); + + TRACE_TASK(t, "Acquired lock %d.\n", l->ident); + + } else { + TRACE_TASK(t, "Acquired lock %d with no blocking.\n", l->ident); + + /* it's ours now */ + mutex->owner = t; + + raw_spin_lock(&tsk_rt(mutex->owner)->hp_blocked_tasks_lock); + binheap_add(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks, + struct nested_info, hp_binheap_node); + raw_spin_unlock(&tsk_rt(mutex->owner)->hp_blocked_tasks_lock); + + + unlock_fine_irqrestore(&mutex->lock, flags); + unlock_global_irqrestore(dgl_lock, flags); + } + + return 0; +} + + + +int rsm_mutex_unlock(struct litmus_lock* l) +{ + struct task_struct *t = current, *next = NULL; + struct rsm_mutex *mutex = rsm_mutex_from_lock(l); + unsigned long flags; + + struct task_struct *old_max_eff_prio; + + int wake_up_task = 1; + +#ifdef CONFIG_LITMUS_DGL_SUPPORT + dgl_wait_state_t *dgl_wait = NULL; + raw_spinlock_t *dgl_lock = litmus->get_dgl_spinlock(t); +#endif + + int err = 0; + + lock_global_irqsave(dgl_lock, flags); + lock_fine_irqsave(&mutex->lock, flags); + + + if (mutex->owner != t) { + err = -EINVAL; + unlock_fine_irqrestore(&mutex->lock, flags); + unlock_global_irqrestore(dgl_lock, flags); + return err; + } + + + raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); + + TRACE_TASK(t, "Freeing lock %d\n", l->ident); + + old_max_eff_prio = top_priority(&tsk_rt(t)->hp_blocked_tasks); + binheap_delete(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks); + + if(tsk_rt(t)->inh_task){ + struct task_struct *new_max_eff_prio = + top_priority(&tsk_rt(t)->hp_blocked_tasks); + + if((new_max_eff_prio == NULL) || + /* there was a change in eff prio */ + ( (new_max_eff_prio != old_max_eff_prio) && + /* and owner had the old eff prio */ + (effective_priority(t) == old_max_eff_prio)) ) + { + // old_max_eff_prio > new_max_eff_prio + + if(__edf_higher_prio(new_max_eff_prio, BASE, t, EFFECTIVE)) { + TRACE_TASK(t, "new_max_eff_prio > task's eff_prio-- new_max_eff_prio: %s/%d task: %s/%d [%s/%d]\n", + new_max_eff_prio->comm, new_max_eff_prio->pid, + t->comm, t->pid, tsk_rt(t)->inh_task->comm, + tsk_rt(t)->inh_task->pid); + WARN_ON(1); + } + + litmus->decrease_prio(t, new_max_eff_prio); + } + } + + if(binheap_empty(&tsk_rt(t)->hp_blocked_tasks) && + tsk_rt(t)->inh_task != NULL) + { + WARN_ON(tsk_rt(t)->inh_task != NULL); + TRACE_TASK(t, "No more locks are held, but eff_prio = %s/%d\n", + tsk_rt(t)->inh_task->comm, tsk_rt(t)->inh_task->pid); + } + + raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); + + + /* check if there are jobs waiting for this resource */ +#ifdef CONFIG_LITMUS_DGL_SUPPORT + __waitqueue_dgl_remove_first(&mutex->wait, &dgl_wait, &next); + if(dgl_wait) { + next = dgl_wait->task; + //select_next_lock_if_primary(l, dgl_wait); + } +#else + next = __waitqueue_remove_first(&mutex->wait); +#endif + if (next) { + /* next becomes the resouce holder */ + mutex->owner = next; + TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid); + + /* determine new hp_waiter if necessary */ + if (next == mutex->hp_waiter) { + + TRACE_TASK(next, "was highest-prio waiter\n"); + /* next has the highest priority --- it doesn't need to + * inherit. However, we need to make sure that the + * next-highest priority in the queue is reflected in + * hp_waiter. */ + mutex->hp_waiter = rsm_mutex_find_hp_waiter(mutex, next); + l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ? 
+ effective_priority(mutex->hp_waiter) : + NULL; + + if (mutex->hp_waiter) + TRACE_TASK(mutex->hp_waiter, "is new highest-prio waiter\n"); + else + TRACE("no further waiters\n"); + + raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock); + + binheap_add(&l->nest.hp_binheap_node, + &tsk_rt(next)->hp_blocked_tasks, + struct nested_info, hp_binheap_node); + +#ifdef CONFIG_LITMUS_DGL_SUPPORT + if(dgl_wait) { + select_next_lock_if_primary(l, dgl_wait); + //wake_up_task = atomic_dec_and_test(&dgl_wait->nr_remaining); + --(dgl_wait->nr_remaining); + wake_up_task = (dgl_wait->nr_remaining == 0); + } +#endif + raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); + } + else { + /* Well, if 'next' is not the highest-priority waiter, + * then it (probably) ought to inherit the highest-priority + * waiter's priority. */ + TRACE_TASK(next, "is not hp_waiter of lock %d.\n", l->ident); + + raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock); + + binheap_add(&l->nest.hp_binheap_node, + &tsk_rt(next)->hp_blocked_tasks, + struct nested_info, hp_binheap_node); + +#ifdef CONFIG_LITMUS_DGL_SUPPORT + if(dgl_wait) { + select_next_lock_if_primary(l, dgl_wait); + --(dgl_wait->nr_remaining); + wake_up_task = (dgl_wait->nr_remaining == 0); + } +#endif + + /* It is possible that 'next' *should* be the hp_waiter, but isn't + * because that update hasn't yet executed (update operation is + * probably blocked on mutex->lock). So only inherit if the top of + * 'next's top heap node is indeed the effective prio. of hp_waiter. + * (We use l->hp_waiter_eff_prio instead of effective_priority(hp_waiter) + * since the effective priority of hp_waiter can change (and the + * update has not made it to this lock).) + */ +#ifdef CONFIG_LITMUS_DGL_SUPPORT + if((l->nest.hp_waiter_eff_prio != NULL) && + (top_priority(&tsk_rt(next)->hp_blocked_tasks) == + l->nest.hp_waiter_eff_prio)) + { + if(dgl_wait && tsk_rt(next)->blocked_lock) { + BUG_ON(wake_up_task); + if(__edf_higher_prio(l->nest.hp_waiter_eff_prio, BASE, + next, EFFECTIVE)) { + litmus->nested_increase_prio(next, + l->nest.hp_waiter_eff_prio, &mutex->lock, flags); // unlocks lock && hp_blocked_tasks_lock. + goto out; // all spinlocks are released. bail out now. 
+ } + } + else { + litmus->increase_prio(next, l->nest.hp_waiter_eff_prio); + } + } + + raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); +#else + if(likely(top_priority(&tsk_rt(next)->hp_blocked_tasks) == + l->nest.hp_waiter_eff_prio)) + { + litmus->increase_prio(next, l->nest.hp_waiter_eff_prio); + } + raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); +#endif + } + + if(wake_up_task) { + TRACE_TASK(next, "waking up since it is no longer blocked.\n"); + + tsk_rt(next)->blocked_lock = NULL; + mb(); + + wake_up_process(next); + } + else { + TRACE_TASK(next, "is still blocked.\n"); + } + } + else { + /* becomes available */ + mutex->owner = NULL; + } + + unlock_fine_irqrestore(&mutex->lock, flags); + +#ifdef CONFIG_LITMUS_DGL_SUPPORT +out: +#endif + unlock_global_irqrestore(dgl_lock, flags); + + return err; +} + + +void rsm_mutex_propagate_increase_inheritance(struct litmus_lock* l, + struct task_struct* t, + raw_spinlock_t* to_unlock, + unsigned long irqflags) +{ + struct rsm_mutex *mutex = rsm_mutex_from_lock(l); + + // relay-style locking + lock_fine(&mutex->lock); + unlock_fine(to_unlock); + + if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked + struct task_struct *owner = mutex->owner; + + struct task_struct *old_max_eff_prio; + struct task_struct *new_max_eff_prio; + + raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); + + old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); + + if((t != mutex->hp_waiter) && edf_higher_prio(t, mutex->hp_waiter)) { + TRACE_TASK(t, "is new highest-prio waiter by propagation.\n"); + mutex->hp_waiter = t; + } + if(t == mutex->hp_waiter) { + // reflect the decreased priority in the heap node. + l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter); + + BUG_ON(!binheap_is_in_heap(&l->nest.hp_binheap_node)); + BUG_ON(!binheap_is_in_this_heap(&l->nest.hp_binheap_node, + &tsk_rt(owner)->hp_blocked_tasks)); + + binheap_decrease(&l->nest.hp_binheap_node, + &tsk_rt(owner)->hp_blocked_tasks); + } + + new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); + + + if(new_max_eff_prio != old_max_eff_prio) { + // new_max_eff_prio > old_max_eff_prio holds. + if ((effective_priority(owner) == old_max_eff_prio) || + (__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))) { + + TRACE_CUR("Propagating inheritance to holder of lock %d.\n", + l->ident); + + // beware: recursion + litmus->nested_increase_prio(owner, new_max_eff_prio, + &mutex->lock, irqflags); // unlocks mutex->lock + } + else { + TRACE_CUR("Lower priority than holder %s/%d. No propagation.\n", + owner->comm, owner->pid); + raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); + unlock_fine_irqrestore(&mutex->lock, irqflags); + } + } + else { + TRACE_TASK(mutex->owner, "No change in maxiumum effective priority.\n"); + raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); + unlock_fine_irqrestore(&mutex->lock, irqflags); + } + } + else { + struct litmus_lock *still_blocked = tsk_rt(t)->blocked_lock; + + TRACE_TASK(t, "is not blocked on lock %d.\n", l->ident); + if(still_blocked) { + TRACE_TASK(t, "is still blocked on a lock though (lock %d).\n", + still_blocked->ident); + if(still_blocked->ops->propagate_increase_inheritance) { + /* due to relay-style nesting of spinlocks (acq. A, acq. B, free A, free B) + we know that task 't' has not released any locks behind us in this + chain. Propagation just needs to catch up with task 't'. 
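+			   (The same relay pattern -- take the next lock's ->lock before
+			   releasing 'to_unlock' -- is used again in
+			   rsm_mutex_propagate_decrease_inheritance() below.)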
*/ + still_blocked->ops->propagate_increase_inheritance(still_blocked, + t, + &mutex->lock, + irqflags); + } + else { + TRACE_TASK(t, + "Inheritor is blocked on lock (%p) that does not " + "support nesting!\n", + still_blocked); + unlock_fine_irqrestore(&mutex->lock, irqflags); + } + } + else { + unlock_fine_irqrestore(&mutex->lock, irqflags); + } + } +} + + +void rsm_mutex_propagate_decrease_inheritance(struct litmus_lock* l, + struct task_struct* t, + raw_spinlock_t* to_unlock, + unsigned long irqflags) +{ + struct rsm_mutex *mutex = rsm_mutex_from_lock(l); + + // relay-style locking + lock_fine(&mutex->lock); + unlock_fine(to_unlock); + + if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked + if(t == mutex->hp_waiter) { + struct task_struct *owner = mutex->owner; + + struct task_struct *old_max_eff_prio; + struct task_struct *new_max_eff_prio; + + raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); + + old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); + + binheap_delete(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); + mutex->hp_waiter = rsm_mutex_find_hp_waiter(mutex, NULL); + l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ? + effective_priority(mutex->hp_waiter) : NULL; + binheap_add(&l->nest.hp_binheap_node, + &tsk_rt(owner)->hp_blocked_tasks, + struct nested_info, hp_binheap_node); + + new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); + + if((old_max_eff_prio != new_max_eff_prio) && + (effective_priority(owner) == old_max_eff_prio)) + { + // Need to set new effective_priority for owner + + struct task_struct *decreased_prio; + + TRACE_CUR("Propagating decreased inheritance to holder of lock %d.\n", + l->ident); + + if(__edf_higher_prio(new_max_eff_prio, BASE, owner, BASE)) { + TRACE_CUR("%s/%d has greater base priority than base priority of owner (%s/%d) of lock %d.\n", + (new_max_eff_prio) ? new_max_eff_prio->comm : "nil", + (new_max_eff_prio) ? new_max_eff_prio->pid : -1, + owner->comm, + owner->pid, + l->ident); + + decreased_prio = new_max_eff_prio; + } + else { + TRACE_CUR("%s/%d has lesser base priority than base priority of owner (%s/%d) of lock %d.\n", + (new_max_eff_prio) ? new_max_eff_prio->comm : "nil", + (new_max_eff_prio) ? new_max_eff_prio->pid : -1, + owner->comm, + owner->pid, + l->ident); + + decreased_prio = NULL; + } + + // beware: recursion + litmus->nested_decrease_prio(owner, decreased_prio, &mutex->lock, irqflags); // will unlock mutex->lock + } + else { + raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); + unlock_fine_irqrestore(&mutex->lock, irqflags); + } + } + else { + TRACE_TASK(t, "is not hp_waiter. No propagation.\n"); + unlock_fine_irqrestore(&mutex->lock, irqflags); + } + } + else { + struct litmus_lock *still_blocked = tsk_rt(t)->blocked_lock; + + TRACE_TASK(t, "is not blocked on lock %d.\n", l->ident); + if(still_blocked) { + TRACE_TASK(t, "is still blocked on a lock though (lock %d).\n", + still_blocked->ident); + if(still_blocked->ops->propagate_decrease_inheritance) { + /* due to linked nesting of spinlocks (acq. A, acq. B, free A, free B) + we know that task 't' has not released any locks behind us in this + chain. 
propagation just needs to catch up with task 't' */ + still_blocked->ops->propagate_decrease_inheritance(still_blocked, + t, + &mutex->lock, + irqflags); + } + else { + TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", + still_blocked); + unlock_fine_irqrestore(&mutex->lock, irqflags); + } + } + else { + unlock_fine_irqrestore(&mutex->lock, irqflags); + } + } +} + + +int rsm_mutex_close(struct litmus_lock* l) +{ + struct task_struct *t = current; + struct rsm_mutex *mutex = rsm_mutex_from_lock(l); + unsigned long flags; + + int owner; + +#ifdef CONFIG_LITMUS_DGL_SUPPORT + raw_spinlock_t *dgl_lock = litmus->get_dgl_spinlock(t); +#endif + + lock_global_irqsave(dgl_lock, flags); + lock_fine_irqsave(&mutex->lock, flags); + + owner = (mutex->owner == t); + + unlock_fine_irqrestore(&mutex->lock, flags); + unlock_global_irqrestore(dgl_lock, flags); + + if (owner) + rsm_mutex_unlock(l); + + return 0; +} + +void rsm_mutex_free(struct litmus_lock* lock) +{ + kfree(rsm_mutex_from_lock(lock)); +} + +struct litmus_lock* rsm_mutex_new(struct litmus_lock_ops* ops) +{ + struct rsm_mutex* mutex; + + mutex = kmalloc(sizeof(*mutex), GFP_KERNEL); + if (!mutex) + return NULL; + + mutex->litmus_lock.ops = ops; + mutex->owner = NULL; + mutex->hp_waiter = NULL; + init_waitqueue_head(&mutex->wait); + + +#ifdef CONFIG_DEBUG_SPINLOCK + { + __raw_spin_lock_init(&mutex->lock, + ((struct litmus_lock*)mutex)->cheat_lockdep, + &((struct litmus_lock*)mutex)->key); + } +#else + raw_spin_lock_init(&mutex->lock); +#endif + + ((struct litmus_lock*)mutex)->nest.hp_waiter_ptr = &mutex->hp_waiter; + + return &mutex->litmus_lock; +} + diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c index c0316c4a1b35..59236d007fd8 100644 --- a/litmus/sched_gsn_edf.c +++ b/litmus/sched_gsn_edf.c @@ -29,6 +29,11 @@ #include #include +#ifdef CONFIG_LITMUS_NESTED_LOCKING +#include +#include +#endif + #ifdef CONFIG_SCHED_CPU_AFFINITY #include #endif @@ -122,6 +127,11 @@ static rt_domain_t gsnedf; #ifdef CONFIG_LITMUS_DGL_SUPPORT static raw_spinlock_t dgl_lock; + +static raw_spinlock_t* gsnedf_get_dgl_spinlock(struct task_struct *t) +{ + return(&dgl_lock); +} #endif /* Uncomment this if you want to see all scheduling decisions in the @@ -133,7 +143,7 @@ static int cpu_lower_prio(struct binheap_node *_a, struct binheap_node *_b) { cpu_entry_t *a = binheap_entry(_a, cpu_entry_t, hn); cpu_entry_t *b = binheap_entry(_b, cpu_entry_t, hn); - + /* Note that a and b are inverted: we want the lowest-priority CPU at * the top of the heap. 
*/ @@ -645,29 +655,37 @@ static void gsnedf_task_exit(struct task_struct * t) static long gsnedf_admit_task(struct task_struct* tsk) { #ifdef CONFIG_LITMUS_NESTED_LOCKING - INIT_BINHEAP_HANDLE(&tsk_rt(tsk)->hp_blocked_tasks, edf_max_heap_base_priority_order); + INIT_BINHEAP_HANDLE(&tsk_rt(tsk)->hp_blocked_tasks, + edf_max_heap_base_priority_order); #endif - + return 0; } -#ifdef CONFIG_LITMUS_LOCKING -extern raw_spinlock_t rsm_global_lock; + + + + +#ifdef CONFIG_LITMUS_LOCKING #include /* called with IRQs off */ -static void increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) +static void increase_priority_inheritance(struct task_struct* t, + struct task_struct* prio_inh) { int linked_on; int check_preempt = 0; raw_spin_lock(&gsnedf_lock); +#ifdef CONFIG_LITMUS_NESTED_LOCKING /* this sanity check allows for weaker locking in protocols */ if(__edf_higher_prio(prio_inh, BASE, t, EFFECTIVE)) { - TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid); +#endif + TRACE_TASK(t, "inherits priority from %s/%d\n", + prio_inh->comm, prio_inh->pid); tsk_rt(t)->inh_task = prio_inh; linked_on = tsk_rt(t)->linked_on; @@ -721,6 +739,7 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str check_for_preemptions(); } } +#ifdef CONFIG_LITMUS_NESTED_LOCKING } else { TRACE_TASK(t, "Spurious invalid priority increase. " @@ -730,28 +749,33 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str effective_priority(t)->comm, effective_priority(t)->pid, prio_inh->comm, prio_inh->pid); } +#endif raw_spin_unlock(&gsnedf_lock); } /* called with IRQs off */ -static void decrease_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) +static void decrease_priority_inheritance(struct task_struct* t, + struct task_struct* prio_inh) { raw_spin_lock(&gsnedf_lock); - + +#ifdef CONFIG_LITMUS_NESTED_LOCKING if(__edf_higher_prio(t, EFFECTIVE, prio_inh, BASE)) { +#endif /* A job only stops inheriting a priority when it releases a * resource. Thus we can make the following assumption.*/ if(prio_inh) - TRACE_TASK(t, "EFFECTIVE priority decreased to %s/%d\n", prio_inh->comm, prio_inh->pid); + TRACE_TASK(t, "EFFECTIVE priority decreased to %s/%d\n", + prio_inh->comm, prio_inh->pid); else - TRACE_TASK(t, "base priority restored.\n"); - + TRACE_TASK(t, "base priority restored.\n"); + tsk_rt(t)->inh_task = prio_inh; - + if(tsk_rt(t)->scheduled_on != NO_CPU) { - TRACE_TASK(t, "is scheduled.\n"); - + TRACE_TASK(t, "is scheduled.\n"); + /* Check if rescheduling is necessary. We can't use heap_decrease() * since the priority was effectively lowered. */ unlink(t); @@ -762,7 +786,7 @@ static void decrease_priority_inheritance(struct task_struct* t, struct task_str raw_spin_lock(&gsnedf.release_lock); if (is_queued(t)) { TRACE_TASK(t, "is queued.\n"); - + /* decrease in priority, so we have to re-add to binomial heap */ unlink(t); gsnedf_job_arrival(t); @@ -772,6 +796,7 @@ static void decrease_priority_inheritance(struct task_struct* t, struct task_str } raw_spin_unlock(&gsnedf.release_lock); } +#ifdef CONFIG_LITMUS_NESTED_LOCKING } else { TRACE_TASK(t, "Spurious invalid priority decrease. " @@ -782,8 +807,8 @@ static void decrease_priority_inheritance(struct task_struct* t, struct task_str (prio_inh) ? prio_inh->comm : "nil", (prio_inh) ? 
prio_inh->pid : -1); } +#endif - raw_spin_unlock(&gsnedf_lock); } @@ -792,96 +817,15 @@ static void decrease_priority_inheritance(struct task_struct* t, struct task_str #ifdef CONFIG_LITMUS_NESTED_LOCKING - -void print_hp_waiters(struct binheap_node* n, int depth) -{ - struct litmus_lock *l; - struct nested_info *nest; - char padding[81] = " "; - struct task_struct *hp = NULL; - struct task_struct *hp_eff = NULL; - struct task_struct *node_prio = NULL; - - - if(n == NULL) { - TRACE("+-> %p\n", NULL); - return; - } - - nest = binheap_entry(n, struct nested_info, hp_binheap_node); - l = nest->lock; - - if(depth*2 <= 80) - padding[depth*2] = '\0'; - - if(nest->hp_waiter_ptr && *(nest->hp_waiter_ptr)) { - hp = *(nest->hp_waiter_ptr); - - if(tsk_rt(hp)->inh_task) { - hp_eff = tsk_rt(hp)->inh_task; - } - } - - node_prio = nest->hp_waiter_eff_prio; - - TRACE("%s+-> %s/%d [waiter = %s/%d] [waiter's inh = %s/%d] (lock = %d)\n", - padding, - (node_prio) ? node_prio->comm : "nil", - (node_prio) ? node_prio->pid : -1, - (hp) ? hp->comm : "nil", - (hp) ? hp->pid : -1, - (hp_eff) ? hp_eff->comm : "nil", - (hp_eff) ? hp_eff->pid : -1, - l->ident); - - if(n->left) print_hp_waiters(n->left, depth+1); - if(n->right) print_hp_waiters(n->right, depth+1); -} - -void dump_node_data(struct binheap_node* parent, struct binheap_node* child) -{ - struct binheap_node *root = (parent != BINHEAP_POISON) ? parent : child; - struct binheap_node *bad_node = (parent == BINHEAP_POISON) ? parent : child; - struct nested_info *nest; - - while(root->parent != NULL) { - root = root->parent; - } - - if(parent == BINHEAP_POISON) { - TRACE_CUR("parent was bad node.\n"); - } - else { - TRACE_CUR("child was bad node.\n"); - } - TRACE_CUR("Bad node info: data = %p, left = %p, right = %p\n", bad_node->data, bad_node->left, bad_node->right); - - nest = binheap_entry(bad_node, struct nested_info, hp_binheap_node); - TRACE_CUR("Lock with bad node: lock = %d\n", (nest->lock) ? nest->lock->ident : -1); - - print_hp_waiters(root, 1); -} - -void dump_node_data2(struct binheap_handle *handle, struct binheap_node* bad_node) -{ - struct binheap_node *root = handle->root; - struct nested_info *nest; - - TRACE_CUR("Bad node info: data = %p, left = %p, right = %p\n", bad_node->data, bad_node->left, bad_node->right); - - nest = binheap_entry(bad_node, struct nested_info, hp_binheap_node); - TRACE_CUR("Lock with bad node: lock = %d\n", (nest->lock) ? nest->lock->ident : -1); - - print_hp_waiters(root, 1); -} - - /* called with IRQs off */ /* preconditions: (1) The 'hp_blocked_tasks_lock' of task 't' is held. (2) The lock 'to_unlock' is held. */ -static void nested_increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh, raw_spinlock_t *to_unlock, unsigned long irqflags) +static void nested_increase_priority_inheritance(struct task_struct* t, + struct task_struct* prio_inh, + raw_spinlock_t *to_unlock, + unsigned long irqflags) { struct litmus_lock *blocked_lock = tsk_rt(t)->blocked_lock; @@ -890,17 +834,21 @@ static void nested_increase_priority_inheritance(struct task_struct* t, struct t } raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); // unlock the t's heap. - - + + if(blocked_lock) { if(blocked_lock->ops->propagate_increase_inheritance) { - TRACE_TASK(t, "Inheritor is blocked (...perhaps). Checking lock %d.\n", blocked_lock->ident); - - // beware: recursion - blocked_lock->ops->propagate_increase_inheritance(blocked_lock, t, to_unlock, irqflags); + TRACE_TASK(t, "Inheritor is blocked (...perhaps). 
Checking lock %d.\n", + blocked_lock->ident); + + // beware: recursion + blocked_lock->ops->propagate_increase_inheritance(blocked_lock, + t, to_unlock, + irqflags); } else { - TRACE_TASK(t, "Inheritor is blocked on lock (%d) that does not support nesting!\n", blocked_lock->ident); + TRACE_TASK(t, "Inheritor is blocked on lock (%d) that does not support nesting!\n", + blocked_lock->ident); unlock_fine_irqrestore(to_unlock, irqflags); } } @@ -915,22 +863,29 @@ static void nested_increase_priority_inheritance(struct task_struct* t, struct t (1) The 'hp_blocked_tasks_lock' of task 't' is held. (2) The lock 'to_unlock' is held. */ -static void nested_decrease_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh, raw_spinlock_t *to_unlock, unsigned long irqflags) +static void nested_decrease_priority_inheritance(struct task_struct* t, + struct task_struct* prio_inh, + raw_spinlock_t *to_unlock, + unsigned long irqflags) { struct litmus_lock *blocked_lock = tsk_rt(t)->blocked_lock; decrease_priority_inheritance(t, prio_inh); - + raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); // unlock the t's heap. - + if(blocked_lock) { - if(blocked_lock->ops->propagate_decrease_inheritance) { - TRACE_TASK(t, "Inheritor is blocked (...perhaps). Checking lock %d.\n", blocked_lock->ident); - + if(blocked_lock->ops->propagate_decrease_inheritance) { + TRACE_TASK(t, "Inheritor is blocked (...perhaps). Checking lock %d.\n", + blocked_lock->ident); + // beware: recursion - blocked_lock->ops->propagate_decrease_inheritance(blocked_lock, t, to_unlock, irqflags); - } + blocked_lock->ops->propagate_decrease_inheritance(blocked_lock, t, + to_unlock, + irqflags); + } else { - TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", blocked_lock); + TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", + blocked_lock); unlock_fine_irqrestore(to_unlock, irqflags); } } @@ -943,2591 +898,197 @@ static void nested_decrease_priority_inheritance(struct task_struct* t, struct t /* ******************** RSM MUTEX ********************** */ +static struct litmus_lock_ops gsnedf_rsm_mutex_lock_ops = { + .lock = rsm_mutex_lock, + .unlock = rsm_mutex_unlock, + .close = rsm_mutex_close, + .deallocate = rsm_mutex_free, + + .propagate_increase_inheritance = rsm_mutex_propagate_increase_inheritance, + .propagate_decrease_inheritance = rsm_mutex_propagate_decrease_inheritance, + +#ifdef CONFIG_LITMUS_DGL_SUPPORT + .dgl_lock = rsm_mutex_dgl_lock, + .is_owner = rsm_mutex_is_owner, + .enable_priority = rsm_mutex_enable_priority, +#endif +}; + +static struct litmus_lock* gsnedf_new_rsm_mutex(void) +{ + return rsm_mutex_new(&gsnedf_rsm_mutex_lock_ops); +} + +/* ******************** IKGLP ********************** */ + +static struct litmus_lock_ops gsnedf_ikglp_lock_ops = { + .lock = ikglp_lock, + .unlock = ikglp_unlock, + .close = ikglp_close, + .deallocate = ikglp_free, + + // ikglp can only be an outer-most lock. 
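+	// Because it must sit at the outer-most level of any nesting, no
+	// inheritance ever has to be relayed through an IKGLP instance, so
+	// the two propagation callbacks below can remain NULL.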
+ .propagate_increase_inheritance = NULL, + .propagate_decrease_inheritance = NULL, +}; + +static struct litmus_lock* gsnedf_new_ikglp(void* __user arg) +{ + return ikglp_new(num_online_cpus(), &gsnedf_ikglp_lock_ops, arg); +} + +#endif /* CONFIG_LITMUS_NESTED_LOCKING */ + + +/* ******************** FMLP support ********************** */ + /* struct for semaphore with priority inheritance */ -struct rsm_mutex { +struct fmlp_semaphore { struct litmus_lock litmus_lock; - + /* current resource holder */ struct task_struct *owner; - + /* highest-priority waiter */ struct task_struct *hp_waiter; - - /* FIFO queue of waiting tasks -- for now. time stamp in the future. */ - wait_queue_head_t wait; - - /* we do some nesting within spinlocks, so we can't use the normal - sleeplocks found in wait_queue_head_t. */ - raw_spinlock_t lock; + + /* FIFO queue of waiting tasks */ + wait_queue_head_t wait; }; -static inline struct rsm_mutex* rsm_mutex_from_lock(struct litmus_lock* lock) +static inline struct fmlp_semaphore* fmlp_from_lock(struct litmus_lock* lock) { - return container_of(lock, struct rsm_mutex, litmus_lock); + return container_of(lock, struct fmlp_semaphore, litmus_lock); } /* caller is responsible for locking */ -struct task_struct* rsm_mutex_find_hp_waiter(struct rsm_mutex *mutex, - struct task_struct* skip) +struct task_struct* find_hp_waiter(struct fmlp_semaphore *sem, + struct task_struct* skip) { - wait_queue_t *q; struct list_head *pos; - struct task_struct *queued = NULL, *found = NULL; - -#ifdef CONFIG_LITMUS_DGL_SUPPORT - dgl_wait_state_t *dgl_wait = NULL; -#endif - - list_for_each(pos, &mutex->wait.task_list) { - q = list_entry(pos, wait_queue_t, task_list); - -#ifdef CONFIG_LITMUS_DGL_SUPPORT - if(q->func == dgl_wake_up) { - dgl_wait = (dgl_wait_state_t*) q->private; - if(tsk_rt(dgl_wait->task)->blocked_lock == &mutex->litmus_lock) { - queued = dgl_wait->task; - } - else { - queued = NULL; // skip it. - } - } - else { - queued = (struct task_struct*) q->private; - } -#else - queued = (struct task_struct*) q->private; -#endif - + struct task_struct *queued, *found = NULL; + + list_for_each(pos, &sem->wait.task_list) { + queued = (struct task_struct*) list_entry(pos, wait_queue_t, + task_list)->private; + /* Compare task prios, find high prio task. */ - if (queued && queued != skip && edf_higher_prio(queued, found)) { + if (queued != skip && edf_higher_prio(queued, found)) found = queued; - } } return found; } -static inline struct task_struct* top_priority(struct binheap_handle* handle) { - if(!binheap_empty(handle)) { - return (struct task_struct*)(binheap_top_entry(handle, struct nested_info, hp_binheap_node)->hp_waiter_eff_prio); - } - return NULL; -} - -#ifdef CONFIG_LITMUS_DGL_SUPPORT -//static void gsnedf_rsm_mutex_reserve(struct litmus_lock *l, unsigned long *irqflags) -//{ -// struct rsm_mutex *mutex = rsm_mutex_from_lock(l); -// raw_spin_lock_irqsave(&mutex->lock, *irqflags); -//} -// -//static void gsnedf_rsm_mutex_unreserve(struct litmus_lock *l, unsigned long irqflags) -//{ -// struct rsm_mutex *mutex = rsm_mutex_from_lock(l); -// raw_spin_unlock_irqrestore(&mutex->lock, irqflags); -//} - -static raw_spinlock_t* gsn_edf_get_dgl_spinlock(struct task_struct *t) -{ - return(&dgl_lock); -} - -static int gsn_edf_rsm_mutex_is_owner(struct litmus_lock *l, struct task_struct *t) -{ - struct rsm_mutex *mutex = rsm_mutex_from_lock(l); - return(mutex->owner == t); -} - - -// return 1 if resource was immediatly acquired. -// Assumes mutex->lock is held. 
-// Must set task state to TASK_UNINTERRUPTIBLE if task blocks. -static int gsn_edf_rsm_mutex_dgl_lock(struct litmus_lock *l, dgl_wait_state_t* dgl_wait, wait_queue_t* wq_node) -{ - struct rsm_mutex *mutex = rsm_mutex_from_lock(l); - struct task_struct *t = dgl_wait->task; - - int acquired_immediatly = 0; - - BUG_ON(t != current); - - if (mutex->owner) { - TRACE_TASK(t, "Enqueuing on lock %d.\n", l->ident); - - init_dgl_waitqueue_entry(wq_node, dgl_wait); - - set_task_state(t, TASK_UNINTERRUPTIBLE); - __add_wait_queue_tail_exclusive(&mutex->wait, wq_node); - } else { - TRACE_TASK(t, "Acquired lock %d with no blocking.\n", l->ident); - - /* it's ours now */ - mutex->owner = t; - - raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); - binheap_add(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks, struct nested_info, hp_binheap_node); - raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); - - acquired_immediatly = 1; - } - - return acquired_immediatly; -} - -// Assumes mutex->lock is held. -static void gsn_edf_rsm_enable_priority(struct litmus_lock *l, dgl_wait_state_t* dgl_wait) -{ - struct rsm_mutex *mutex = rsm_mutex_from_lock(l); - struct task_struct *t = dgl_wait->task; - struct task_struct *owner = mutex->owner; - unsigned long flags = 0; // these are unused under DGL coarse-grain locking - - BUG_ON(owner == t); - - tsk_rt(t)->blocked_lock = l; - mb(); - - if (edf_higher_prio(t, mutex->hp_waiter)) { - - struct task_struct *old_max_eff_prio; - struct task_struct *new_max_eff_prio; - struct task_struct *new_prio = NULL; - - if(mutex->hp_waiter) - TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n", mutex->hp_waiter->comm, mutex->hp_waiter->pid); - else - TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n"); - - raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); - - //TRACE_TASK(owner, "Heap Before:\n"); - //print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0); - - old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); - - mutex->hp_waiter = t; - l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter); - - binheap_decrease(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); - - //TRACE_TASK(owner, "Heap After:\n"); - //print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0); - - new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); - - if(new_max_eff_prio != old_max_eff_prio) { - TRACE_TASK(t, "is new hp_waiter.\n"); - - if ((effective_priority(owner) == old_max_eff_prio) || - (__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))){ - new_prio = new_max_eff_prio; - } - } - else { - TRACE_TASK(t, "no change in max_eff_prio of heap.\n"); - } - - //raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); - - if(new_prio) { - nested_increase_priority_inheritance(owner, new_prio, &mutex->lock, flags); // unlocks lock. 
- } - else { - raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); - unlock_fine_irqrestore(&mutex->lock, flags); - } - } - else { - TRACE_TASK(t, "no change in hp_waiter.\n"); - unlock_fine_irqrestore(&mutex->lock, flags); - } -} -#endif - -int gsnedf_rsm_mutex_lock(struct litmus_lock* l) +int gsnedf_fmlp_lock(struct litmus_lock* l) { - struct task_struct *t = current; - struct task_struct *owner; - struct rsm_mutex *mutex = rsm_mutex_from_lock(l); + struct task_struct* t = current; + struct fmlp_semaphore *sem = fmlp_from_lock(l); wait_queue_t wait; unsigned long flags; - + if (!is_realtime(t)) return -EPERM; + spin_lock_irqsave(&sem->wait.lock, flags); - lock_global_irqsave(&dgl_lock, flags); - lock_fine_irqsave(&mutex->lock, flags); - - if (mutex->owner) { - TRACE_TASK(t, "Blocking on lock %d.\n", l->ident); - + if (sem->owner) { /* resource is not free => must suspend and wait */ - - owner = mutex->owner; - + init_waitqueue_entry(&wait, t); - - tsk_rt(t)->blocked_lock = l; /* record where we are blocked */ - mb(); // needed? - + /* FIXME: interruptible would be nice some day */ set_task_state(t, TASK_UNINTERRUPTIBLE); - - __add_wait_queue_tail_exclusive(&mutex->wait, &wait); - + + __add_wait_queue_tail_exclusive(&sem->wait, &wait); + /* check if we need to activate priority inheritance */ - if (edf_higher_prio(t, mutex->hp_waiter)) { - - struct task_struct *old_max_eff_prio; - struct task_struct *new_max_eff_prio; - struct task_struct *new_prio = NULL; - - if(mutex->hp_waiter) - TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n", mutex->hp_waiter->comm, mutex->hp_waiter->pid); - else - TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n"); - - raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); - - //TRACE_TASK(owner, "Heap Before:\n"); - //print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0); - - old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); - - mutex->hp_waiter = t; - l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter); - - binheap_decrease(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); - - //TRACE_TASK(owner, "Heap After:\n"); - //print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0); - - new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); - - if(new_max_eff_prio != old_max_eff_prio) { - TRACE_TASK(t, "is new hp_waiter.\n"); - - if ((effective_priority(owner) == old_max_eff_prio) || - (__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))){ - new_prio = new_max_eff_prio; - } - } - else { - TRACE_TASK(t, "no change in max_eff_prio of heap.\n"); - } - - if(new_prio) { - nested_increase_priority_inheritance(owner, new_prio, &mutex->lock, flags); // unlocks lock. - } - else { - raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); - unlock_fine_irqrestore(&mutex->lock, flags); - } - } - else { - TRACE_TASK(t, "no change in hp_waiter.\n"); - - unlock_fine_irqrestore(&mutex->lock, flags); + if (edf_higher_prio(t, sem->hp_waiter)) { + sem->hp_waiter = t; + if (edf_higher_prio(t, sem->owner)) + increase_priority_inheritance(sem->owner, sem->hp_waiter); } - - unlock_global_irqrestore(&dgl_lock, flags); - + TS_LOCK_SUSPEND; - + + /* release lock before sleeping */ + spin_unlock_irqrestore(&sem->wait.lock, flags); + /* We depend on the FIFO order. Thus, we don't need to recheck * when we wake up; we are guaranteed to have the lock since * there is only one wake up per release. */ - + schedule(); - + TS_LOCK_RESUME; - + /* Since we hold the lock, no other task will change * ->owner. 
We can thus check it without acquiring the spin * lock. */ - BUG_ON(mutex->owner != t); - - TRACE_TASK(t, "Acquired lock %d.\n", l->ident); - + BUG_ON(sem->owner != t); } else { - TRACE_TASK(t, "Acquired lock %d with no blocking.\n", l->ident); - /* it's ours now */ - mutex->owner = t; - - raw_spin_lock(&tsk_rt(mutex->owner)->hp_blocked_tasks_lock); - binheap_add(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks, struct nested_info, hp_binheap_node); - raw_spin_unlock(&tsk_rt(mutex->owner)->hp_blocked_tasks_lock); - - - unlock_fine_irqrestore(&mutex->lock, flags); - unlock_global_irqrestore(&dgl_lock, flags); - } - - return 0; -} - + sem->owner = t; -#ifdef CONFIG_LITMUS_DGL_SUPPORT -void select_next_lock_if_primary(struct litmus_lock *l, dgl_wait_state_t *dgl_wait) -{ - if(tsk_rt(dgl_wait->task)->blocked_lock == l) { - TRACE_CUR("Lock %d in DGL was primary for %s/%d.\n", l->ident, dgl_wait->task->comm, dgl_wait->task->pid); - tsk_rt(dgl_wait->task)->blocked_lock = NULL; - mb(); - select_next_lock(dgl_wait, l); // pick the next lock to be blocked on - } - else { - TRACE_CUR("Got lock early! Lock %d in DGL was NOT primary for %s/%d.\n", l->ident, dgl_wait->task->comm, dgl_wait->task->pid); + spin_unlock_irqrestore(&sem->wait.lock, flags); } -} -#endif + return 0; +} -int gsnedf_rsm_mutex_unlock(struct litmus_lock* l) +int gsnedf_fmlp_unlock(struct litmus_lock* l) { - struct task_struct *t = current, *next = NULL; - struct rsm_mutex *mutex = rsm_mutex_from_lock(l); + struct task_struct *t = current, *next; + struct fmlp_semaphore *sem = fmlp_from_lock(l); unsigned long flags; - - struct task_struct *old_max_eff_prio; - - int wake_up_task = 1; - -#ifdef CONFIG_LITMUS_DGL_SUPPORT - dgl_wait_state_t *dgl_wait = NULL; -#endif - int err = 0; - - lock_global_irqsave(&dgl_lock, flags); - lock_fine_irqsave(&mutex->lock, flags); - - - if (mutex->owner != t) { + + spin_lock_irqsave(&sem->wait.lock, flags); + + if (sem->owner != t) { err = -EINVAL; - unlock_fine_irqrestore(&mutex->lock, flags); - unlock_global_irqrestore(&dgl_lock, flags); - return err; - } - - - raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); - - TRACE_TASK(t, "Freeing lock %d\n", l->ident); - //TRACE_TASK(t, "Heap Before:\n"); - //print_hp_waiters(tsk_rt(t)->hp_blocked_tasks.root, 0); - - //old_max_hp_waiter = *(binheap_top_entry(&tsk_rt(t)->hp_blocked_tasks, struct nested_info, hp_binheap_node)->hp_waiter_ptr); - old_max_eff_prio = top_priority(&tsk_rt(t)->hp_blocked_tasks); - binheap_delete(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks); - - //raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); - - //TRACE_TASK(t, "Heap After:\n"); - //print_hp_waiters(tsk_rt(t)->hp_blocked_tasks.root, 0); - - if(tsk_rt(t)->inh_task){ - struct task_struct *new_max_eff_prio = top_priority(&tsk_rt(t)->hp_blocked_tasks); - - if((new_max_eff_prio == NULL) || - ( (new_max_eff_prio != old_max_eff_prio) /* there was a change in eff prio */ && - (effective_priority(t) == old_max_eff_prio) /* and owner had the old eff prio */) ) - { - // old_max_eff_prio > new_max_eff_prio - - if(__edf_higher_prio(new_max_eff_prio, BASE, t, EFFECTIVE)) { - TRACE_TASK(t, "new_max_eff_prio > task's eff_prio-- new_max_eff_prio: %s/%d task: %s/%d [%s/%d]\n", - new_max_eff_prio->comm, new_max_eff_prio->pid, - t->comm, t->pid, tsk_rt(t)->inh_task->comm, tsk_rt(t)->inh_task->pid); - WARN_ON(1); - } - - decrease_priority_inheritance(t, new_max_eff_prio); - } + goto out; } - - if(binheap_empty(&tsk_rt(t)->hp_blocked_tasks) && tsk_rt(t)->inh_task != NULL) - { - 
WARN_ON(tsk_rt(t)->inh_task != NULL); - TRACE_TASK(t, "No more locks are held, but eff_prio = %s/%d\n", - tsk_rt(t)->inh_task->comm, tsk_rt(t)->inh_task->pid); - } - - raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); - - + /* check if there are jobs waiting for this resource */ -#ifdef CONFIG_LITMUS_DGL_SUPPORT - __waitqueue_dgl_remove_first(&mutex->wait, &dgl_wait, &next); - if(dgl_wait) { - next = dgl_wait->task; - //select_next_lock_if_primary(l, dgl_wait); - } -#else - next = __waitqueue_remove_first(&mutex->wait); -#endif + next = __waitqueue_remove_first(&sem->wait); if (next) { /* next becomes the resouce holder */ - mutex->owner = next; + sem->owner = next; TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid); - -// if(tsk_rt(next)->blocked_lock == &mutex->litmus_lock) { // might be false for DGL. -// tsk_rt(next)->blocked_lock = NULL; -// mb(); -// } - + /* determine new hp_waiter if necessary */ - if (next == mutex->hp_waiter) { - + if (next == sem->hp_waiter) { TRACE_TASK(next, "was highest-prio waiter\n"); /* next has the highest priority --- it doesn't need to * inherit. However, we need to make sure that the * next-highest priority in the queue is reflected in * hp_waiter. */ - mutex->hp_waiter = rsm_mutex_find_hp_waiter(mutex, next); - l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ? effective_priority(mutex->hp_waiter) : NULL; - - if (mutex->hp_waiter) - TRACE_TASK(mutex->hp_waiter, "is new highest-prio waiter\n"); + sem->hp_waiter = find_hp_waiter(sem, next); + if (sem->hp_waiter) + TRACE_TASK(sem->hp_waiter, "is new highest-prio waiter\n"); else TRACE("no further waiters\n"); - - raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock); - - //TRACE_TASK(next, "Heap Before:\n"); - //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); - - binheap_add(&l->nest.hp_binheap_node, &tsk_rt(next)->hp_blocked_tasks, struct nested_info, hp_binheap_node); - - //TRACE_TASK(next, "Heap After:\n"); - //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); - -#ifdef CONFIG_LITMUS_DGL_SUPPORT - if(dgl_wait) { - select_next_lock_if_primary(l, dgl_wait); - //wake_up_task = atomic_dec_and_test(&dgl_wait->nr_remaining); - --(dgl_wait->nr_remaining); - wake_up_task = (dgl_wait->nr_remaining == 0); - } -#endif - raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); - } - else { - /* Well, if 'next' is not the highest-priority waiter, - * then it (probably) ought to inherit the highest-priority + } else { + /* Well, if next is not the highest-priority waiter, + * then it ought to inherit the highest-priority * waiter's priority. */ - TRACE_TASK(next, "is not hp_waiter of lock %d.\n", l->ident); - - raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock); - - //TRACE_TASK(next, "Heap Before:\n"); - //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); - - binheap_add(&l->nest.hp_binheap_node, &tsk_rt(next)->hp_blocked_tasks, - struct nested_info, hp_binheap_node); - - -#ifdef CONFIG_LITMUS_DGL_SUPPORT - if(dgl_wait) { - select_next_lock_if_primary(l, dgl_wait); -// wake_up_task = atomic_dec_and_test(&dgl_wait->nr_remaining); - --(dgl_wait->nr_remaining); - wake_up_task = (dgl_wait->nr_remaining == 0); - } -#endif - - //TRACE_TASK(next, "Heap After:\n"); - //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); - - /* It is possible that 'next' *should* be the hp_waiter, but isn't - * because that update hasn't yet executed (update operation is - * probably blocked on mutex->lock). So only inherit if the top of - * 'next's top heap node is indeed the effective prio. 
of hp_waiter. - * (We use l->hp_waiter_eff_prio instead of effective_priority(hp_waiter) - * since the effective priority of hp_waiter can change (and the - * update has not made it to this lock).) - */ -#ifdef CONFIG_LITMUS_DGL_SUPPORT - if((l->nest.hp_waiter_eff_prio != NULL) && (top_priority(&tsk_rt(next)->hp_blocked_tasks) == l->nest.hp_waiter_eff_prio)) - { - if(dgl_wait && tsk_rt(next)->blocked_lock) { - BUG_ON(wake_up_task); - if(__edf_higher_prio(l->nest.hp_waiter_eff_prio, BASE, next, EFFECTIVE)) { - nested_increase_priority_inheritance(next, l->nest.hp_waiter_eff_prio, &mutex->lock, flags); // unlocks lock && hp_blocked_tasks_lock. - goto out; // all spinlocks are released. bail out now. - } - } - else { - increase_priority_inheritance(next, l->nest.hp_waiter_eff_prio); - } - } - - raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); -#else - if(likely(top_priority(&tsk_rt(next)->hp_blocked_tasks) == l->nest.hp_waiter_eff_prio)) - { - increase_priority_inheritance(next, l->nest.hp_waiter_eff_prio); - } - raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); -#endif - } - - if(wake_up_task) { - TRACE_TASK(next, "waking up since it is no longer blocked.\n"); - - tsk_rt(next)->blocked_lock = NULL; - mb(); - - wake_up_process(next); - } - else { - TRACE_TASK(next, "is still blocked.\n"); + increase_priority_inheritance(next, sem->hp_waiter); } - } - else { - /* becomes available */ - mutex->owner = NULL; - } - - unlock_fine_irqrestore(&mutex->lock, flags); - + + /* wake up next */ + wake_up_process(next); + } else + /* becomes available */ + sem->owner = NULL; + + /* we lose the benefit of priority inheritance (if any) */ + if (tsk_rt(t)->inh_task) + decrease_priority_inheritance(t, NULL); + out: - unlock_global_irqrestore(&dgl_lock, flags); - - return err; -} - - -void gsnedf_rsm_mutex_propagate_increase_inheritance(struct litmus_lock* l, - struct task_struct* t, - raw_spinlock_t* to_unlock, - unsigned long irqflags) -{ - struct rsm_mutex *mutex = rsm_mutex_from_lock(l); - - // relay-style locking - lock_fine(&mutex->lock); - unlock_fine(to_unlock); - - if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked - struct task_struct *owner = mutex->owner; - - struct task_struct *old_max_eff_prio; - struct task_struct *new_max_eff_prio; - - raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); - - old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); - - if((t != mutex->hp_waiter) && edf_higher_prio(t, mutex->hp_waiter)) { - TRACE_TASK(t, "is new highest-prio waiter by propagation.\n"); - mutex->hp_waiter = t; - } - if(t == mutex->hp_waiter) { - // reflect the decreased priority in the heap node. - l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter); - - BUG_ON(!binheap_is_in_heap(&l->nest.hp_binheap_node)); - BUG_ON(!binheap_is_in_this_heap(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks)); - - binheap_decrease(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); - } - - new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); - - - if(new_max_eff_prio != old_max_eff_prio) { - // new_max_eff_prio > old_max_eff_prio holds. - if ((effective_priority(owner) == old_max_eff_prio) || - (__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))) { - - TRACE_CUR("Propagating inheritance to holder of lock %d.\n", l->ident); - - // beware: recursion - nested_increase_priority_inheritance(owner, new_max_eff_prio, &mutex->lock, irqflags); // unlocks mutex->lock - } - else { - TRACE_CUR("Lower priority than holder %s/%d. 
No propagation.\n", owner->comm, owner->pid); - raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); - unlock_fine_irqrestore(&mutex->lock, irqflags); - } - } - else { - TRACE_TASK(mutex->owner, "No change in maxiumum effective priority.\n"); - raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); - unlock_fine_irqrestore(&mutex->lock, irqflags); - } - } - else { - struct litmus_lock *still_blocked = tsk_rt(t)->blocked_lock; - - TRACE_TASK(t, "is not blocked on lock %d.\n", l->ident); - if(still_blocked) { - TRACE_TASK(t, "is still blocked on a lock though (lock %d).\n", still_blocked->ident); - if(still_blocked->ops->propagate_increase_inheritance) { - /* due to relay-style nesting of spinlocks (acq. A, acq. B, free A, free B) - we know that task 't' has not released any locks behind us in this - chain. Propagation just needs to catch up with task 't'. */ - still_blocked->ops->propagate_increase_inheritance(still_blocked, t, &mutex->lock, irqflags); - } - else { - TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", still_blocked); - unlock_fine_irqrestore(&mutex->lock, irqflags); - } - } - else { - unlock_fine_irqrestore(&mutex->lock, irqflags); - } - } -} - - -void gsnedf_rsm_mutex_propagate_decrease_inheritance(struct litmus_lock* l, - struct task_struct* t, - raw_spinlock_t* to_unlock, - unsigned long irqflags) -{ - struct rsm_mutex *mutex = rsm_mutex_from_lock(l); - - // relay-style locking - lock_fine(&mutex->lock); - unlock_fine(to_unlock); - - if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked - if(t == mutex->hp_waiter) { - struct task_struct *owner = mutex->owner; - - struct task_struct *old_max_eff_prio; - struct task_struct *new_max_eff_prio; - - raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); - - old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); - - binheap_delete(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); - mutex->hp_waiter = rsm_mutex_find_hp_waiter(mutex, NULL); - l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ? effective_priority(mutex->hp_waiter) : NULL; - binheap_add(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks, struct nested_info, hp_binheap_node); - - new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); - - if((old_max_eff_prio != new_max_eff_prio) && - (effective_priority(owner) == old_max_eff_prio)) - { - // Need to set new effective_priority for owner - - struct task_struct *decreased_prio; - - TRACE_CUR("Propagating decreased inheritance to holder of lock %d.\n", l->ident); - - if(__edf_higher_prio(new_max_eff_prio, BASE, owner, BASE)) { - TRACE_CUR("%s/%d has greater base priority than base priority of owner (%s/%d) of lock %d.\n", - (new_max_eff_prio) ? new_max_eff_prio->comm : "nil", - (new_max_eff_prio) ? new_max_eff_prio->pid : -1, - owner->comm, - owner->pid, - l->ident); - - decreased_prio = new_max_eff_prio; - } - else { - TRACE_CUR("%s/%d has lesser base priority than base priority of owner (%s/%d) of lock %d.\n", - (new_max_eff_prio) ? new_max_eff_prio->comm : "nil", - (new_max_eff_prio) ? new_max_eff_prio->pid : -1, - owner->comm, - owner->pid, - l->ident); - - decreased_prio = NULL; - } - - // beware: recursion - nested_decrease_priority_inheritance(owner, decreased_prio, &mutex->lock, irqflags); // will unlock mutex->lock - } - else { - raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); - unlock_fine_irqrestore(&mutex->lock, irqflags); - } - } - else { - TRACE_TASK(t, "is not hp_waiter. 
No propagation.\n"); - unlock_fine_irqrestore(&mutex->lock, irqflags); - } - } - else { - struct litmus_lock *still_blocked = tsk_rt(t)->blocked_lock; - - TRACE_TASK(t, "is not blocked on lock %d.\n", l->ident); - if(still_blocked) { - TRACE_TASK(t, "is still blocked on a lock though (lock %d).\n", still_blocked->ident); - if(still_blocked->ops->propagate_decrease_inheritance) { - /* due to linked nesting of spinlocks (acq. A, acq. B, free A, free B) - we know that task 't' has not released any locks behind us in this - chain. propagation just needs to catch up with task 't' */ - still_blocked->ops->propagate_decrease_inheritance(still_blocked, t, &mutex->lock, irqflags); - } - else { - TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n", still_blocked); - unlock_fine_irqrestore(&mutex->lock, irqflags); - } - } - else { - unlock_fine_irqrestore(&mutex->lock, irqflags); - } - } -} - - - -int gsnedf_rsm_mutex_close(struct litmus_lock* l) -{ - struct task_struct *t = current; - struct rsm_mutex *mutex = rsm_mutex_from_lock(l); - unsigned long flags; - - int owner; - - - lock_global_irqsave(&dgl_lock, flags); - lock_fine_irqsave(&mutex->lock, flags); - - owner = (mutex->owner == t); - - unlock_fine_irqrestore(&mutex->lock, flags); - unlock_global_irqrestore(&dgl_lock, flags); - - if (owner) - gsnedf_rsm_mutex_unlock(l); - - return 0; -} - -void gsnedf_rsm_mutex_free(struct litmus_lock* lock) -{ - kfree(rsm_mutex_from_lock(lock)); -} - -static struct litmus_lock_ops gsnedf_rsm_mutex_lock_ops = { - .close = gsnedf_rsm_mutex_close, - .lock = gsnedf_rsm_mutex_lock, - .unlock = gsnedf_rsm_mutex_unlock, - .deallocate = gsnedf_rsm_mutex_free, - .propagate_increase_inheritance = gsnedf_rsm_mutex_propagate_increase_inheritance, - .propagate_decrease_inheritance = gsnedf_rsm_mutex_propagate_decrease_inheritance, - -#ifdef CONFIG_LITMUS_DGL_SUPPORT -// .reserve = gsnedf_rsm_mutex_reserve, -// .unreserve = gsnedf_rsm_mutex_unreserve, - .dgl_lock = gsn_edf_rsm_mutex_dgl_lock, - .is_owner = gsn_edf_rsm_mutex_is_owner, - .enable_priority = gsn_edf_rsm_enable_priority, -#endif -}; - -static struct litmus_lock* gsnedf_new_rsm_mutex(void) -{ - struct rsm_mutex* mutex; - - mutex = kmalloc(sizeof(*mutex), GFP_KERNEL); - if (!mutex) - return NULL; - - mutex->owner = NULL; - mutex->hp_waiter = NULL; - init_waitqueue_head(&mutex->wait); - - -#ifdef CONFIG_DEBUG_SPINLOCK - { - __raw_spin_lock_init(&mutex->lock, ((struct litmus_lock*)mutex)->cheat_lockdep, &((struct litmus_lock*)mutex)->key); - } -#else - raw_spin_lock_init(&mutex->lock); -#endif - - mutex->litmus_lock.ops = &gsnedf_rsm_mutex_lock_ops; - - ((struct litmus_lock*)mutex)->nest.hp_waiter_ptr = &mutex->hp_waiter; - - return &mutex->litmus_lock; -} - -/* **** lock constructor **** */ - - - - - - - -/* ******************** IKGLP ********************** */ - - -typedef struct ikglp_heap_node -{ - struct task_struct *task; - struct binheap_node node; -} ikglp_heap_node_t; - -static int ikglp_edf_max_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b) -{ - ikglp_heap_node_t *d_a = binheap_entry(a, ikglp_heap_node_t, node); - ikglp_heap_node_t *d_b = binheap_entry(b, ikglp_heap_node_t, node); - - BUG_ON(!d_a); - BUG_ON(!d_b); - - return __edf_higher_prio(d_a->task, BASE, d_b->task, BASE); -} - -static int ikglp_edf_min_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b) -{ - ikglp_heap_node_t *d_a = binheap_entry(a, ikglp_heap_node_t, node); - ikglp_heap_node_t *d_b = binheap_entry(b, 
ikglp_heap_node_t, node); - - return __edf_higher_prio(d_b->task, BASE, d_a->task, BASE); -} - -struct fifo_queue; -struct ikglp_wait_state; - -typedef struct ikglp_donee_heap_node -{ - struct task_struct *task; - struct fifo_queue *fq; - struct ikglp_wait_state *donor_info; // cross-linked with ikglp_wait_state_t of donor - - struct binheap_node node; -} ikglp_donee_heap_node_t; - - -// Maintains the state of a request as it goes through the IKGLP -typedef struct ikglp_wait_state { - struct task_struct *task; // pointer back to the requesting task - - // Data for while waiting in FIFO Queue - wait_queue_t fq_node; - ikglp_heap_node_t global_heap_node; - ikglp_donee_heap_node_t donee_heap_node; - - // Data for while waiting in PQ - ikglp_heap_node_t pq_node; - - // Data for while waiting as a donor - ikglp_donee_heap_node_t *donee_info; // cross-linked with donee's ikglp_donee_heap_node_t - struct nested_info prio_donation; - struct binheap_node node; -} ikglp_wait_state_t; - - -static int ikglp_donor_edf_max_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b) -{ - ikglp_wait_state_t *d_a = binheap_entry(a, ikglp_wait_state_t, node); - ikglp_wait_state_t *d_b = binheap_entry(b, ikglp_wait_state_t, node); - - return __edf_higher_prio(d_a->task, BASE, d_b->task, BASE); -} - - -static int ikglp_edf_min_heap_donee_order(struct binheap_node *a, struct binheap_node *b) -{ - struct task_struct *prio_a, *prio_b; - - ikglp_donee_heap_node_t *d_a = binheap_entry(a, ikglp_donee_heap_node_t, node); - ikglp_donee_heap_node_t *d_b = binheap_entry(b, ikglp_donee_heap_node_t, node); - - if(!d_a->donor_info) { - prio_a = d_a->task; - } - else { - prio_a = d_a->donor_info->task; - BUG_ON(d_a->task != d_a->donor_info->donee_info->task); - } - - if(!d_b->donor_info) { - prio_b = d_b->task; - } - else { - prio_b = d_b->donor_info->task; - BUG_ON(d_b->task != d_b->donor_info->donee_info->task); - } - - // note reversed order - return __edf_higher_prio(prio_b, BASE, prio_a, BASE); -} - - -/* struct for semaphore with priority inheritance */ -struct fifo_queue -{ - wait_queue_head_t wait; - struct task_struct* owner; - - // used for bookkeepping - ikglp_heap_node_t global_heap_node; - ikglp_donee_heap_node_t donee_heap_node; - - struct task_struct* hp_waiter; - int count; /* number of waiters + holder */ - - struct nested_info nest; -}; - - -struct ikglp_semaphore -{ - struct litmus_lock litmus_lock; - - raw_spinlock_t lock; - raw_spinlock_t real_lock; - - int nr_replicas; // AKA k - int m; - - int max_fifo_len; // max len of a fifo queue - - struct binheap_handle top_m; // min heap, base prio - int top_m_size; // number of nodes in top_m - - struct binheap_handle not_top_m; // max heap, base prio - - struct binheap_handle donees; // min-heap, base prio - struct fifo_queue *shortest_fifo_queue; // pointer to shortest fifo queue - - /* data structures for holding requests */ - struct fifo_queue *fifo_queues; // array nr_replicas in length - struct binheap_handle priority_queue; // max-heap, base prio - struct binheap_handle donors; // max-heap, base prio -}; - -static inline struct ikglp_semaphore* ikglp_from_lock(struct litmus_lock* lock) -{ - return container_of(lock, struct ikglp_semaphore, litmus_lock); -} - - -static inline int ikglp_get_idx(struct ikglp_semaphore *sem, - struct fifo_queue *queue) -{ - return (queue - &sem->fifo_queues[0]); -} - -static inline struct fifo_queue* ikglp_get_queue( - struct ikglp_semaphore *sem, - struct task_struct *holder) -{ - int i; - for(i = 0; i < 
sem->nr_replicas; ++i) - if(sem->fifo_queues[i].owner == holder) - return(&sem->fifo_queues[i]); - return(NULL); -} - -static struct task_struct* ikglp_find_hp_waiter( - struct fifo_queue *kqueue, - struct task_struct *skip) -{ - struct list_head *pos; - struct task_struct *queued, *found = NULL; - - list_for_each(pos, &kqueue->wait.task_list) { - queued = (struct task_struct*) list_entry(pos, wait_queue_t, task_list)->private; - - /* Compare task prios, find high prio task. */ - if (queued != skip && edf_higher_prio(queued, found)) - found = queued; - } - return found; -} - -static struct fifo_queue* ikglp_find_shortest( - struct ikglp_semaphore *sem, - struct fifo_queue *search_start) -{ - // we start our search at search_start instead of at the beginning of the - // queue list to load-balance across all resources. - struct fifo_queue* step = search_start; - struct fifo_queue* shortest = sem->shortest_fifo_queue; - - do { - step = (step+1 != &sem->fifo_queues[sem->nr_replicas]) ? - step+1 : &sem->fifo_queues[0]; - - if(step->count < shortest->count) { - shortest = step; - if(step->count == 0) - break; /* can't get any shorter */ - } - - }while(step != search_start); - - return(shortest); -} - -static inline struct task_struct* ikglp_mth_highest(struct ikglp_semaphore *sem) -{ - return binheap_top_entry(&sem->top_m, ikglp_heap_node_t, node)->task; -} - -void print_global_list(struct binheap_node* n, int depth) -{ - ikglp_heap_node_t *global_heap_node; - char padding[81] = " "; - - if(n == NULL) { - TRACE_CUR("+-> %p\n", NULL); - return; - } - - global_heap_node = binheap_entry(n, ikglp_heap_node_t, node); - - if(depth*2 <= 80) - padding[depth*2] = '\0'; - - TRACE_CUR("%s+-> %s/%d\n", - padding, - global_heap_node->task->comm, - global_heap_node->task->pid); - - if(n->left) print_global_list(n->left, depth+1); - if(n->right) print_global_list(n->right, depth+1); -} - - -static void ikglp_add_global_list(struct ikglp_semaphore *sem, struct task_struct *t, ikglp_heap_node_t *node) -{ - - - node->task = t; - INIT_BINHEAP_NODE(&node->node); - - if(sem->top_m_size < sem->m) { - TRACE_CUR("Trivially adding %s/%d to top-m global list.\n", t->comm, t->pid); -// TRACE_CUR("Top-M Before (size = %d):\n", sem->top_m_size); -// print_global_list(sem->top_m.root, 1); - - binheap_add(&node->node, &sem->top_m, ikglp_heap_node_t, node); - ++(sem->top_m_size); - - TRACE_CUR("Top-M After (size = %d):\n", sem->top_m_size); - print_global_list(sem->top_m.root, 1); - } - else if(__edf_higher_prio(t, BASE, ikglp_mth_highest(sem), BASE)) { - ikglp_heap_node_t *evicted = binheap_top_entry(&sem->top_m, ikglp_heap_node_t, node); - - TRACE_CUR("Adding %s/%d to top-m and evicting %s/%d.\n", - t->comm, t->pid, - evicted->task->comm, evicted->task->pid); - -// TRACE_CUR("Not-Top-M Before:\n"); -// print_global_list(sem->not_top_m.root, 1); -// TRACE_CUR("Top-M Before (size = %d):\n", sem->top_m_size); -// print_global_list(sem->top_m.root, 1); - - - binheap_delete_root(&sem->top_m, ikglp_heap_node_t, node); - INIT_BINHEAP_NODE(&evicted->node); - binheap_add(&evicted->node, &sem->not_top_m, ikglp_heap_node_t, node); - - binheap_add(&node->node, &sem->top_m, ikglp_heap_node_t, node); - - TRACE_CUR("Top-M After (size = %d):\n", sem->top_m_size); - print_global_list(sem->top_m.root, 1); - TRACE_CUR("Not-Top-M After:\n"); - print_global_list(sem->not_top_m.root, 1); - } - else { - TRACE_CUR("Trivially adding %s/%d to not-top-m global list.\n", t->comm, t->pid); -// TRACE_CUR("Not-Top-M Before:\n"); -// 
print_global_list(sem->not_top_m.root, 1); - - binheap_add(&node->node, &sem->not_top_m, ikglp_heap_node_t, node); - - TRACE_CUR("Not-Top-M After:\n"); - print_global_list(sem->not_top_m.root, 1); - } -} - - -static void ikglp_del_global_list(struct ikglp_semaphore *sem, struct task_struct *t, ikglp_heap_node_t *node) -{ - BUG_ON(!binheap_is_in_heap(&node->node)); - - TRACE_CUR("Removing %s/%d from global list.\n", t->comm, t->pid); - - if(binheap_is_in_this_heap(&node->node, &sem->top_m)) { - TRACE_CUR("%s/%d is in top-m\n", t->comm, t->pid); - -// TRACE_CUR("Not-Top-M Before:\n"); -// print_global_list(sem->not_top_m.root, 1); -// TRACE_CUR("Top-M Before (size = %d):\n", sem->top_m_size); -// print_global_list(sem->top_m.root, 1); - - - binheap_delete(&node->node, &sem->top_m); - - if(!binheap_empty(&sem->not_top_m)) { - ikglp_heap_node_t *promoted = binheap_top_entry(&sem->not_top_m, ikglp_heap_node_t, node); - - TRACE_CUR("Promoting %s/%d to top-m\n", promoted->task->comm, promoted->task->pid); - - binheap_delete_root(&sem->not_top_m, ikglp_heap_node_t, node); - INIT_BINHEAP_NODE(&promoted->node); - - binheap_add(&promoted->node, &sem->top_m, ikglp_heap_node_t, node); - } - else { - TRACE_CUR("No one to promote to top-m.\n"); - --(sem->top_m_size); - } - - TRACE_CUR("Top-M After (size = %d):\n", sem->top_m_size); - print_global_list(sem->top_m.root, 1); - TRACE_CUR("Not-Top-M After:\n"); - print_global_list(sem->not_top_m.root, 1); - } - else { -// TRACE_CUR("%s/%d is in not-top-m\n", t->comm, t->pid); -// TRACE_CUR("Not-Top-M Before:\n"); -// print_global_list(sem->not_top_m.root, 1); - - binheap_delete(&node->node, &sem->not_top_m); - - TRACE_CUR("Not-Top-M After:\n"); - print_global_list(sem->not_top_m.root, 1); - } -} - - -void print_donees(struct ikglp_semaphore *sem, struct binheap_node *n, int depth) -{ - ikglp_donee_heap_node_t *donee_node; - char padding[81] = " "; - struct task_struct* donor = NULL; - - if(n == NULL) { - TRACE_CUR("+-> %p\n", NULL); - return; - } - - donee_node = binheap_entry(n, ikglp_donee_heap_node_t, node); - - if(depth*2 <= 80) - padding[depth*2] = '\0'; - - if(donee_node->donor_info) { - donor = donee_node->donor_info->task; - } - - TRACE_CUR("%s+-> %s/%d (d: %s/%d) (fq: %d)\n", - padding, - donee_node->task->comm, - donee_node->task->pid, - (donor) ? donor->comm : "nil", - (donor) ? donor->pid : -1, - ikglp_get_idx(sem, donee_node->fq)); - - if(n->left) print_donees(sem, n->left, depth+1); - if(n->right) print_donees(sem, n->right, depth+1); -} - - -static void ikglp_add_donees(struct ikglp_semaphore *sem, struct fifo_queue *fq, struct task_struct *t, ikglp_donee_heap_node_t* node) -{ -// TRACE_CUR("Adding %s/%d to donee list.\n", t->comm, t->pid); -// TRACE_CUR("donees Before:\n"); -// print_donees(sem, sem->donees.root, 1); - - node->task = t; - node->donor_info = NULL; - node->fq = fq; - INIT_BINHEAP_NODE(&node->node); - - binheap_add(&node->node, &sem->donees, ikglp_donee_heap_node_t, node); - - TRACE_CUR("donees After:\n"); - print_donees(sem, sem->donees.root, 1); -} - - -static void ikglp_refresh_owners_prio_increase(struct task_struct *t, struct fifo_queue *fq, struct ikglp_semaphore *sem, unsigned long flags) -{ - // priority of 't' has increased (note: 't' might already be hp_waiter). 
- if ((t == fq->hp_waiter) || edf_higher_prio(t, fq->hp_waiter)) { - struct task_struct *old_max_eff_prio; - struct task_struct *new_max_eff_prio; - struct task_struct *new_prio = NULL; - struct task_struct *owner = fq->owner; - - if(fq->hp_waiter) - TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n", fq->hp_waiter->comm, fq->hp_waiter->pid); - else - TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n"); - - if(owner) - { - raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); - - //TRACE_TASK(owner, "Heap Before:\n"); - //print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0); - - old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); - - fq->hp_waiter = t; - fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter); - - binheap_decrease(&fq->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); - - //TRACE_TASK(owner, "Heap After:\n"); - //print_hp_waiters(tsk_rt(owner)->hp_blocked_tasks.root, 0); - - new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); - - if(new_max_eff_prio != old_max_eff_prio) { - TRACE_TASK(t, "is new hp_waiter.\n"); - - if ((effective_priority(owner) == old_max_eff_prio) || - (__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))){ - new_prio = new_max_eff_prio; - } - } - else { - TRACE_TASK(t, "no change in max_eff_prio of heap.\n"); - } - - if(new_prio) { - // set new inheritance and propagate - TRACE_TASK(t, "Effective priority changed for owner %s/%d to %s/%d\n", - owner->comm, owner->pid, - new_prio->comm, new_prio->pid); - nested_increase_priority_inheritance(owner, new_prio, &sem->lock, flags); // unlocks lock. - } - else { - TRACE_TASK(t, "No change in effective priority (is %s/%d). Propagation halted.\n", - new_max_eff_prio->comm, new_max_eff_prio->pid); - raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); - unlock_fine_irqrestore(&sem->lock, flags); - } - } - else { - fq->hp_waiter = t; - fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter); - - TRACE_TASK(t, "no owner??\n"); - unlock_fine_irqrestore(&sem->lock, flags); - } - } - else { - TRACE_TASK(t, "hp_waiter is unaffected.\n"); - unlock_fine_irqrestore(&sem->lock, flags); - } -} - -// hp_waiter has decreased -static void ikglp_refresh_owners_prio_decrease(struct fifo_queue *fq, struct ikglp_semaphore *sem, unsigned long flags) -{ - struct task_struct *owner = fq->owner; - - struct task_struct *old_max_eff_prio; - struct task_struct *new_max_eff_prio; - - if(!owner) { - TRACE_CUR("No owner. Returning.\n"); - unlock_fine_irqrestore(&sem->lock, flags); - return; - } - - raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); - - old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); - - binheap_delete(&fq->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); - fq->nest.hp_waiter_eff_prio = fq->hp_waiter; - binheap_add(&fq->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks, struct nested_info, hp_binheap_node); - - new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); - - if((old_max_eff_prio != new_max_eff_prio) && - (effective_priority(owner) == old_max_eff_prio)) - { - // Need to set new effective_priority for owner - struct task_struct *decreased_prio; - - TRACE_CUR("Propagating decreased inheritance to holder of fq %d.\n", ikglp_get_idx(sem, fq)); - - if(__edf_higher_prio(new_max_eff_prio, BASE, owner, BASE)) { - TRACE_CUR("%s/%d has greater base priority than base priority of owner (%s/%d) of fq %d.\n", - (new_max_eff_prio) ? new_max_eff_prio->comm : "nil", - (new_max_eff_prio) ? 
new_max_eff_prio->pid : -1, - owner->comm, - owner->pid, - ikglp_get_idx(sem, fq)); - - decreased_prio = new_max_eff_prio; - } - else { - TRACE_CUR("%s/%d has lesser base priority than base priority of owner (%s/%d) of fq %d.\n", - (new_max_eff_prio) ? new_max_eff_prio->comm : "nil", - (new_max_eff_prio) ? new_max_eff_prio->pid : -1, - owner->comm, - owner->pid, - ikglp_get_idx(sem, fq)); - - decreased_prio = NULL; - } - - // beware: recursion - nested_decrease_priority_inheritance(owner, decreased_prio, &sem->lock, flags); // will unlock mutex->lock - } - else { - TRACE_TASK(owner, "No need to propagate priority decrease forward.\n"); - raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); - unlock_fine_irqrestore(&sem->lock, flags); - } -} - - -static void ikglp_remove_donation_from_owner(struct binheap_node *n, struct fifo_queue *fq, struct ikglp_semaphore *sem, unsigned long flags) -{ - struct task_struct *owner = fq->owner; - - struct task_struct *old_max_eff_prio; - struct task_struct *new_max_eff_prio; - - BUG_ON(!owner); - - raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); - - old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); - - binheap_delete(n, &tsk_rt(owner)->hp_blocked_tasks); - - new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); - - if((old_max_eff_prio != new_max_eff_prio) && - (effective_priority(owner) == old_max_eff_prio)) - { - // Need to set new effective_priority for owner - struct task_struct *decreased_prio; - - TRACE_CUR("Propagating decreased inheritance to holder of fq %d.\n", ikglp_get_idx(sem, fq)); - - if(__edf_higher_prio(new_max_eff_prio, BASE, owner, BASE)) { - TRACE_CUR("has greater base priority than base priority of owner of fq %d.\n", ikglp_get_idx(sem, fq)); - decreased_prio = new_max_eff_prio; - } - else { - TRACE_CUR("has lesser base priority than base priority of owner of fq %d.\n", ikglp_get_idx(sem, fq)); - decreased_prio = NULL; - } - - // beware: recursion - nested_decrease_priority_inheritance(owner, decreased_prio, &sem->lock, flags); // will unlock mutex->lock - } - else { - TRACE_TASK(owner, "No need to propagate priority decrease forward.\n"); - raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); - unlock_fine_irqrestore(&sem->lock, flags); - } -} - -static void ikglp_remove_donation_from_fq_waiter(struct task_struct *t, struct binheap_node *n) -{ - struct task_struct *old_max_eff_prio; - struct task_struct *new_max_eff_prio; - - raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); - - old_max_eff_prio = top_priority(&tsk_rt(t)->hp_blocked_tasks); - - binheap_delete(n, &tsk_rt(t)->hp_blocked_tasks); - - new_max_eff_prio = top_priority(&tsk_rt(t)->hp_blocked_tasks); - - if((old_max_eff_prio != new_max_eff_prio) && - (effective_priority(t) == old_max_eff_prio)) - { - // Need to set new effective_priority for owner - struct task_struct *decreased_prio; - - if(__edf_higher_prio(new_max_eff_prio, BASE, t, BASE)) { - decreased_prio = new_max_eff_prio; - } - else { - decreased_prio = NULL; - } - - tsk_rt(t)->inh_task = decreased_prio; - } - - raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); -} - -static void ikglp_get_immediate(struct task_struct* t, struct fifo_queue *fq, struct ikglp_semaphore *sem, unsigned long flags) -{ - // resource available now - TRACE_CUR("queue %d: acquired immediately\n", ikglp_get_idx(sem, fq)); - - fq->owner = t; - - raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); - binheap_add(&fq->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks, struct nested_info, hp_binheap_node); - 
raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); - - ++(fq->count); - - ikglp_add_global_list(sem, t, &fq->global_heap_node); - ikglp_add_donees(sem, fq, t, &fq->donee_heap_node); - - sem->shortest_fifo_queue = ikglp_find_shortest(sem, sem->shortest_fifo_queue); - - unlock_fine_irqrestore(&sem->lock, flags); -} - - - -static void __ikglp_enqueue_on_fq( - struct ikglp_semaphore *sem, - struct fifo_queue* fq, - struct task_struct* t, - wait_queue_t *wait, - ikglp_heap_node_t *global_heap_node, - ikglp_donee_heap_node_t *donee_heap_node) -{ - /* resource is not free => must suspend and wait */ - TRACE_TASK(t, "Enqueuing on fq %d.\n", - ikglp_get_idx(sem, fq)); - - init_waitqueue_entry(wait, t); - - __add_wait_queue_tail_exclusive(&fq->wait, wait); - - ++(fq->count); - - // update global list. - if(likely(global_heap_node)) { - if(binheap_is_in_heap(&global_heap_node->node)) { - WARN_ON(1); - ikglp_del_global_list(sem, t, global_heap_node); - } - ikglp_add_global_list(sem, t, global_heap_node); - } - // update donor eligiblity list. - if(likely(donee_heap_node)) { -// if(binheap_is_in_heap(&donee_heap_node->node)) { -// WARN_ON(1); -// } - ikglp_add_donees(sem, fq, t, donee_heap_node); - } - - if(likely(sem->shortest_fifo_queue == fq)) { - sem->shortest_fifo_queue = ikglp_find_shortest(sem, fq); - } - - TRACE_TASK(t, "shortest queue is now %d\n", ikglp_get_idx(sem, fq)); -} - - -static void ikglp_enqueue_on_fq( - struct ikglp_semaphore *sem, - struct fifo_queue *fq, - ikglp_wait_state_t *wait, - unsigned long flags) -{ - /* resource is not free => must suspend and wait */ - TRACE_TASK(wait->task, "queue %d: Resource is not free => must suspend and wait.\n", - ikglp_get_idx(sem, fq)); - - INIT_BINHEAP_NODE(&wait->global_heap_node.node); - INIT_BINHEAP_NODE(&wait->donee_heap_node.node); - - __ikglp_enqueue_on_fq(sem, fq, wait->task, &wait->fq_node, &wait->global_heap_node, &wait->donee_heap_node); - - ikglp_refresh_owners_prio_increase(wait->task, fq, sem, flags); // unlocks sem->lock -} - - -static void __ikglp_enqueue_on_pq(struct ikglp_semaphore *sem, ikglp_wait_state_t *wait) -{ - TRACE_TASK(wait->task, "goes to PQ.\n"); - - wait->pq_node.task = wait->task; // copy over task (little redundant...) 
- - binheap_add(&wait->pq_node.node, &sem->priority_queue, ikglp_heap_node_t, node); -} - -static void ikglp_enqueue_on_pq(struct ikglp_semaphore *sem, ikglp_wait_state_t *wait) -{ - INIT_BINHEAP_NODE(&wait->global_heap_node.node); - INIT_BINHEAP_NODE(&wait->donee_heap_node.node); - INIT_BINHEAP_NODE(&wait->pq_node.node); - - __ikglp_enqueue_on_pq(sem, wait); -} - -void print_donors(struct binheap_node *n, int depth) -{ - ikglp_wait_state_t *donor_node; - char padding[81] = " "; - - if(n == NULL) { - TRACE_CUR("+-> %p\n", NULL); - return; - } - - donor_node = binheap_entry(n, ikglp_wait_state_t, node); - - if(depth*2 <= 80) - padding[depth*2] = '\0'; - - - TRACE_CUR("%s+-> %s/%d (donee: %s/%d)\n", - padding, - donor_node->task->comm, - donor_node->task->pid, - donor_node->donee_info->task->comm, - donor_node->donee_info->task->pid); - - if(n->left) print_donors(n->left, depth+1); - if(n->right) print_donors(n->right, depth+1); -} - - -static void ikglp_enqueue_on_donor(struct ikglp_semaphore *sem, ikglp_wait_state_t* wait, unsigned long flags) -{ - struct task_struct *t = wait->task; - ikglp_donee_heap_node_t *donee_node = NULL; - struct task_struct *donee; - - struct task_struct *old_max_eff_prio; - struct task_struct *new_max_eff_prio; - struct task_struct *new_prio = NULL; - - INIT_BINHEAP_NODE(&wait->global_heap_node.node); - INIT_BINHEAP_NODE(&wait->donee_heap_node.node); - INIT_BINHEAP_NODE(&wait->pq_node.node); - INIT_BINHEAP_NODE(&wait->node); - -// TRACE_CUR("Adding %s/%d as donor.\n", t->comm, t->pid); -// TRACE_CUR("donors Before:\n"); -// print_donors(sem->donors.root, 1); - - // Add donor to the global list. - ikglp_add_global_list(sem, t, &wait->global_heap_node); - - // Select a donee - donee_node = binheap_top_entry(&sem->donees, ikglp_donee_heap_node_t, node); - donee = donee_node->task; - - TRACE_TASK(t, "Donee selected: %s/%d\n", donee->comm, donee->pid); - - TRACE_CUR("Temporarily removing %s/%d to donee list.\n", donee->comm, donee->pid); -// TRACE_CUR("donees Before:\n"); -// print_donees(sem, sem->donees.root, 1); - - binheap_delete_root(&sem->donees, ikglp_donee_heap_node_t, node); // will re-add it shortly - - TRACE_CUR("donees After:\n"); - print_donees(sem, sem->donees.root, 1); - - - wait->donee_info = donee_node; - - // Add t to donor heap. - binheap_add(&wait->node, &sem->donors, ikglp_wait_state_t, node); - - // Now adjust the donee's priority. - - // Lock the donee's inheritance heap. - raw_spin_lock(&tsk_rt(donee)->hp_blocked_tasks_lock); - - old_max_eff_prio = top_priority(&tsk_rt(donee)->hp_blocked_tasks); - - if(donee_node->donor_info) { - // Steal donation relation. Evict old donor to PQ. - - // Remove old donor from donor heap - ikglp_wait_state_t *old_wait = donee_node->donor_info; - struct task_struct *old_donor = old_wait->task; - - TRACE_TASK(t, "Donee (%s/%d) had donor %s/%d. Moving old donor to PQ.\n", - donee->comm, donee->pid, old_donor->comm, old_donor->pid); - - binheap_delete(&old_wait->node, &sem->donors); - - // Remove donation from donee's inheritance heap. - binheap_delete(&old_wait->prio_donation.hp_binheap_node, &tsk_rt(donee)->hp_blocked_tasks); - // WARNING: have not updated inh_prio! - - // Add old donor to PQ. - __ikglp_enqueue_on_pq(sem, old_wait); - - // Remove old donor from the global heap. 
- ikglp_del_global_list(sem, old_donor, &old_wait->global_heap_node); - } - - // Add back donee's node to the donees heap with increased prio - donee_node->donor_info = wait; - INIT_BINHEAP_NODE(&donee_node->node); - - - TRACE_CUR("Adding %s/%d back to donee list.\n", donee->comm, donee->pid); -// TRACE_CUR("donees Before:\n"); - print_donees(sem, sem->donees.root, 1); - - binheap_add(&donee_node->node, &sem->donees, ikglp_donee_heap_node_t, node); - - TRACE_CUR("donees After:\n"); - print_donees(sem, sem->donees.root, 1); - - - - // Add an inheritance/donation to the donee's inheritance heap. - wait->prio_donation.lock = (struct litmus_lock*)sem; - wait->prio_donation.hp_waiter_eff_prio = t; - wait->prio_donation.hp_waiter_ptr = NULL; - INIT_BINHEAP_NODE(&wait->prio_donation.hp_binheap_node); - - binheap_add(&wait->prio_donation.hp_binheap_node, &tsk_rt(donee)->hp_blocked_tasks, struct nested_info, hp_binheap_node); - - new_max_eff_prio = top_priority(&tsk_rt(donee)->hp_blocked_tasks); - - if(new_max_eff_prio != old_max_eff_prio) { - if ((effective_priority(donee) == old_max_eff_prio) || - (__edf_higher_prio(new_max_eff_prio, BASE, donee, EFFECTIVE))){ - TRACE_TASK(t, "Donation increases %s/%d's effective priority\n", donee->comm, donee->pid); - new_prio = new_max_eff_prio; - } -// else { -// // should be bug. donor would not be in top-m. -// TRACE_TASK(t, "Donation is not greater than base prio of %s/%d?\n", donee->comm, donee->pid); -// WARN_ON(1); -// } -// } -// else { -// // should be bug. donor would not be in top-m. -// TRACE_TASK(t, "No change in %s/%d's inheritance heap?\n", donee->comm, donee->pid); -// WARN_ON(1); - } - - if(new_prio) { - struct fifo_queue *donee_fq = donee_node->fq; - - if(donee != donee_fq->owner) { - TRACE_TASK(t, "%s/%d is not the owner. Propagating priority to owner %s/%d.\n", - donee->comm, donee->pid, - donee_fq->owner->comm, donee_fq->owner->pid); - - raw_spin_unlock(&tsk_rt(donee)->hp_blocked_tasks_lock); - ikglp_refresh_owners_prio_increase(donee, donee_fq, sem, flags); // unlocks sem->lock - } - else { - TRACE_TASK(t, "%s/%d is the owner. Progatating priority immediatly.\n", - donee->comm, donee->pid); - nested_increase_priority_inheritance(donee, new_prio, &sem->lock, flags); // unlocks sem->lock and donee's heap lock - } - } - else { - TRACE_TASK(t, "No change in effective priority (it is %d/%s). BUG?\n", - new_max_eff_prio->comm, new_max_eff_prio->pid); - raw_spin_unlock(&tsk_rt(donee)->hp_blocked_tasks_lock); - unlock_fine_irqrestore(&sem->lock, flags); - } - - - TRACE_CUR("donors After:\n"); - print_donors(sem->donors.root, 1); -} - - -static int gsnedf_ikglp_lock(struct litmus_lock* l) -{ - struct task_struct* t = current; - struct ikglp_semaphore *sem = ikglp_from_lock(l); - unsigned long flags = 0, real_flags; - struct fifo_queue *fq = NULL; - int replica = -EINVAL; - - ikglp_wait_state_t wait; - - if (!is_realtime(t)) - return -EPERM; - - raw_spin_lock_irqsave(&sem->real_lock, real_flags); - - lock_global_irqsave(&dgl_lock, flags); - lock_fine_irqsave(&sem->lock, flags); - - if(sem->shortest_fifo_queue->count == 0) { - // take available resource - replica = ikglp_get_idx(sem, sem->shortest_fifo_queue); - - ikglp_get_immediate(t, sem->shortest_fifo_queue, sem, flags); // unlocks sem->lock - - unlock_global_irqrestore(&dgl_lock, flags); - raw_spin_unlock_irqrestore(&sem->real_lock, real_flags); - } - else - { - // we have to suspend. - - wait.task = t; // THIS IS CRITICALLY IMPORTANT!!! 
- - tsk_rt(t)->blocked_lock = (struct litmus_lock*)sem; // record where we are blocked - mb(); - - /* FIXME: interruptible would be nice some day */ - set_task_state(t, TASK_UNINTERRUPTIBLE); - - if(sem->shortest_fifo_queue->count < sem->max_fifo_len) { - // enqueue on fq - ikglp_enqueue_on_fq(sem, sem->shortest_fifo_queue, &wait, flags); // unlocks sem->lock - } - else { - - TRACE_CUR("IKGLP fifo queues are full.\n"); - - // no room in fifos. Go to PQ or donors. - - if(__edf_higher_prio(ikglp_mth_highest(sem), BASE, t, BASE)) { - // enqueue on PQ - ikglp_enqueue_on_pq(sem, &wait); - unlock_fine_irqrestore(&sem->lock, flags); - } - else { - // enqueue as donor - ikglp_enqueue_on_donor(sem, &wait, flags); // unlocks sem->lock - } - } - - unlock_global_irqrestore(&dgl_lock, flags); - raw_spin_unlock_irqrestore(&sem->real_lock, real_flags); - - TS_LOCK_SUSPEND; - - schedule(); - - TS_LOCK_RESUME; - - fq = ikglp_get_queue(sem, t); - BUG_ON(!fq); - - replica = ikglp_get_idx(sem, fq); - } - - TRACE_CUR("Acquired lock %d, queue %d\n", - l->ident, replica); - - return replica; -} - -static void ikglp_move_donor_to_fq(struct ikglp_semaphore *sem, struct fifo_queue *fq, ikglp_wait_state_t *donor_info) -{ - struct task_struct *t = donor_info->task; - - TRACE_CUR("Donor %s/%d being moved to fq %d\n", - t->comm, - t->pid, - ikglp_get_idx(sem, fq)); - - binheap_delete(&donor_info->node, &sem->donors); - - __ikglp_enqueue_on_fq(sem, fq, t, - &donor_info->fq_node, - NULL, // already in global_list, so pass null to prevent adding 2nd time. - //&donor_info->global_heap_node, - &donor_info->donee_heap_node); - - // warning: - // ikglp_update_owners_prio(t, fq, sem, flags) has not been called. -} - -static void ikglp_move_pq_to_fq(struct ikglp_semaphore *sem, struct fifo_queue *fq, ikglp_wait_state_t *wait) -{ - struct task_struct *t = wait->task; - - TRACE_CUR("PQ request %s/%d being moved to fq %d\n", - t->comm, - t->pid, - ikglp_get_idx(sem, fq)); - - binheap_delete(&wait->pq_node.node, &sem->priority_queue); - - __ikglp_enqueue_on_fq(sem, fq, t, - &wait->fq_node, - &wait->global_heap_node, - &wait->donee_heap_node); - // warning: - // ikglp_update_owners_prio(t, fq, sem, flags) has not been called. -} - -static ikglp_wait_state_t* ikglp_find_hp_waiter_to_steal(struct ikglp_semaphore* sem) -{ - /* must hold sem->lock */ - - struct fifo_queue *fq = NULL; - struct list_head *pos; - struct task_struct *queued; - int i; - - for(i = 0; i < sem->nr_replicas; ++i) { - if( (sem->fifo_queues[i].count > 1) && - (!fq || edf_higher_prio(sem->fifo_queues[i].hp_waiter, fq->hp_waiter)) ) { - - TRACE_CUR("hp_waiter on fq %d (%s/%d) has higher prio than hp_waiter on fq %d (%s/%d)\n", - ikglp_get_idx(sem, &sem->fifo_queues[i]), - sem->fifo_queues[i].hp_waiter->comm, - sem->fifo_queues[i].hp_waiter->pid, - (fq) ? ikglp_get_idx(sem, fq) : -1, - (fq) ? ((fq->hp_waiter) ? fq->hp_waiter->comm : "nil") : "nilXX", - (fq) ? ((fq->hp_waiter) ? 
fq->hp_waiter->pid : -1) : -2); - - fq = &sem->fifo_queues[i]; - - WARN_ON(!(fq->hp_waiter)); - } - } - - if(fq) { - struct task_struct *max_hp = fq->hp_waiter; - ikglp_wait_state_t* ret = NULL; - - TRACE_CUR("Searching for %s/%d on fq %d\n", - max_hp->comm, - max_hp->pid, - ikglp_get_idx(sem, fq)); - - BUG_ON(!max_hp); - - list_for_each(pos, &fq->wait.task_list) { - wait_queue_t *wait = list_entry(pos, wait_queue_t, task_list); - - queued = (struct task_struct*) wait->private; - - TRACE_CUR("fq %d entry: %s/%d\n", ikglp_get_idx(sem, fq), queued->comm, queued->pid); - - /* Compare task prios, find high prio task. */ - if (queued == max_hp) { - TRACE_CUR("Found it!\n"); - ret = container_of(wait, ikglp_wait_state_t, fq_node); - } - } - -// list_for_each(pos, &fq->wait.task_list) { -// wait_queue_t *wait = list_entry(pos, wait_queue_t, task_list); -// -// queued = (struct task_struct*) wait->private; -// /* Compare task prios, find high prio task. */ -// if (queued == max_hp) { -// TRACE_CUR("Found it!\n"); -// return container_of(wait, ikglp_wait_state_t, fq_node); -// } -// } - - if(!ret) { - WARN_ON(1); - } - - return ret; - } - - return(NULL); -} - -static void ikglp_steal_to_fq(struct ikglp_semaphore *sem, struct fifo_queue *fq, ikglp_wait_state_t *fq_wait) -{ - struct task_struct *t = fq_wait->task; - struct fifo_queue *fq_steal = fq_wait->donee_heap_node.fq; - - WARN_ON(t != fq_steal->hp_waiter); - - TRACE_CUR("FQ request %s/%d being moved to fq %d\n", - t->comm, - t->pid, - ikglp_get_idx(sem, fq)); - - fq_wait->donee_heap_node.fq = fq; // just to be safe - - - __remove_wait_queue(&fq_steal->wait, &fq_wait->fq_node); - --(fq_steal->count); - - fq_steal->hp_waiter = ikglp_find_hp_waiter(fq_steal, NULL); - TRACE_TASK(t, "New hp_waiter for fq %d is %s/%d!\n", - ikglp_get_idx(sem, fq_steal), - (fq_steal->hp_waiter) ? fq_steal->hp_waiter->comm : "nil", - (fq_steal->hp_waiter) ? fq_steal->hp_waiter->pid : -1); - - - // Update shortest. - if(fq_steal->count < sem->shortest_fifo_queue->count) { - sem->shortest_fifo_queue = fq_steal; - } - - __ikglp_enqueue_on_fq(sem, fq, t, - &fq_wait->fq_node, - NULL, - NULL); - - // warning: We have not checked the priority inheritance of fq's owner yet. -} - - -static void ikglp_migrate_fq_to_owner_heap_nodes(struct ikglp_semaphore *sem, struct fifo_queue *fq, ikglp_wait_state_t *old_wait) -{ - struct task_struct *t = old_wait->task; - - BUG_ON(old_wait->donee_heap_node.fq != fq); - - TRACE_TASK(t, "Migrating wait_state to memory of queue %d.\n", ikglp_get_idx(sem, fq)); - - // need to migrate global_heap_node and donee_heap_node off of the stack - // to the nodes allocated for the owner of this fq. - - // TODO: Enhance binheap() to perform this operation in place. 
- - ikglp_del_global_list(sem, t, &old_wait->global_heap_node); // remove - fq->global_heap_node = old_wait->global_heap_node; // copy - ikglp_add_global_list(sem, t, &fq->global_heap_node); // re-add - - binheap_delete(&old_wait->donee_heap_node.node, &sem->donees); // remove - fq->donee_heap_node = old_wait->donee_heap_node; // copy - - if(fq->donee_heap_node.donor_info) { - // let donor know that our location has changed - BUG_ON(fq->donee_heap_node.donor_info->donee_info->task != t); // validate cross-link - fq->donee_heap_node.donor_info->donee_info = &fq->donee_heap_node; - } - INIT_BINHEAP_NODE(&fq->donee_heap_node.node); - binheap_add(&fq->donee_heap_node.node, &sem->donees, ikglp_donee_heap_node_t, node); // re-add -} - -static int gsnedf_ikglp_unlock(struct litmus_lock* l) -{ - struct ikglp_semaphore *sem = ikglp_from_lock(l); - struct task_struct *t = current; - struct task_struct *donee = NULL; - struct task_struct *next = NULL; - struct task_struct *new_on_fq = NULL; - - ikglp_wait_state_t *other_donor_info = NULL; - struct fifo_queue *to_steal = NULL; - struct fifo_queue *fq; - - unsigned long flags = 0, real_flags; - - int err = 0; - - raw_spin_lock_irqsave(&sem->real_lock, real_flags); - - lock_global_irqsave(&dgl_lock, flags); // TODO: Push this deeper - lock_fine_irqsave(&sem->lock, flags); - - fq = ikglp_get_queue(sem, t); // returns NULL if 't' is not owner. - - if (!fq) { - err = -EINVAL; - goto out; - } - - TRACE_TASK(t, "Freeing replica %d.\n", ikglp_get_idx(sem, fq)); - - - // Remove 't' from the heaps, but data in nodes will still be good. - ikglp_del_global_list(sem, t, &fq->global_heap_node); - binheap_delete(&fq->donee_heap_node.node, &sem->donees); - - // Move the next request into the FQ and update heaps as needed. - // We defer re-evaluation of priorities to later in the function. - if(fq->donee_heap_node.donor_info) { // move my doner to FQ - ikglp_wait_state_t *donor_info = fq->donee_heap_node.donor_info; - - new_on_fq = donor_info->task; - - TRACE_TASK(t, "Moving MY donor (%s/%d) to fq %d.\n", - new_on_fq->comm, new_on_fq->pid, - ikglp_get_idx(sem, fq)); - // donor moved to FQ - donee = t; - ikglp_move_donor_to_fq(sem, fq, donor_info); - // TODO: Terminate donation - } - else if(!binheap_empty(&sem->donors)) { // No donor, so move any donor to FQ - // move other donor to FQ - other_donor_info = binheap_top_entry(&sem->donors, ikglp_wait_state_t, node); - - new_on_fq = other_donor_info->task; - donee = other_donor_info->donee_info->task; - - // update the donee's heap position. - other_donor_info->donee_info->donor_info = NULL; // clear the cross-link - binheap_decrease(&other_donor_info->donee_info->node, &sem->donees); - - - TRACE_TASK(t, "Moving a donor (%s/%d) to fq %d.\n", - new_on_fq->comm, new_on_fq->pid, - ikglp_get_idx(sem, fq)); - - ikglp_move_donor_to_fq(sem, fq, other_donor_info); - - // TODO: Terminate donation - } - else if(!binheap_empty(&sem->priority_queue)) { // No donors, so move PQ - ikglp_heap_node_t *pq_node = binheap_top_entry(&sem->priority_queue, ikglp_heap_node_t, node); - ikglp_wait_state_t *pq_wait = container_of(pq_node, ikglp_wait_state_t, pq_node); - - new_on_fq = pq_wait->task; - - TRACE_TASK(t, "Moving a pq waiter (%s/%d) to fq %d.\n", - new_on_fq->comm, new_on_fq->pid, - ikglp_get_idx(sem, fq)); - - ikglp_move_pq_to_fq(sem, fq, pq_wait); - } - else if(fq->count == 1) { // No PQ and this queue is empty, so steal - // steal. 
- ikglp_wait_state_t *fq_wait; - - TRACE_TASK(t, "Looking to steal a request for fq %d...\n", - ikglp_get_idx(sem, fq)); - - fq_wait = ikglp_find_hp_waiter_to_steal(sem); - if(fq_wait) { - to_steal = fq_wait->donee_heap_node.fq; - - new_on_fq = fq_wait->task; - - TRACE_TASK(t, "Found %s/%d of fq %d to steal for fq %d...\n", - new_on_fq->comm, new_on_fq->pid, - ikglp_get_idx(sem, to_steal), - ikglp_get_idx(sem, fq)); - - ikglp_steal_to_fq(sem, fq, fq_wait); - - // TODO: Update prio of old queue. - } - else { - TRACE_TASK(t, "Found nothing to steal for fq %d.\n", - ikglp_get_idx(sem, fq)); - } - } - else { // move no one - } - - // 't' must drop all priority and clean up data structures before hand-off. - - // DROP ALL INHERITANCE. IKGLP MUST BE OUTER-MOST - raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); - { - int count = 0; - while(!binheap_empty(&tsk_rt(t)->hp_blocked_tasks)) { - binheap_delete_root(&tsk_rt(t)->hp_blocked_tasks, struct nested_info, hp_binheap_node); - ++count; - } - decrease_priority_inheritance(t, NULL); - WARN_ON(count > 2); // should not be greater than 2. only local fq inh and donation can be possible. - } - raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); - - - // Updating the owner and updating sem->shortest_fifo_queue - // could have been done sooner, but it is deffered, hoping - // that it will reduce thrashing of sem->shortest_fifo_queue - // assignment. - fq->owner = NULL; // no longer owned!! - --(fq->count); - if(fq->count < sem->shortest_fifo_queue->count) { - sem->shortest_fifo_queue = fq; - } - - // Now patch up other priorities. - // - // At most one of the following: - // if(donee && donee != t), decrease prio, propagate to owner, or onward - // if(to_steal), update owner's prio (hp_waiter has already been set) - // - - BUG_ON((other_donor_info != NULL) && (to_steal != NULL)); - - if(other_donor_info) { - struct fifo_queue *other_fq = other_donor_info->donee_info->fq; - - BUG_ON(!donee); - BUG_ON(donee == t); - - TRACE_TASK(t, "Terminating donation relation of donor %s/%d to donee %s/%d!\n", - other_donor_info->task->comm, other_donor_info->task->pid, - donee->comm, donee->pid); - - // need to terminate donation relation. - if(donee == other_fq->owner) { - TRACE_TASK(t, "Donee %s/%d is an owner of fq %d.\n", - donee->comm, donee->pid, - ikglp_get_idx(sem, other_fq)); - - ikglp_remove_donation_from_owner(&other_donor_info->prio_donation.hp_binheap_node, other_fq, sem, flags); - lock_fine_irqsave(&sem->lock, flags); // there should be no contention!!!! - } - else { - TRACE_TASK(t, "Donee %s/%d is an blocked in of fq %d.\n", - donee->comm, donee->pid, - ikglp_get_idx(sem, other_fq)); - - ikglp_remove_donation_from_fq_waiter(donee, &other_donor_info->prio_donation.hp_binheap_node); - if(donee == other_fq->hp_waiter) { - TRACE_TASK(t, "Donee %s/%d was an hp_waiter of fq %d. Rechecking hp_waiter.\n", - donee->comm, donee->pid, - ikglp_get_idx(sem, other_fq)); - - other_fq->hp_waiter = ikglp_find_hp_waiter(other_fq, NULL); - TRACE_TASK(t, "New hp_waiter for fq %d is %s/%d!\n", - ikglp_get_idx(sem, other_fq), - (other_fq->hp_waiter) ? other_fq->hp_waiter->comm : "nil", - (other_fq->hp_waiter) ? other_fq->hp_waiter->pid : -1); - - ikglp_refresh_owners_prio_decrease(other_fq, sem, flags); // unlocks sem->lock. reacquire it. - lock_fine_irqsave(&sem->lock, flags); // there should be no contention!!!! 
- } - } - } - else if(to_steal) { - TRACE_TASK(t, "Rechecking priority inheritance of fq %d, triggered by stealing.\n", - ikglp_get_idx(sem, to_steal)); - - ikglp_refresh_owners_prio_decrease(to_steal, sem, flags); // unlocks sem->lock. reacquire it. - lock_fine_irqsave(&sem->lock, flags); // there should be no contention!!!! - } - - // check for new HP waiter. - if(new_on_fq) { - // fq->owner is null, so just update the hp_waiter without locking. - - if(new_on_fq == fq->hp_waiter) { - TRACE_TASK(t, "new_on_fq is already hp_waiter.\n", fq->hp_waiter->comm, fq->hp_waiter->pid); - fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter); // set this just to be sure... - } - else if(edf_higher_prio(new_on_fq, fq->hp_waiter)) { - if(fq->hp_waiter) - TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n", fq->hp_waiter->comm, fq->hp_waiter->pid); - else - TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n"); - - fq->hp_waiter = new_on_fq; - fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter); - - TRACE_TASK(t, "New hp_waiter for fq %d is %s/%d!\n", - ikglp_get_idx(sem, fq), - (fq->hp_waiter) ? fq->hp_waiter->comm : "nil", - (fq->hp_waiter) ? fq->hp_waiter->pid : -1); - } - } - - - if(waitqueue_active(&fq->wait)) - { - wait_queue_t *wait = list_entry(fq->wait.task_list.next, wait_queue_t, task_list); - ikglp_wait_state_t *fq_wait = container_of(wait, ikglp_wait_state_t, fq_node); - next = (struct task_struct*) wait->private; - - __remove_wait_queue(&fq->wait, wait); - - TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - next\n", - ikglp_get_idx(sem, fq), - next->comm, next->pid); - - // migrate wait-state to fifo-memory. - ikglp_migrate_fq_to_owner_heap_nodes(sem, fq, fq_wait); - - /* next becomes the resouce holder */ - fq->owner = next; - tsk_rt(next)->blocked_lock = NULL; - - - /* determine new hp_waiter if necessary */ - if (next == fq->hp_waiter) { - - TRACE_TASK(next, "was highest-prio waiter\n"); - /* next has the highest priority --- it doesn't need to - * inherit. However, we need to make sure that the - * next-highest priority in the queue is reflected in - * hp_waiter. */ - fq->hp_waiter = ikglp_find_hp_waiter(fq, NULL); - TRACE_TASK(next, "New hp_waiter for fq %d is %s/%d!\n", - ikglp_get_idx(sem, fq), - (fq->hp_waiter) ? fq->hp_waiter->comm : "nil", - (fq->hp_waiter) ? fq->hp_waiter->pid : -1); - - fq->nest.hp_waiter_eff_prio = (fq->hp_waiter) ? effective_priority(fq->hp_waiter) : NULL; - - if (fq->hp_waiter) - TRACE_TASK(fq->hp_waiter, "is new highest-prio waiter\n"); - else - TRACE("no further waiters\n"); - - raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock); - - //TRACE_TASK(next, "Heap Before:\n"); - //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); - - binheap_add(&fq->nest.hp_binheap_node, &tsk_rt(next)->hp_blocked_tasks, struct nested_info, hp_binheap_node); - - //TRACE_TASK(next, "Heap After:\n"); - //print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0); - - raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); - } - else { - /* Well, if 'next' is not the highest-priority waiter, - * then it (probably) ought to inherit the highest-priority - * waiter's priority. */ - TRACE_TASK(next, "is not hp_waiter of replica %d. hp_waiter is %s/%d\n", - ikglp_get_idx(sem, fq), - (fq->hp_waiter) ? fq->hp_waiter->comm : "nil", - (fq->hp_waiter) ? 
fq->hp_waiter->pid : -1); - - raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock); - - binheap_add(&fq->nest.hp_binheap_node, &tsk_rt(next)->hp_blocked_tasks, - struct nested_info, hp_binheap_node); - - /* It is possible that 'next' *should* be the hp_waiter, but isn't - * because that update hasn't yet executed (update operation is - * probably blocked on mutex->lock). So only inherit if the top of - * 'next's top heap node is indeed the effective prio. of hp_waiter. - * (We use fq->hp_waiter_eff_prio instead of effective_priority(hp_waiter) - * since the effective priority of hp_waiter can change (and the - * update has not made it to this lock).) - */ - if(likely(top_priority(&tsk_rt(next)->hp_blocked_tasks) == fq->nest.hp_waiter_eff_prio)) - { - if(fq->nest.hp_waiter_eff_prio) - increase_priority_inheritance(next, fq->nest.hp_waiter_eff_prio); - else - WARN_ON(1); - } - - raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); - } - - - // wake up the new resource holder! - wake_up_process(next); - } - -out: - unlock_fine_irqrestore(&sem->lock, flags); - unlock_global_irqrestore(&dgl_lock, flags); - - raw_spin_unlock_irqrestore(&sem->real_lock, real_flags); - - return err; -} - - -static int gsnedf_ikglp_close(struct litmus_lock* l) -{ - struct task_struct *t = current; - struct ikglp_semaphore *sem = ikglp_from_lock(l); - unsigned long flags; - - int owner = 0; - int i; - - raw_spin_lock_irqsave(&sem->real_lock, flags); - - for(i = 0; i < sem->nr_replicas; ++i) { - if(sem->fifo_queues[i].owner == t) { - owner = 1; - break; - } - } - - raw_spin_unlock_irqrestore(&sem->real_lock, flags); - - if (owner) - gsnedf_ikglp_unlock(l); - - return 0; -} - -static void gsnedf_ikglp_free(struct litmus_lock* l) -{ - struct ikglp_semaphore *sem = ikglp_from_lock(l); - - kfree(sem->fifo_queues); - kfree(sem); -} - -static struct litmus_lock_ops gsnedf_ikglp_lock_ops = { - .lock = gsnedf_ikglp_lock, - .unlock = gsnedf_ikglp_unlock, - - // ikglp can only be an outer-most lock. - .propagate_increase_inheritance = NULL, - .propagate_decrease_inheritance = NULL, - - .close = gsnedf_ikglp_close, - .deallocate = gsnedf_ikglp_free, -}; - -static struct litmus_lock* gsnedf_new_ikglp(void* __user arg) -{ - struct ikglp_semaphore* sem; - int nr_replicas = 0; - int i; - - if(!access_ok(VERIFY_READ, arg, sizeof(nr_replicas))) - { - return(NULL); - } - if(__copy_from_user(&nr_replicas, arg, sizeof(nr_replicas))) - { - return(NULL); - } - if(nr_replicas < 1) - { - return(NULL); - } - - sem = kmalloc(sizeof(*sem), GFP_KERNEL); - if(!sem) - { - return NULL; - } - - sem->fifo_queues = kmalloc(sizeof(struct fifo_queue)*nr_replicas, GFP_KERNEL); - if(!sem->fifo_queues) - { - kfree(sem); - return NULL; - } - - - sem->litmus_lock.ops = &gsnedf_ikglp_lock_ops; - -#ifdef CONFIG_DEBUG_SPINLOCK - { - __raw_spin_lock_init(&sem->lock, ((struct litmus_lock*)sem)->cheat_lockdep, &((struct litmus_lock*)sem)->key); - } -#else - raw_spin_lock_init(&sem->lock); -#endif - - raw_spin_lock_init(&sem->real_lock); - - sem->nr_replicas = nr_replicas; - sem->m = num_online_cpus(); // default 'm' to number of CPUs. 
- sem->max_fifo_len = (sem->m/nr_replicas) + ((sem->m%nr_replicas) != 0); - - TRACE("New IKGLP Sem: m = %d, k = %d, max fifo_len = %d\n", - sem->m, - sem->nr_replicas, - sem->max_fifo_len); - - for(i = 0; i < nr_replicas; ++i) - { - struct fifo_queue* q = &(sem->fifo_queues[i]); - - q->owner = NULL; - q->hp_waiter = NULL; - init_waitqueue_head(&q->wait); - q->count = 0; - - q->global_heap_node.task = NULL; - INIT_BINHEAP_NODE(&q->global_heap_node.node); - - q->donee_heap_node.task = NULL; - q->donee_heap_node.donor_info = NULL; - q->donee_heap_node.fq = NULL; - INIT_BINHEAP_NODE(&q->donee_heap_node.node); - - q->nest.lock = (struct litmus_lock*)sem; - q->nest.hp_waiter_eff_prio = NULL; - q->nest.hp_waiter_ptr = &q->hp_waiter; - INIT_BINHEAP_NODE(&q->nest.hp_binheap_node); - } - - sem->shortest_fifo_queue = &sem->fifo_queues[0]; - - sem->top_m_size = 0; - - // init heaps - INIT_BINHEAP_HANDLE(&sem->top_m, ikglp_edf_min_heap_base_priority_order); - INIT_BINHEAP_HANDLE(&sem->not_top_m, ikglp_edf_max_heap_base_priority_order); - INIT_BINHEAP_HANDLE(&sem->donees, ikglp_edf_min_heap_donee_order); - INIT_BINHEAP_HANDLE(&sem->priority_queue, ikglp_edf_max_heap_base_priority_order); - INIT_BINHEAP_HANDLE(&sem->donors, ikglp_donor_edf_max_heap_base_priority_order); - - return &sem->litmus_lock; -} - - - - - -#endif - - - - - - - - - - - -/* ******************** FMLP support ********************** */ - -/* struct for semaphore with priority inheritance */ -struct fmlp_semaphore { - struct litmus_lock litmus_lock; - - /* current resource holder */ - struct task_struct *owner; - - /* highest-priority waiter */ - struct task_struct *hp_waiter; - - /* FIFO queue of waiting tasks */ - wait_queue_head_t wait; -}; - -static inline struct fmlp_semaphore* fmlp_from_lock(struct litmus_lock* lock) -{ - return container_of(lock, struct fmlp_semaphore, litmus_lock); -} - -/* caller is responsible for locking */ -struct task_struct* find_hp_waiter(struct fmlp_semaphore *sem, - struct task_struct* skip) -{ - struct list_head *pos; - struct task_struct *queued, *found = NULL; - - list_for_each(pos, &sem->wait.task_list) { - queued = (struct task_struct*) list_entry(pos, wait_queue_t, - task_list)->private; - - /* Compare task prios, find high prio task. */ - if (queued != skip && edf_higher_prio(queued, found)) - found = queued; - } - return found; -} - -int gsnedf_fmlp_lock(struct litmus_lock* l) -{ - struct task_struct* t = current; - struct fmlp_semaphore *sem = fmlp_from_lock(l); - wait_queue_t wait; - unsigned long flags; - - if (!is_realtime(t)) - return -EPERM; - - spin_lock_irqsave(&sem->wait.lock, flags); - - if (sem->owner) { - /* resource is not free => must suspend and wait */ - - init_waitqueue_entry(&wait, t); - - /* FIXME: interruptible would be nice some day */ - set_task_state(t, TASK_UNINTERRUPTIBLE); - - __add_wait_queue_tail_exclusive(&sem->wait, &wait); - - /* check if we need to activate priority inheritance */ - if (edf_higher_prio(t, sem->hp_waiter)) { - sem->hp_waiter = t; - if (edf_higher_prio(t, sem->owner)) - increase_priority_inheritance(sem->owner, sem->hp_waiter); - } - - TS_LOCK_SUSPEND; - - /* release lock before sleeping */ - spin_unlock_irqrestore(&sem->wait.lock, flags); - - /* We depend on the FIFO order. Thus, we don't need to recheck - * when we wake up; we are guaranteed to have the lock since - * there is only one wake up per release. - */ - - schedule(); - - TS_LOCK_RESUME; - - /* Since we hold the lock, no other task will change - * ->owner. 
We can thus check it without acquiring the spin - * lock. */ - BUG_ON(sem->owner != t); - } else { - /* it's ours now */ - sem->owner = t; - - spin_unlock_irqrestore(&sem->wait.lock, flags); - } - - return 0; -} - -int gsnedf_fmlp_unlock(struct litmus_lock* l) -{ - struct task_struct *t = current, *next; - struct fmlp_semaphore *sem = fmlp_from_lock(l); - unsigned long flags; - int err = 0; - - spin_lock_irqsave(&sem->wait.lock, flags); - - if (sem->owner != t) { - err = -EINVAL; - goto out; - } - - /* check if there are jobs waiting for this resource */ - next = __waitqueue_remove_first(&sem->wait); - if (next) { - /* next becomes the resouce holder */ - sem->owner = next; - TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid); - - /* determine new hp_waiter if necessary */ - if (next == sem->hp_waiter) { - TRACE_TASK(next, "was highest-prio waiter\n"); - /* next has the highest priority --- it doesn't need to - * inherit. However, we need to make sure that the - * next-highest priority in the queue is reflected in - * hp_waiter. */ - sem->hp_waiter = find_hp_waiter(sem, next); - if (sem->hp_waiter) - TRACE_TASK(sem->hp_waiter, "is new highest-prio waiter\n"); - else - TRACE("no further waiters\n"); - } else { - /* Well, if next is not the highest-priority waiter, - * then it ought to inherit the highest-priority - * waiter's priority. */ - increase_priority_inheritance(next, sem->hp_waiter); - } - - /* wake up next */ - wake_up_process(next); - } else - /* becomes available */ - sem->owner = NULL; - - /* we lose the benefit of priority inheritance (if any) */ - if (tsk_rt(t)->inh_task) - decrease_priority_inheritance(t, NULL); - -out: - spin_unlock_irqrestore(&sem->wait.lock, flags); - + spin_unlock_irqrestore(&sem->wait.lock, flags); + return err; } @@ -3561,7 +1122,7 @@ static struct litmus_lock_ops gsnedf_fmlp_lock_ops = { .lock = gsnedf_fmlp_lock, .unlock = gsnedf_fmlp_unlock, .deallocate = gsnedf_fmlp_free, - + #ifdef CONFIG_LITMUS_NESTED_LOCKING .propagate_increase_inheritance = NULL, .propagate_decrease_inheritance = NULL @@ -3599,17 +1160,17 @@ static long gsnedf_allocate_lock(struct litmus_lock **lock, int type, /* Flexible Multiprocessor Locking Protocol */ *lock = gsnedf_new_fmlp(); break; - + #ifdef CONFIG_LITMUS_NESTED_LOCKING case RSM_MUTEX: *lock = gsnedf_new_rsm_mutex(); break; - + case IKGLP_SEM: *lock = gsnedf_new_ikglp(args); break; #endif - + default: err = -ENXIO; goto UNSUPPORTED_LOCK; @@ -3671,9 +1232,15 @@ static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = { .activate_plugin = gsnedf_activate_plugin, #ifdef CONFIG_LITMUS_LOCKING .allocate_lock = gsnedf_allocate_lock, + .increase_prio = increase_priority_inheritance, + .decrease_prio = decrease_priority_inheritance, +#endif +#ifdef CONFIG_LITMUS_NESTED_LOCKING + .nested_increase_prio = nested_increase_priority_inheritance, + .nested_decrease_prio = nested_decrease_priority_inheritance, #endif #ifdef CONFIG_LITMUS_DGL_SUPPORT - .get_dgl_spinlock = gsn_edf_get_dgl_spinlock, + .get_dgl_spinlock = gsnedf_get_dgl_spinlock, #endif }; @@ -3692,11 +1259,11 @@ static int __init init_gsn_edf(void) INIT_BINHEAP_NODE(&entry->hn); } - -#ifdef CONFIG_LITMUS_DGL_SUPPORT + +#ifdef CONFIG_LITMUS_DGL_SUPPORT raw_spin_lock_init(&dgl_lock); #endif - + edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs); return register_sched_plugin(&gsn_edf_plugin); } diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c index 77ae3eeb3966..20c67ff4fdce 100644 --- a/litmus/sched_plugin.c +++ 
b/litmus/sched_plugin.c @@ -111,23 +111,39 @@ static long litmus_dummy_deactivate_plugin(void) } #ifdef CONFIG_LITMUS_LOCKING - static long litmus_dummy_allocate_lock(struct litmus_lock **lock, int type, void* __user config) { return -ENXIO; } +static void litmus_dummy_increase_prio(struct task_struct* t, struct task_struct* prio_inh) +{ +} + +static void litmus_dummy_decrease_prio(struct task_struct* t, struct task_struct* prio_inh) +{ +} #endif -#ifdef CONFIG_LITMUS_DGL_SUPPORT +#ifdef CONFIG_LITMUS_NESTED_LOCKING +static void litmus_dummy_nested_increase_prio(struct task_struct* t, struct task_struct* prio_inh, + raw_spinlock_t *to_unlock, unsigned long irqflags) +{ +} +static void litmus_dummy_nested_decrease_prio(struct task_struct* t, struct task_struct* prio_inh, + raw_spinlock_t *to_unlock, unsigned long irqflags) +{ +} +#endif + +#ifdef CONFIG_LITMUS_DGL_SUPPORT static raw_spinlock_t* litmus_dummy_get_dgl_spinlock(struct task_struct *t) { BUG(); return NULL; } - #endif @@ -149,6 +165,12 @@ struct sched_plugin linux_sched_plugin = { .deactivate_plugin = litmus_dummy_deactivate_plugin, #ifdef CONFIG_LITMUS_LOCKING .allocate_lock = litmus_dummy_allocate_lock, + .increase_prio = litmus_dummy_increase_prio, + .decrease_prio = litmus_dummy_decrease_prio, +#endif +#ifdef CONFIG_LITMUS_NESTED_LOCKING + .nested_increase_prio = litmus_dummy_nested_increase_prio, + .nested_decrease_prio = litmus_dummy_nested_decrease_prio, #endif #ifdef CONFIG_LITMUS_DGL_SUPPORT .get_dgl_spinlock = litmus_dummy_get_dgl_spinlock, @@ -190,6 +212,12 @@ int register_sched_plugin(struct sched_plugin* plugin) CHECK(deactivate_plugin); #ifdef CONFIG_LITMUS_LOCKING CHECK(allocate_lock); + CHECK(increase_prio); + CHECK(decrease_prio); +#endif +#ifdef CONFIG_LITMUS_NESTED_LOCKING + CHECK(nested_increase_prio); + CHECK(nested_decrease_prio); #endif #ifdef CONFIG_LITMUS_DGL_SUPPORT CHECK(get_dgl_spinlock); -- cgit v1.2.2
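
An illustrative aside on the IKGLP "top-m" bookkeeping seen in the hunks above (this note and the sketch are not from the patch itself): ikglp_add_global_list() and ikglp_del_global_list() keep the m highest base-priority requests in the min-ordered binheap sem->top_m and everything else in the max-ordered binheap sem->not_top_m, so that ikglp_mth_highest() can cheaply report the cut-off priority against which newly arriving requests are compared. Below is a minimal, stand-alone C sketch of that eviction/promotion idea, assuming plain arrays and linear scans under EDF (smaller deadline = higher priority). All names in it (M, MAX_REQS, add_request, mth_highest_idx) are invented for illustration and do not appear in the kernel code, which uses binheap_add(), binheap_delete_root() and __edf_higher_prio() instead.

#include <stdio.h>

#define M        4   /* size of the top-m set (the patch defaults m to num_online_cpus()) */
#define MAX_REQS 16  /* illustration only */

static long top_m[M];            /* the m smallest deadlines seen so far */
static int  top_m_size;
static long not_top_m[MAX_REQS]; /* everyone else */
static int  not_top_m_size;

/* index of the largest deadline (lowest priority) inside top_m */
static int mth_highest_idx(void)
{
	int i, worst = 0;
	for (i = 1; i < top_m_size; i++)
		if (top_m[i] > top_m[worst])
			worst = i;
	return worst;
}

/* analogue of ikglp_add_global_list(): classify a newly arrived request */
static void add_request(long deadline)
{
	if (top_m_size < M) {
		/* trivially in the top-m while the set is not yet full */
		top_m[top_m_size++] = deadline;
	} else {
		int worst = mth_highest_idx();
		if (deadline < top_m[worst]) {
			/* newcomer beats the m-th highest: evict that one to not-top-m */
			not_top_m[not_top_m_size++] = top_m[worst];
			top_m[worst] = deadline;
		} else {
			/* newcomer goes straight to not-top-m */
			not_top_m[not_top_m_size++] = deadline;
		}
	}
}

int main(void)
{
	long deadlines[] = { 40, 10, 30, 20, 50, 5 };
	int i;

	for (i = 0; i < 6; i++)
		add_request(deadlines[i]);

	/* prints 30: the deadline of the m-th highest-priority request */
	printf("cut-off deadline: %ld\n", top_m[mth_highest_idx()]);
	return 0;
}

Keeping two heaps rather than one sorted list is what makes the common operations cheap: admitting a request only needs the m-th highest entry (the top of the min-heap), and when a top-m request leaves, the best not-top-m entry can be promoted in logarithmic time, which is exactly what ikglp_del_global_list() does in the moved code.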