From 6f3810f79aaf624b5c781dcf04a4203affcad903 Mon Sep 17 00:00:00 2001
From: Glenn Elliott
Date: Mon, 8 Apr 2013 17:21:34 -0400
Subject: Fixed IKGLP request aborts.

Fixes a bug where new requests were not moved into a FIFO queue
after an aborted request in that FIFO made room for them.
---
 litmus/budget.c     |  12 +-
 litmus/ikglp_lock.c | 641 +++++++++++++++++++++++++++++++---------------------
 litmus/locking.c    |  13 +-
 3 files changed, 394 insertions(+), 272 deletions(-)

diff --git a/litmus/budget.c b/litmus/budget.c
index 4f692fd4a103..aa254349a85e 100644
--- a/litmus/budget.c
+++ b/litmus/budget.c
@@ -57,12 +57,12 @@ inline static void arm_enforcement_timer(struct task_struct* t, int force)
 	}
 
 	if (!force) {
-		/* Calling this when there is no budget left for the task
-		 * makes no sense, unless the task is non-preemptive. */
-		if (budget_exhausted(t)) {
-			TRACE_TASK(t, "can't arm timer because no budget remaining\n");
-			return;
-		}
+//		/* Calling this when there is no budget left for the task
+//		 * makes no sense, unless the task is non-preemptive. */
+//		if (budget_exhausted(t)) {
+//			TRACE_TASK(t, "can't arm timer because no budget remaining\n");
+//			return;
+//		}
 
 	if ( (!budget_enforced(t) || (budget_enforced(t) && bt_flag_is_set(t, BTF_BUDGET_EXHAUSTED)))
diff --git a/litmus/ikglp_lock.c b/litmus/ikglp_lock.c
index 6d7ea24ce79b..3058aa8fb2fe 100644
--- a/litmus/ikglp_lock.c
+++ b/litmus/ikglp_lock.c
@@ -141,6 +141,98 @@ static inline struct task_struct* ikglp_mth_highest(struct ikglp_semaphore *sem)
 }
 
 
+static void __ikglp_dump_pq(struct binheap_node *n, int depth)
+{
+	ikglp_heap_node_t *request;
+	char padding[81] = " ";
+
+	if(n == NULL) {
+		TRACE("+-> %p\n", NULL);
+		return;
+	}
+
+	request = binheap_entry(n, ikglp_heap_node_t, node);
+
+	if(depth*2 <= 80)
+		padding[depth*2] = '\0';
+
+
+	TRACE("%s+-> %s/%d\n",
+		padding,
+		request->task->comm,
+		request->task->pid);
+
+	if(n->left) __ikglp_dump_pq(n->left, depth+1);
+	if(n->right) __ikglp_dump_pq(n->right, depth+1);
+}
+
+static void __ikglp_dump_donors(struct binheap_node *n, int depth)
+{
+	ikglp_wait_state_t *donor_node;
+	char padding[81] = " ";
+
+	if(n == NULL) {
+		TRACE("+-> %p\n", NULL);
+		return;
+	}
+
+	donor_node = binheap_entry(n, ikglp_wait_state_t, node);
+
+	if(depth*2 <= 80)
+		padding[depth*2] = '\0';
+
+
+	TRACE("%s+-> %s/%d (donee: %s/%d)\n",
+		padding,
+		donor_node->task->comm,
+		donor_node->task->pid,
+		donor_node->donee_info->task->comm,
+		donor_node->donee_info->task->pid);
+
+	if(n->left) __ikglp_dump_donors(n->left, depth+1);
+	if(n->right) __ikglp_dump_donors(n->right, depth+1);
+}
+
+static void __ikglp_dump_fifoq(int i, struct fifo_queue* fq)
+{
+	TRACE(" FIFO %d: Owner = %s/%d, HP Waiter = %s/%d, # Waiters = %u\n",
+		i,
+		(fq->owner) ? fq->owner->comm : "null",
+		(fq->owner) ? fq->owner->pid : 0,
+		(fq->hp_waiter) ? fq->hp_waiter->comm : "null",
+		(fq->hp_waiter) ? fq->hp_waiter->pid : 0,
+		fq->count);
+	if (waitqueue_active(&fq->wait)) {
+		struct list_head *pos;
+		list_for_each(pos, &fq->wait.task_list) {
+			wait_queue_t *q = list_entry(pos, wait_queue_t, task_list);
+			struct task_struct *t = (struct task_struct*) q->private;
+			TRACE("  %s/%d (effective priority: %s/%d)\n",
+				t->comm, t->pid,
+				(tsk_rt(t)->inh_task) ? tsk_rt(t)->inh_task->comm : "null",
+				(tsk_rt(t)->inh_task) ?
+					tsk_rt(t)->inh_task->pid : 0);
+		}
+	}
+}
+
+static void __ikglp_dump_state(struct ikglp_semaphore *sem)
+{
+	int i;
+	TRACE("IKGLP Lock %d\n", sem->litmus_lock.ident);
+	TRACE("# Replicas: %u  Max FIFO Len: %u  Max in FIFOs: %u  Cur # in FIFOs: %u\n",
+		sem->nr_replicas, sem->max_fifo_len, sem->max_in_fifos, sem->nr_in_fifos);
+	TRACE("# requests in top-m: %u\n", sem->top_m_size);
+
+	for (i = 0; i < sem->nr_replicas; ++i)
+		__ikglp_dump_fifoq(i, &sem->fifo_queues[i]);
+
+	TRACE(" PQ:\n");
+	__ikglp_dump_pq(sem->priority_queue.root, 1);
+
+	TRACE(" Donors:\n");
+	__ikglp_dump_donors(sem->donors.root, 1);
+}
+
 
 #if 0
 static void print_global_list(struct binheap_node* n, int depth)
@@ -1133,9 +1225,8 @@ static void __drop_from_fq(struct ikglp_semaphore *sem,
 	--(fq->count);
 
 #ifdef CONFIG_LITMUS_AFFINITY_LOCKING
-	if(sem->aff_obs) {
+	if(sem->aff_obs)
 		sem->aff_obs->ops->notify_dequeue(sem->aff_obs, fq, t);
-	}
 #endif
 
 	if(t == fq->hp_waiter) {
@@ -1149,6 +1240,7 @@
 	// Update shortest.
 	if(fq->count < sem->shortest_fifo_queue->count)
 		sem->shortest_fifo_queue = fq;
+	--(sem->nr_in_fifos);
 
 	wait->cur_q = IKGLP_INVL;
 }
@@ -1201,68 +1293,125 @@ static void ikglp_migrate_fq_to_owner_heap_nodes(struct ikglp_semaphore *sem,
 			ikglp_donee_heap_node_t, node);	// re-add
 }
 
-int ikglp_unlock(struct litmus_lock* l)
-{
-	struct ikglp_semaphore *sem = ikglp_from_lock(l);
-	struct task_struct *t = current;
-	struct task_struct *donee = NULL;
-	struct task_struct *next = NULL;
-	struct task_struct *new_on_fq = NULL;
-	struct fifo_queue *fq_of_new_on_fq = NULL;
-	ikglp_wait_state_t *other_donor_info = NULL;
-	struct fifo_queue *to_steal = NULL;
-	int need_steal_prio_reeval = 0;
-	struct fifo_queue *fq;
-#ifdef CONFIG_LITMUS_DGL_SUPPORT
-	raw_spinlock_t *dgl_lock;
-#endif
+void ikglp_grant_replica_to_next(struct ikglp_semaphore *sem, struct fifo_queue *fq)
+{
+	wait_queue_t *wait;
+	ikglp_wait_state_t *fq_wait;
+	struct task_struct *next;
 
-	unsigned long flags = 0, more_flags;
+	BUG_ON(!waitqueue_active(&fq->wait));
 
-	int err = 0;
+	wait = list_entry(fq->wait.task_list.next, wait_queue_t, task_list);
+	fq_wait = container_of(wait, ikglp_wait_state_t, fq_node);
+	next = (struct task_struct*) wait->private;
 
-	fq = ikglp_get_queue(sem, t);  // returns NULL if 't' is not owner.
+	__remove_wait_queue(&fq->wait, wait);
 
-	if (!fq) {
-		err = -EINVAL;
-		goto out;
-	}
+	TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - next\n",
+			ikglp_get_idx(sem, fq),
+			next->comm, next->pid);
 
-	BUG_ON(l != tsk_rt(t)->outermost_lock);
+	// migrate wait-state to fifo-memory.
+	ikglp_migrate_fq_to_owner_heap_nodes(sem, fq, fq_wait);
 
-#ifdef CONFIG_LITMUS_DGL_SUPPORT
-	dgl_lock = litmus->get_dgl_spinlock(t);
+	/* next becomes the resource holder */
+	fq->owner = next;
+	tsk_rt(next)->blocked_lock = NULL;
+
+#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
+	if(sem->aff_obs) {
+		sem->aff_obs->ops->notify_acquired(sem->aff_obs, fq, next);
+	}
 #endif
-	lock_global_irqsave(dgl_lock, flags);
-	raw_spin_lock_irqsave(&sem->real_lock, more_flags);
-	lock_fine_irqsave(&sem->lock, flags);
-	TRACE_TASK(t, "Freeing replica %d.\n", ikglp_get_idx(sem, fq));
+	/* determine new hp_waiter if necessary */
+	if (next == fq->hp_waiter) {
+		TRACE_TASK(next, "was highest-prio waiter\n");
+		/* next has the highest priority --- it doesn't need to
+		 * inherit. However, we need to make sure that the
+		 * next-highest priority in the queue is reflected in
+		 * hp_waiter.
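+		 * (ikglp_find_hp_waiter() just below rescans fq's wait queue
+		 * for the new maximum.)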
+		 */
+		fq->hp_waiter = ikglp_find_hp_waiter(fq, NULL);
+		TRACE_TASK(next, "New hp_waiter for fq %d is %s/%d!\n",
+				ikglp_get_idx(sem, fq),
+				(fq->hp_waiter) ? fq->hp_waiter->comm : "null",
+				(fq->hp_waiter) ? fq->hp_waiter->pid : 0);
 
-	// Remove 't' from the heaps, but data in nodes will still be good.
-	ikglp_del_global_list(sem, t, &fq->global_heap_node);
-	binheap_delete(&fq->donee_heap_node.node, &sem->donees);
+		fq->nest.hp_waiter_eff_prio = (fq->hp_waiter) ?
+			effective_priority(fq->hp_waiter) : NULL;
 
-	fq->owner = NULL;  // no longer owned!!
-	--(fq->count);
-	if(fq->count < sem->shortest_fifo_queue->count) {
-		sem->shortest_fifo_queue = fq;
+		if (fq->hp_waiter)
+			TRACE_TASK(fq->hp_waiter, "is new highest-prio waiter\n");
+		else
+			TRACE("no further waiters\n");
+
+		raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock);
+		binheap_add(&fq->nest.hp_binheap_node,
+				&tsk_rt(next)->hp_blocked_tasks,
+				struct nested_info,
+				hp_binheap_node);
+		raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock);
 	}
-	--(sem->nr_in_fifos);
+	else {
+		/* Well, if 'next' is not the highest-priority waiter,
+		 * then it (probably) ought to inherit the highest-priority
+		 * waiter's priority. */
+		TRACE_TASK(next, "is not hp_waiter of replica %d. hp_waiter is %s/%d\n",
+				ikglp_get_idx(sem, fq),
+				(fq->hp_waiter) ? fq->hp_waiter->comm : "null",
+				(fq->hp_waiter) ? fq->hp_waiter->pid : 0);
 
-#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
-	if(sem->aff_obs) {
-		sem->aff_obs->ops->notify_dequeue(sem->aff_obs, fq, t);
-		sem->aff_obs->ops->notify_freed(sem->aff_obs, fq, t);
+		raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock);
+
+		binheap_add(&fq->nest.hp_binheap_node,
+				&tsk_rt(next)->hp_blocked_tasks,
+				struct nested_info,
+				hp_binheap_node);
+
+		/* It is possible that 'next' *should* be the hp_waiter, but isn't
+		 * because that update hasn't yet executed (update operation is
+		 * probably blocked on mutex->lock). So only inherit if the top of
+		 * 'next's top heap node is indeed the effective prio. of hp_waiter.
+		 * (We use fq->hp_waiter_eff_prio instead of effective_priority(hp_waiter)
+		 * since the effective priority of hp_waiter can change (and the
+		 * update has not made it to this lock).)
+		 */
+		if(likely(top_priority(&tsk_rt(next)->hp_blocked_tasks) ==
+						fq->nest.hp_waiter_eff_prio))
+		{
+			if(fq->nest.hp_waiter_eff_prio)
+				litmus->increase_prio(next, fq->nest.hp_waiter_eff_prio);
+			else
+				WARN_ON(1);
+		}
+
+		raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock);
 	}
-#endif
 
-	// Move the next request into the FQ and update heaps as needed.
-	// We defer re-evaluation of priorities to later in the function.
-	if(fq->donee_heap_node.donor_info) {  // move my donor to FQ
-		ikglp_wait_state_t *donor_info = fq->donee_heap_node.donor_info;
+	// wake up the new resource holder!
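+	// ('next' already has fq->owner set and its blocked_lock cleared
+	//  above, so it can proceed as soon as the scheduler runs it.)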
+	wake_up_for_lock(next);
+}
+
+
+void ikglp_move_next_to_fq(struct ikglp_semaphore *sem,
+				struct fifo_queue *fq,
+				struct task_struct *t,
+				ikglp_donee_heap_node_t *donee_node,
+				int allow_stealing)
+{
+	struct task_struct *donee = NULL;
+	struct task_struct *new_on_fq = NULL;
+	struct fifo_queue *fq_of_new_on_fq = NULL;
+
+	ikglp_wait_state_t *other_donor_info = NULL;
+	struct fifo_queue *to_steal = NULL;
+	int need_steal_prio_reeval = 0;
+	unsigned long flags = 0;
+
+	if (donee_node->donor_info) {
+		ikglp_wait_state_t *donor_info = donee_node->donor_info;
 
 		new_on_fq = donor_info->task;
 
@@ -1288,15 +1437,14 @@ int ikglp_unlock(struct litmus_lock* l)
 				ikglp_get_idx(sem, fq_of_new_on_fq),
 				ikglp_get_idx(sem, fq));
-
 		ikglp_move_donor_to_fq(sem, fq_of_new_on_fq, donor_info);
 	}
 	else if(!binheap_empty(&sem->donors)) {  // No donor, so move any donor to FQ
-		// Select a donor
+		// Select a donor
#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
 		other_donor_info = (sem->aff_obs) ?
-			sem->aff_obs->ops->advise_donor_to_fq(sem->aff_obs, fq) :
-			binheap_top_entry(&sem->donors, ikglp_wait_state_t, node);
+				sem->aff_obs->ops->advise_donor_to_fq(sem->aff_obs, fq) :
+				binheap_top_entry(&sem->donors, ikglp_wait_state_t, node);
 #else
 		other_donor_info = binheap_top_entry(&sem->donors, ikglp_wait_state_t, node);
 #endif
@@ -1327,7 +1475,6 @@ int ikglp_unlock(struct litmus_lock* l)
 				ikglp_get_idx(sem, fq_of_new_on_fq),
 				ikglp_get_idx(sem, fq));
-
 		ikglp_move_donor_to_fq(sem, fq_of_new_on_fq, other_donor_info);
 	}
 	else if(!binheap_empty(&sem->priority_queue)) {  // No donors, so move PQ
@@ -1359,7 +1506,7 @@ int ikglp_unlock(struct litmus_lock* l)
 		ikglp_move_pq_to_fq(sem, fq_of_new_on_fq, pq_wait);
 	}
-	else if(fq->count == 0) {  // No PQ and this queue is empty, so steal.
+	else if(allow_stealing && fq->count == 0) {  // No PQ and this queue is empty, so steal.
 		ikglp_wait_state_t *fq_wait;
 
 		TRACE_TASK(t, "Looking to steal a request for fq %d...\n",
@@ -1367,8 +1514,8 @@ int ikglp_unlock(struct litmus_lock* l)
 
 #ifdef CONFIG_LITMUS_AFFINITY_LOCKING
 		fq_wait = (sem->aff_obs) ?
-			sem->aff_obs->ops->advise_steal(sem->aff_obs, fq) :
-			ikglp_find_hp_waiter_to_steal(sem);
+				sem->aff_obs->ops->advise_steal(sem->aff_obs, fq) :
+				ikglp_find_hp_waiter_to_steal(sem);
 #else
 		fq_wait = ikglp_find_hp_waiter_to_steal(sem);
 #endif
@@ -1395,27 +1542,6 @@ int ikglp_unlock(struct litmus_lock* l)
 	else {
 		// move no one
 	}
-	// 't' must drop all priority and clean up data structures before hand-off.
-
-	// DROP ALL INHERITANCE. IKGLP MUST BE OUTER-MOST
-	// This kills any inheritance from a donor.
-	raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock);
-	{
-		int count = 0;
-		TRACE_TASK(t, "discarding _all_ inheritance because IKGLP is outermost\n");
-		while(!binheap_empty(&tsk_rt(t)->hp_blocked_tasks)) {
-			binheap_delete_root(&tsk_rt(t)->hp_blocked_tasks,
-					struct nested_info, hp_binheap_node);
-			++count;
-		}
-		if (count) {
-			litmus->decrease_prio(t, NULL, 0);
-		}
-		WARN_ON(count > 2);  // should not be greater than 2. only local fq inh and donation can be possible.
-	}
-	raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);
-
-
-	// Now patch up other priorities.
-	//
@@ -1479,147 +1605,94 @@ int ikglp_unlock(struct litmus_lock* l)
 
 	// check for new HP waiter.
 	if(new_on_fq) {
-		if(fq == fq_of_new_on_fq) {
-			// fq->owner is null, so just update the hp_waiter without locking.
-			if(new_on_fq == fq->hp_waiter) {
-				TRACE_TASK(t, "new_on_fq is already hp_waiter.\n",
-					fq->hp_waiter->comm, fq->hp_waiter->pid);
-				fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter);  // set this just to be sure...
-			}
-			else if(litmus->compare(new_on_fq, fq->hp_waiter)) {
-				if(fq->hp_waiter)
-					TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n",
-						fq->hp_waiter->comm, fq->hp_waiter->pid);
-				else
-					TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n");
-
-				fq->hp_waiter = new_on_fq;
-				fq->nest.hp_waiter_eff_prio = effective_priority(fq->hp_waiter);
+		ikglp_refresh_owners_prio_increase(new_on_fq, fq_of_new_on_fq, sem, flags);  // unlocks sem->lock. reacquire it.
+		lock_fine_irqsave(&sem->lock, flags);  // there should be no contention!!!!
+	}
 
-				TRACE_TASK(t, "New hp_waiter for fq %d is %s/%d!\n",
-					ikglp_get_idx(sem, fq),
-					(fq->hp_waiter) ? fq->hp_waiter->comm : "null",
-					(fq->hp_waiter) ? fq->hp_waiter->pid : 0);
-			}
-		}
-		else {
-			ikglp_refresh_owners_prio_increase(new_on_fq, fq_of_new_on_fq, sem, flags);  // unlocks sem->lock. reacquire it.
-			lock_fine_irqsave(&sem->lock, flags);  // there should be no contention!!!!
-		}
+	/* we moved a request to an empty FQ. wake it up */
+	if(unlikely(fq_of_new_on_fq &&
+			fq_of_new_on_fq != fq &&
+			fq_of_new_on_fq->count == 1)) {
+		ikglp_grant_replica_to_next(sem, fq_of_new_on_fq);
 	}
+}
 
+int ikglp_unlock(struct litmus_lock* l)
+{
+	struct ikglp_semaphore *sem = ikglp_from_lock(l);
+	struct task_struct *t = current;
+	struct fifo_queue *fq;
 
-wake_kludge:
-	if(waitqueue_active(&fq->wait))
-	{
-		wait_queue_t *wait = list_entry(fq->wait.task_list.next, wait_queue_t, task_list);
-		ikglp_wait_state_t *fq_wait = container_of(wait, ikglp_wait_state_t, fq_node);
-		next = (struct task_struct*) wait->private;
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+	raw_spinlock_t *dgl_lock;
+#endif
 
-		__remove_wait_queue(&fq->wait, wait);
+	unsigned long flags = 0, more_flags;
 
-		TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - next\n",
-			ikglp_get_idx(sem, fq),
-			next->comm, next->pid);
+	int err = 0;
 
-		// migrate wait-state to fifo-memory.
-		ikglp_migrate_fq_to_owner_heap_nodes(sem, fq, fq_wait);
+	fq = ikglp_get_queue(sem, t);  // returns NULL if 't' is not owner.
 
-		/* next becomes the resouce holder */
-		fq->owner = next;
-		tsk_rt(next)->blocked_lock = NULL;
+	if (!fq) {
+		err = -EINVAL;
+		goto out;
+	}
 
-#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
-		if(sem->aff_obs) {
-			sem->aff_obs->ops->notify_acquired(sem->aff_obs, fq, next);
-		}
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+	dgl_lock = litmus->get_dgl_spinlock(t);
 #endif
 
+	lock_global_irqsave(dgl_lock, flags);
+	raw_spin_lock_irqsave(&sem->real_lock, more_flags);
+	lock_fine_irqsave(&sem->lock, flags);
 
-		/* determine new hp_waiter if necessary */
-		if (next == fq->hp_waiter) {
-			TRACE_TASK(next, "was highest-prio waiter\n");
-			/* next has the highest priority --- it doesn't need to
-			 * inherit. However, we need to make sure that the
-			 * next-highest priority in the queue is reflected in
-			 * hp_waiter. */
-			fq->hp_waiter = ikglp_find_hp_waiter(fq, NULL);
-			TRACE_TASK(next, "New hp_waiter for fq %d is %s/%d!\n",
-				ikglp_get_idx(sem, fq),
-				(fq->hp_waiter) ? fq->hp_waiter->comm : "null",
-				(fq->hp_waiter) ? fq->hp_waiter->pid : 0);
-
-			fq->nest.hp_waiter_eff_prio = (fq->hp_waiter) ?
-				effective_priority(fq->hp_waiter) : NULL;
-
-			if (fq->hp_waiter)
-				TRACE_TASK(fq->hp_waiter, "is new highest-prio waiter\n");
-			else
-				TRACE("no further waiters\n");
+	TRACE_TASK(t, "Freeing replica %d.\n", ikglp_get_idx(sem, fq));
 
-			raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock);
+	// Remove 't' from the heaps, but data in nodes will still be good.
+	ikglp_del_global_list(sem, t, &fq->global_heap_node);
+	binheap_delete(&fq->donee_heap_node.node, &sem->donees);
 
-//			TRACE_TASK(next, "Heap Before:\n");
-//			print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0);
+	fq->owner = NULL;  // no longer owned!!
+	--(fq->count);
+	if(fq->count < sem->shortest_fifo_queue->count) {
+		sem->shortest_fifo_queue = fq;
+	}
+	--(sem->nr_in_fifos);
 
-			binheap_add(&fq->nest.hp_binheap_node,
-				&tsk_rt(next)->hp_blocked_tasks,
-				struct nested_info,
-				hp_binheap_node);
+#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
+	if(sem->aff_obs) {
+		sem->aff_obs->ops->notify_dequeue(sem->aff_obs, fq, t);
+		sem->aff_obs->ops->notify_freed(sem->aff_obs, fq, t);
+	}
+#endif
 
-//			TRACE_TASK(next, "Heap After:\n");
-//			print_hp_waiters(tsk_rt(next)->hp_blocked_tasks.root, 0);
+	// 't' must drop all priority and clean up data structures before hand-off.
 
-			raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock);
-		}
-		else {
-			/* Well, if 'next' is not the highest-priority waiter,
-			 * then it (probably) ought to inherit the highest-priority
-			 * waiter's priority. */
-			TRACE_TASK(next, "is not hp_waiter of replica %d. hp_waiter is %s/%d\n",
-				ikglp_get_idx(sem, fq),
-				(fq->hp_waiter) ? fq->hp_waiter->comm : "null",
-				(fq->hp_waiter) ? fq->hp_waiter->pid : 0);
-
-			raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock);
-
-			binheap_add(&fq->nest.hp_binheap_node,
-				&tsk_rt(next)->hp_blocked_tasks,
-				struct nested_info,
-				hp_binheap_node);
-
-			/* It is possible that 'next' *should* be the hp_waiter, but isn't
-			 * because that update hasn't yet executed (update operation is
-			 * probably blocked on mutex->lock). So only inherit if the top of
-			 * 'next's top heap node is indeed the effective prio. of hp_waiter.
-			 * (We use fq->hp_waiter_eff_prio instead of effective_priority(hp_waiter)
-			 * since the effective priority of hp_waiter can change (and the
-			 * update has not made it to this lock).)
-			 */
-			if(likely(top_priority(&tsk_rt(next)->hp_blocked_tasks) ==
-						fq->nest.hp_waiter_eff_prio))
-			{
-				if(fq->nest.hp_waiter_eff_prio)
-					litmus->increase_prio(next, fq->nest.hp_waiter_eff_prio);
-				else
-					WARN_ON(1);
-			}
+	// DROP ALL INHERITANCE. IKGLP MUST BE OUTER-MOST
+	// This kills any inheritance from a donor.
+	raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock);
+	{
+		int count = 0;
+
+		TRACE_TASK(t, "discarding _all_ inheritance because IKGLP is outermost\n");
 
-			raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock);
+		while(!binheap_empty(&tsk_rt(t)->hp_blocked_tasks)) {
+			binheap_delete_root(&tsk_rt(t)->hp_blocked_tasks,
+					struct nested_info, hp_binheap_node);
+			++count;
 		}
 
-		// wake up the new resource holder!
-		wake_up_for_lock(next);
+		if (count)
+			litmus->decrease_prio(t, NULL, 0);
+
+		WARN_ON(count > 2);  // should not be greater than 2. only local fq inh and donation can be possible.
 	}
 
-	if(fq_of_new_on_fq && fq_of_new_on_fq != fq && fq_of_new_on_fq->count == 1) {
-		// The guy we promoted when to an empty FQ. (Why didn't stealing pick this up?)
-		// Wake up the new guy too.
+	raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);
 
-		BUG_ON(fq_of_new_on_fq->owner != NULL);
+	// Move the next request into the FQ and update heaps as needed.
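+	// (The same helper is reused by ikglp_abort_request() below, so an
+	//  aborted request's freed slot is refilled the same way -- the bug
+	//  this commit fixes.)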
+	// Priority re-evaluation is handled inside ikglp_move_next_to_fq().
+	ikglp_move_next_to_fq(sem, fq, t, &fq->donee_heap_node, 1);
-		fq = fq_of_new_on_fq;
-		fq_of_new_on_fq = NULL;
-		goto wake_kludge;
-	}
+	if (waitqueue_active(&fq->wait))
+		ikglp_grant_replica_to_next(sem, fq);
 
 	unlock_fine_irqrestore(&sem->lock, flags);
 	raw_spin_unlock_irqrestore(&sem->real_lock, more_flags);
@@ -1632,6 +1705,108 @@ out:
 }
 
 
+
+void ikglp_abort_request(struct ikglp_semaphore *sem, struct task_struct *t, unsigned long flags)
+{
+	ikglp_wait_state_t *wait = (ikglp_wait_state_t*)tsk_rt(t)->blocked_lock_data;
+	ikglp_donee_heap_node_t *donee_info;
+	struct task_struct *donee;
+	struct fifo_queue *donee_fq;
+	struct fifo_queue *fq;
+
+	BUG_ON(!wait);
+	fq = wait->fq;
+
+	/* drop the request from the proper IKGLP data structure and re-eval
+	 * priority relations */
+	switch(wait->cur_q)
+	{
+		case IKGLP_PQ:
+			// No one inherits from waiters in PQ. Just drop the request.
+			__drop_from_pq(sem, wait);
+			break;
+
+
+		case IKGLP_FQ:
+			ikglp_del_global_list(sem, t, &wait->global_heap_node);
+			binheap_delete(&wait->donee_heap_node.node, &sem->donees);
+
+			/* remove the task from the FQ */
+#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
+			if(sem->aff_obs)
+				sem->aff_obs->ops->notify_dequeue(sem->aff_obs, fq, t);
+#endif
+			__drop_from_fq(sem, wait);
+
+			// Drop any and all inheritance t receives.
+			raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock);
+			{
+				int count = 0;
+				TRACE_TASK(t, "discarding _all_ inheritance because IKGLP is outermost\n");
+				while(!binheap_empty(&tsk_rt(t)->hp_blocked_tasks)) {
+					binheap_delete_root(&tsk_rt(t)->hp_blocked_tasks,
+							struct nested_info, hp_binheap_node);
+					++count;
+				}
+				if (count)
+					litmus->decrease_prio(t, NULL, 0);
+				WARN_ON(count > 2);  // should not be greater than 2. only local fq inh and donation can be possible.
+			}
+			raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);
+
+			ikglp_refresh_owners_prio_decrease(wait->donee_heap_node.fq, sem, flags, 1);  // unlocks sem->lock. reacquire it.
+			lock_fine_irqsave(&sem->lock, flags);  // there should be no contention!!!!
+			ikglp_move_next_to_fq(sem, fq, t, &wait->donee_heap_node, 1);
+			break;
+
+
+		case IKGLP_DONOR:
+			ikglp_del_global_list(sem, t, &wait->global_heap_node);
+			__drop_from_donor(sem, wait);
+
+			/* update donee */
+			donee_info = wait->donee_info;
+			donee_info->donor_info = NULL;  // clear the cross-link
+			binheap_decrease(&donee_info->node, &sem->donees);
+
+			donee = donee_info->task;
+			donee_fq = donee_info->fq;
+			if (donee == donee_fq->owner) {
+				TRACE_TASK(t, "Donee %s/%d is an owner of fq %d.\n",
+						donee->comm, donee->pid,
+						ikglp_get_idx(sem, donee_fq));
+				ikglp_remove_donation_from_owner(&wait->prio_donation.hp_binheap_node, donee_fq, sem, flags);  // unlocks sem->lock. reacquire it.
+				lock_fine_irqsave(&sem->lock, flags);  // there should be no contention!!!!
+			}
+			else {
+				TRACE_TASK(t, "Donee %s/%d is blocked in fq %d.\n",
						donee->comm, donee->pid,
+						ikglp_get_idx(sem, donee_fq));
+
+				ikglp_remove_donation_from_fq_waiter(donee, &wait->prio_donation.hp_binheap_node);
+				if(donee == donee_fq->hp_waiter) {
+					TRACE_TASK(t, "Donee %s/%d was an hp_waiter of fq %d. Rechecking hp_waiter.\n",
+							donee->comm, donee->pid,
+							ikglp_get_idx(sem, donee_fq));
+
+					donee_fq->hp_waiter = ikglp_find_hp_waiter(donee_fq, NULL);
+					TRACE_TASK(t, "New hp_waiter for fq %d is %s/%d!\n",
+							ikglp_get_idx(sem, donee_fq),
+							(donee_fq->hp_waiter) ? donee_fq->hp_waiter->comm : "null",
+							(donee_fq->hp_waiter) ?
+								donee_fq->hp_waiter->pid : 0);
+
+					ikglp_refresh_owners_prio_decrease(donee_fq, sem, flags, 1);  // unlocks sem->lock. reacquire it.
+					lock_fine_irqsave(&sem->lock, flags);  // there should be no contention!!!!
+				}
+			}
+
+			break;
+		default:
+			BUG();
+	}
+
+	BUG_ON(wait->cur_q != IKGLP_INVL);  /* state should now be invalid */
+}
+
 void ikglp_budget_exhausted(struct litmus_lock* l, struct task_struct* t)
 {
 	/*
@@ -1642,9 +1817,6 @@ void ikglp_budget_exhausted(struct litmus_lock* l, struct task_struct* t)
 	 *
 	 * step 1: first check that we are actually blocked.
 	 * step 2: remove our request from ANY data structure:
-	 *         - donor heap
-	 *         - pq
-	 *         - fq
 	 * step 3: reissue the request
 	 */
 
@@ -1657,75 +1829,23 @@ void ikglp_budget_exhausted(struct litmus_lock* l, struct task_struct* t)
 	blocked_lock = tsk_rt(t)->blocked_lock;
 	if (blocked_lock == l) {
-		ikglp_wait_state_t *wait = (ikglp_wait_state_t*)tsk_rt(t)->blocked_lock_data;
-		ikglp_donee_heap_node_t *donee_info;
-		struct task_struct *donee;
-		struct fifo_queue *donee_fq;
-		BUG_ON(!wait);
+		ikglp_wait_state_t *wait;
 
-		/* drop the request from the proper IKGLP data structure and re-eval
-		 * priority relations */
-		switch(wait->cur_q)
-		{
-			case IKGLP_PQ:
-				// No one inherits from waiters in PQ. Just drop the request.
-				__drop_from_pq(sem, wait);
-				break;
-			case IKGLP_FQ:
-				__drop_from_fq(sem, wait);
-				ikglp_refresh_owners_prio_decrease(wait->donee_heap_node.fq, sem, flags, 1);  // unlocks sem->lock. reacquire it.
-				lock_fine_irqsave(&sem->lock, flags);  // there should be no contention!!!!
-				break;
-			case IKGLP_DONOR:
-				__drop_from_donor(sem, wait);
-				/* update donee */
-				donee_info = wait->donee_info;
-				donee_info->donor_info = NULL;  // clear the cross-link
-				binheap_decrease(&donee_info->node, &sem->donees);
-
-				donee = donee_info->task;
-				donee_fq = donee_info->fq;
-				if (donee == donee_fq->owner) {
-					TRACE_TASK(t, "Donee %s/%d is an owner of fq %d.\n",
-							donee->comm, donee->pid,
-							ikglp_get_idx(sem, donee_fq));
-					ikglp_remove_donation_from_owner(&wait->prio_donation.hp_binheap_node, donee_fq, sem, flags);  // unlocks sem->lock. reacquire it.
-					lock_fine_irqsave(&sem->lock, flags);  // there should be no contention!!!!
-				}
-				else {
-					TRACE_TASK(t, "Donee %s/%d is blocked in of fq %d.\n",
-							donee->comm, donee->pid,
-							ikglp_get_idx(sem, donee_fq));
+		TRACE_TASK(t, "IKGLP before reject:\n");
+		__ikglp_dump_state(sem);
 
-					ikglp_remove_donation_from_fq_waiter(donee, &wait->prio_donation.hp_binheap_node);
-					if(donee == donee_fq->hp_waiter) {
-						TRACE_TASK(t, "Donee %s/%d was an hp_waiter of fq %d. Rechecking hp_waiter.\n",
-								donee->comm, donee->pid,
-								ikglp_get_idx(sem, donee_fq));
+		ikglp_abort_request(sem, t, flags);
 
-						donee_fq->hp_waiter = ikglp_find_hp_waiter(donee_fq, NULL);
-						TRACE_TASK(t, "New hp_waiter for fq %d is %s/%d!\n",
-								ikglp_get_idx(sem, donee_fq),
-								(donee_fq->hp_waiter) ? donee_fq->hp_waiter->comm : "null",
-								(donee_fq->hp_waiter) ? donee_fq->hp_waiter->pid : 0);
-
-						ikglp_refresh_owners_prio_decrease(donee_fq, sem, flags, 1);  // unlocks sem->lock. reacquire it.
-						lock_fine_irqsave(&sem->lock, flags);  // there should be no contention!!!!
-					}
-				}
-
-				break;
-			default:
-				BUG();
-		}
-
-		BUG_ON(wait->cur_q != IKGLP_INVL);  /* state should now be invalid */
+		TRACE_TASK(t, "IKGLP after reject (before reissue):\n");
+		__ikglp_dump_state(sem);
 
 		/* now re-issue the request */
 		TRACE_TASK(t, "Reissuing a request for replica from lock %d.\n", l->ident);
 
+		wait = (ikglp_wait_state_t*)tsk_rt(t)->blocked_lock_data;
 		if(sem->nr_in_fifos < sem->max_in_fifos) {
+			struct fifo_queue *fq;
 			// enqueue somwhere
@@ -1753,6 +1873,9 @@ void ikglp_budget_exhausted(struct litmus_lock* l, struct task_struct* t)
 			ikglp_enqueue_on_donor(sem, wait, flags);  // unlocks sem->lock
 		}
 
+		TRACE_TASK(t, "IKGLP after reissue:\n");
+		__ikglp_dump_state(sem);
+
 		raw_spin_unlock_irqrestore(&sem->real_lock, more_flags);
 	}
 	else if (blocked_lock) {
diff --git a/litmus/locking.c b/litmus/locking.c
index 88905d4cff26..43afa920d3f1 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -157,18 +157,17 @@ asmlinkage long sys_litmus_unlock(int lock_od)
 	entry = get_entry_for_od(lock_od);
 	if (entry && is_lock(entry)) {
 		l = get_lock(entry);
+
+		if (l == tsk_rt(current)->outermost_lock) {
+			TRACE_CUR("Lock %d assumed to be outermost lock.\n", l->ident);
+			tsk_rt(current)->outermost_lock = NULL;
+		}
+
 		TRACE_CUR("Attempts to unlock %d\n", l->ident);
 		err = l->ops->unlock(l);
 		if (!err) {
 			sched_trace_lock(current, l->ident, 0);
-
-			TRACE_CUR("Unlocked %d\n", l->ident);
-
-			if (tsk_rt(current)->outermost_lock == l) {
-				TRACE_CUR("Lock %d assumed to be outermost lock.\n", l->ident);
-				tsk_rt(current)->outermost_lock = NULL;
-				WARN_ON(holds_locks(current));
-			}
 		}
 	}
-- 
cgit v1.2.2
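
The invariant behind this fix, reduced to a toy. The program below is a
hedged sketch, not LITMUS^RT code -- every identifier in it is illustrative;
in the real patch the slot accounting is sem->nr_in_fifos and the promotion
step is ikglp_move_next_to_fq(). It shows why an abort path that only
dequeues (the old IKGLP_FQ abort handling) leaks a FIFO slot while another
request sits parked in the PQ/donor heaps.

/*
 * Toy model: whenever a request leaves a FIFO slot -- whether it completes
 * OR aborts -- a pending request from the overflow structures (PQ/donor
 * heap) must be pulled into the freed slot. All names are illustrative;
 * none of this is LITMUS^RT API. Build with: cc -o ikglp_toy ikglp_toy.c
 */
#include <stdio.h>

#define MAX_IN_FIFOS 2

static int nr_in_fifos;  /* requests currently holding FIFO slots */
static int nr_overflow;  /* requests parked in the PQ/donor heaps */

/* mirrors the role of ikglp_move_next_to_fq(): promote a parked request */
static void refill_slot(void)
{
	if (nr_in_fifos < MAX_IN_FIFOS && nr_overflow > 0) {
		--nr_overflow;
		++nr_in_fifos;
	}
}

/* old abort path: dequeued the aborted request but never refilled */
static void abort_request_buggy(void)
{
	--nr_in_fifos;
}

/* new abort path: dequeues and refills, just like the unlock path */
static void abort_request_fixed(void)
{
	--nr_in_fifos;
	refill_slot();
}

int main(void)
{
	nr_in_fifos = MAX_IN_FIFOS;
	nr_overflow = 1;
	abort_request_buggy();
	printf("buggy: in_fifos=%d overflow=%d (slot leaked)\n",
	       nr_in_fifos, nr_overflow);

	nr_in_fifos = MAX_IN_FIFOS;
	nr_overflow = 1;
	abort_request_fixed();
	printf("fixed: in_fifos=%d overflow=%d\n", nr_in_fifos, nr_overflow);
	return 0;
}

Running it prints "buggy: in_fifos=1 overflow=1 (slot leaked)" versus
"fixed: in_fifos=2 overflow=0": after the fixed abort, the parked request
occupies the freed slot immediately, which is the "move new requests to the
FIFO queue after an aborted request makes room" behavior this commit adds.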