From 3025aea8d0ed6ee4ab68281e5cbcc76ec4dab1e2 Mon Sep 17 00:00:00 2001
From: Glenn Elliott
Date: Mon, 23 Apr 2012 19:18:19 -0400
Subject: Fix line-endings. :P

---
 litmus/ikglp_lock.c | 126 ++++++++++++++++++++++++++--------------------------
 1 file changed, 63 insertions(+), 63 deletions(-)

diff --git a/litmus/ikglp_lock.c b/litmus/ikglp_lock.c
index b7e23029b849..2b50fb1c05fd 100644
--- a/litmus/ikglp_lock.c
+++ b/litmus/ikglp_lock.c
@@ -1184,7 +1184,7 @@ int ikglp_unlock(struct litmus_lock* l)
 	unsigned long flags = 0, real_flags;
 	int err = 0;
-
+
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
 	dgl_lock = litmus->get_dgl_spinlock(t);
 #endif
@@ -1193,14 +1193,14 @@ int ikglp_unlock(struct litmus_lock* l)
 	lock_global_irqsave(dgl_lock, flags);  // TODO: Push this deeper
 	lock_fine_irqsave(&sem->lock, flags);
-
+
 	fq = ikglp_get_queue(sem, t);  // returns NULL if 't' is not owner.
-
+
 	if (!fq) {
 		err = -EINVAL;
 		goto out;
-	}
-
+	}
+
 	TRACE_TASK(t, "Freeing replica %d.\n", ikglp_get_idx(sem, fq));
@@ -1900,7 +1900,7 @@ struct fifo_queue* gpu_ikglp_advise_enqueue(struct ikglp_affinity* aff, struct t
 		   (est_len < min_len) ||  /* i-th queue has shortest length */
 		   ((est_len == min_len) &&  /* equal lengths, but one has fewer over-all users */
 			(*(aff->q_info[i].nr_cur_users) < min_nr_users))) {
-
+
 			shortest = &aff->q_info[i];
 			min_len = est_len;
 			min_nr_users = *(aff->q_info[i].nr_cur_users);
@@ -1920,7 +1920,7 @@
 			}
 		}
 	}
-
+
 	if(shortest->q->count >= sem->max_fifo_len) {
 		TRACE_CUR("selected fq %d is too long, but returning it anyway.\n",
 				  ikglp_get_idx(sem, shortest->q));
@@ -1963,24 +1963,24 @@ static ikglp_donee_heap_node_t* pick_donee(struct ikglp_affinity* aff,
 	struct task_struct *donee;
 	ikglp_donee_heap_node_t *donee_node;
 	struct task_struct *mth_highest = ikglp_mth_highest(sem);
-
+
 	lt_t now = litmus_clock();
-
+
 //	TRACE_CUR("fq %d: mth_highest: %s/%d, deadline = %d: (donor) = ??? ",
 //			  ikglp_get_idx(sem, fq),
 //			  mth_highest->comm, mth_highest->pid,
-//			  (int)get_deadline(mth_highest) - now);
-
+
 	if(fq->owner &&
 	   fq->donee_heap_node.donor_info == NULL &&
-	   mth_highest != fq->owner &&
+	   mth_highest != fq->owner &&
 	   litmus->__compare(mth_highest, BASE, fq->owner, BASE)) {
 		donee = fq->owner;
 		donee_node = &(fq->donee_heap_node);
 		*dist_from_head = 0;
-
+
 		BUG_ON(donee != donee_node->task);
-
+
 		TRACE_CUR("picked owner of fq %d as donee\n",
 				  ikglp_get_idx(sem, fq));
@@ -1988,8 +1988,8 @@ static ikglp_donee_heap_node_t* pick_donee(struct ikglp_affinity* aff,
 	}
 	else if(waitqueue_active(&fq->wait)) {
 		struct list_head *pos;
-
-
+
+
 //		TRACE_CUR("fq %d: owner: %s/%d, deadline = %d: (donor) = %s/%d "
 //				  "(mth_highest != fq->owner) = %d "
 //				  "(mth_highest > fq->owner) = %d\n",
@@ -2001,16 +2001,16 @@ static ikglp_donee_heap_node_t* pick_donee(struct ikglp_affinity* aff,
 //				  (fq->donee_heap_node.donor_info) ? fq->donee_heap_node.donor_info->task->pid : -1,
 //				  (mth_highest != fq->owner),
 //				  (litmus->__compare(mth_highest, BASE, fq->owner, BASE)));
-
-
+
+
 		*dist_from_head = 1;
-
+
 		// iterating from the start of the queue is nice since this means
 		// the donee will be closer to obtaining a resource.
 		list_for_each(pos, &fq->wait.task_list) {
 			wait_queue_t *fq_wait = list_entry(pos, wait_queue_t, task_list);
 			ikglp_wait_state_t *wait = container_of(fq_wait, ikglp_wait_state_t, fq_node);
-
+
 //			TRACE_CUR("fq %d: waiter %d: %s/%d, deadline = %d (donor) = %s/%d "
 //					  "(mth_highest != wait->task) = %d "
 //					  "(mth_highest > wait->task) = %d\n",
@@ -2021,40 +2021,40 @@ static ikglp_donee_heap_node_t* pick_donee(struct ikglp_affinity* aff,
 //					  (wait->donee_heap_node.donor_info) ? wait->donee_heap_node.donor_info->task->comm : "nil",
 //					  (wait->donee_heap_node.donor_info) ? wait->donee_heap_node.donor_info->task->pid : -1,
 //					  (mth_highest != wait->task),
-//					  (litmus->__compare(mth_highest, BASE, wait->task, BASE)));
-
-
+
 			if(!has_donor(fq_wait) &&
 			   mth_highest != wait->task &&
 			   litmus->__compare(mth_highest, BASE, wait->task, BASE)) {
 				donee = (struct task_struct*) fq_wait->private;
 				donee_node = &wait->donee_heap_node;
-
+
 				BUG_ON(donee != donee_node->task);
-
+
 				TRACE_CUR("picked waiter in fq %d as donee\n",
 						  ikglp_get_idx(sem, fq));
-
+
 				goto out;
 			}
 			++(*dist_from_head);
 		}
 	}
-
+
 	donee = NULL;
 	donee_node = NULL;
 	*dist_from_head = sem->max_fifo_len + 1;
-
+
 	TRACE_CUR("Found no one to be donee in fq %d!\n", ikglp_get_idx(sem, fq));
-
+
 out:
-
+
 	TRACE_CUR("Candidate donee for fq %d is %s/%d (dist_from_head = %d)\n",
 			  ikglp_get_idx(sem, fq),
 			  (donee) ? (donee)->comm : "nil",
 			  (donee) ? (donee)->pid : -1,
 			  *dist_from_head);
-
+
 	return donee_node;
 }
@@ -2074,10 +2074,10 @@ ikglp_donee_heap_node_t* gpu_ikglp_advise_donee_selection(
 	ikglp_donee_heap_node_t *donee_node;
 	gpu_migration_dist_t distance;
 	int start, i, j;
-
+
 	ikglp_donee_heap_node_t *default_donee;
 	ikglp_wait_state_t *default_donee_donor_info;
-
+
 	if(tsk_rt(donor)->last_gpu < 0) {
 		// no affinity. just return the min prio, like standard IKGLP
 		// TODO: Find something closer to the head of the queue??
 		donee_node = binheap_top_entry(&sem->donees, ikglp_donee_heap_node_t,
@@ -2086,8 +2086,8 @@ ikglp_donee_heap_node_t* gpu_ikglp_advise_donee_selection(
 									   node);
 		goto out;
 	}
-
-
+
+
 	// Temporarily break any donation relation the default donee (the lowest
 	// prio task in the FIFO queues) to make it eligible for selection below.
 	//
 	// NOTE: The original donor relation *must* be restored, even if we select
@@ -2095,40 +2095,40 @@ ikglp_donee_heap_node_t* gpu_ikglp_advise_donee_selection(
 	// the default donee throug affinity-aware selection, before returning
 	// from this function so we don't screw up our heap ordering.
 	// The standard IKGLP algorithm will steal the donor relationship if needed.
-	default_donee = binheap_top_entry(&sem->donees, ikglp_donee_heap_node_t, node);
+	default_donee = binheap_top_entry(&sem->donees, ikglp_donee_heap_node_t, node);
 	default_donee_donor_info = default_donee->donor_info;  // back-up donor relation
 	default_donee->donor_info = NULL;  // temporarily break any donor relation.
-
+
 	// initialize our search
 	donee_node = NULL;
 	distance = MIG_NONE;
-
+
 	// TODO: The below search logic may work well for locating nodes to steal
 	// when an FQ goes idle. Validate this code and apply it to stealing.
-
+
 	// begin search with affinity GPU.
 	start = gpu_to_base_replica(aff, tsk_rt(donor)->last_gpu);
-	i = start;
+	i = start;
 	do {  // "for each gpu" / "for each aff->nr_rsrc"
 		gpu_migration_dist_t temp_distance = gpu_migration_distance(start, i);
-
+
 		// only interested in queues that will improve our distance
 		if(temp_distance < distance || donee_node == NULL) {
 			int dist_from_head = sem->max_fifo_len + 1;
-
+
 			TRACE_CUR("searching for donor on GPU %d", i);
-
+
 			// visit each queue and pick a donee.  bail as soon as we find
 			// one for this class.
 			for(j = 0; j < aff->nr_simult; ++j) {
 				int temp_dist_from_head;
-				ikglp_donee_heap_node_t *temp_donee_node;
+				ikglp_donee_heap_node_t *temp_donee_node;
 				struct fifo_queue *fq;
-
+
 				fq = &(sem->fifo_queues[i + j*aff->nr_rsrc]);
 				temp_donee_node = pick_donee(aff, fq, &temp_dist_from_head);
-
+
 				if(temp_dist_from_head < dist_from_head) {
 					// we check all the FQs for this GPU to spread priorities
@@ -2137,7 +2137,7 @@ ikglp_donee_heap_node_t* gpu_ikglp_advise_donee_selection(
 					dist_from_head = temp_dist_from_head;
 				}
 			}
-
+
 			if(dist_from_head != sem->max_fifo_len + 1) {
 				TRACE_CUR("found donee %s/%d and is the %d-th waiter.\n",
 						  donee_node->task->comm, donee_node->task->pid,
@@ -2151,23 +2151,23 @@ ikglp_donee_heap_node_t* gpu_ikglp_advise_donee_selection(
 						  dist_from_head);
 			}
 			else {
 				TRACE_CUR("skipping GPU %d (distance = %d, best donor "
						  "distance = %d)\n", i, temp_distance, distance);
 			}
-
+
 		i = (i+1 < aff->nr_rsrc) ? i+1 : 0;  // increment with wrap-around
 	} while (i != start);
-
-
+
+
 	// restore old donor info state.
 	default_donee->donor_info = default_donee_donor_info;
 	if(!donee_node) {
-		donee_node = default_donee;
-
+		donee_node = default_donee;
+
 		TRACE_CUR("Could not find a donee. We have to steal one.\n");
 		WARN_ON(default_donee->donor_info == NULL);
 	}
-
+
out:
-
+
 	TRACE_CUR("Selected donee %s/%d on fq %d (GPU %d) for %s/%d with affinity for GPU %d\n",
 			  donee_node->task->comm, donee_node->task->pid,
 			  ikglp_get_idx(sem, donee_node->fq),
@@ -2186,15 +2186,15 @@ static void __find_closest_donor(int target_gpu,
 {
 	ikglp_wait_state_t *this_donor =
 		binheap_entry(donor_node, ikglp_wait_state_t, node);
-
+
 	int this_dist =
 		gpu_migration_distance(target_gpu, tsk_rt(this_donor->task)->last_gpu);
-
+
 //	TRACE_CUR("%s/%d: dist from target = %d\n",
 //			  this_donor->task->comm,
 //			  this_donor->task->pid,
 //			  this_dist);
-
+
 	if(this_dist < *cur_dist) {
 		// take this donor
 		*cur_dist = this_dist;
@@ -2210,7 +2210,7 @@ static void __find_closest_donor(int target_gpu,
 		*cur_closest = this_donor;
 		}
 	}
-
+
 	if(donor_node->left) __find_closest_donor(target_gpu, donor_node->left, cur_closest, cur_dist);
 	if(donor_node->right) __find_closest_donor(target_gpu, donor_node->right, cur_closest, cur_dist);
 }
@@ -2219,21 +2219,21 @@ ikglp_wait_state_t* gpu_ikglp_advise_donor_to_fq(struct ikglp_affinity* aff, str
 {
 	// Huristic strategy: Find donor with the closest affinity to fq.
 	// Tie-break on priority.
-
+
 	// We need to iterate over all the donors to do this. Unfortunatly,
 	// our donors are organized in a heap. We'll visit each node with a
 	// recurisve call. This is realitively safe since there are only sem->m
 	// donors, at most. We won't recurse too deeply to have to worry about
 	// our stack. (even with 128 CPUs, our nest depth is at most 7 deep).
-
+
 	struct ikglp_semaphore *sem = ikglp_from_lock(aff->obs.lock);
 	ikglp_wait_state_t *donor = NULL;
 	int distance = MIG_NONE;
 	int gpu = replica_to_gpu(aff, ikglp_get_idx(sem, fq));
 	ikglp_wait_state_t* default_donor = binheap_top_entry(&sem->donors, ikglp_wait_state_t, node);
-
+
 	__find_closest_donor(gpu, sem->donors.root, &donor, &distance);
-
+
 	TRACE_CUR("Selected donor %s/%d (distance = %d) to move to fq %d "
 			  "(non-aff wanted %s/%d). differs = %d\n",
 			  donor->task->comm, donor->task->pid,
 			  distance,
 			  ikglp_get_idx(sem, fq),
 			  default_donor->task->comm, default_donor->task->pid,
 			  (donor->task != default_donor->task)
 			  );
@@ -2242,7 +2242,7 @@ ikglp_wait_state_t* gpu_ikglp_advise_donor_to_fq(struct ikglp_affinity* aff, str
 			  default_donor->task->comm, default_donor->task->pid,
 			  (donor->task != default_donor->task)
 			  );
-
+
 	return(donor);
 }
--
cgit v1.2.2