From cb71b522bb144a838b457304382d63b131695a8c Mon Sep 17 00:00:00 2001
From: Bryan Ward
Date: Thu, 18 Apr 2013 15:52:46 -0400
Subject: Appears to be working.

---
 include/litmus/fdso.h        |  2 +-
 include/litmus/sched_trace.h |  4 +--
 litmus/sched_psn_edf.c       | 69 +++++++++++++++++++++++---------------------
 3 files changed, 39 insertions(+), 36 deletions(-)

diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h
index 85a649e2722d..a13edfc6366c 100644
--- a/include/litmus/fdso.h
+++ b/include/litmus/fdso.h
@@ -14,7 +14,7 @@
 
 #define MAX_OBJECT_DESCRIPTORS 32
 
-typedef unsigned int resource_mask_t;
+typedef unsigned long resource_mask_t;
 
 typedef enum {
 	MIN_OBJ_TYPE = 0,
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
index fa7042f744cb..b19961c488aa 100644
--- a/include/litmus/sched_trace.h
+++ b/include/litmus/sched_trace.h
@@ -195,8 +195,8 @@ feather_callback void do_sched_trace_task_tardy(unsigned long id,
 #define trace_litmus_switch_to(t)
 #define trace_litmus_switch_away(prev)
 #define trace_litmus_task_completion(t, forced)
-#define trace_litmus_task_block(t)
-#define trace_litmus_task_resume(t)
+#define trace_litmus_task_block(t,i)
+#define trace_litmus_task_resume(t,i)
 #define trace_litmus_sys_release(start)
 #define trace_litmus_task_exit(t)
 #define trace_litmus_task_tardy(t)
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index 6f4d4adcec01..466d45d9f6bd 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -418,6 +418,9 @@ struct dgl_semaphore {
 	/* bitmask of resources that are currently locked. */
 	resource_mask_t locked;
 
+	/* bitmask of the resources that are logically locked. */
+	resource_mask_t logically_locked;
+
 	/* bitmask of resources in the file descriptor table that are controlled by
 	 * this dgl_semaphore.
 	 */
@@ -429,6 +432,11 @@ struct dgl_semaphore {
 	 */
 	bool boosted[NR_CPUS];
 
+	/* Ensure that a task cannot acquire if there is an earlier-issued request
+	 * on that processor.
+	 */
+	bool logically_boosted[NR_CPUS];
+
 	/* FIFO queue of waiting tasks */
 	wait_queue_head_t wait;
 };
@@ -644,7 +652,7 @@ bool is_mask_valid(struct litmus_lock* l, resource_mask_t mask)
 #define for_each_bit(field, idx)	\
 	for (idx = find_first_bit(&field, sizeof(field)*8);	\
 	     idx < sizeof(field)*8;	\
-	     idx = find_next_bit(&field, sizeof(field)*8, idx))
+	     idx = find_next_bit(&field, sizeof(field)*8, idx+1))
 
 int psnedf_dgl_dynamic_group_lock(struct litmus_lock* l, resource_mask_t resources)
 {
@@ -665,12 +673,14 @@ int psnedf_dgl_dynamic_group_lock(struct litmus_lock* l, resource_mask_t resourc
 
 	t->resources = resources;
 
 	spin_lock_irqsave(&sem->wait.lock, flags);
+
 	// if sem->locked & resources == 0, then all resources are available,
 	// otherwise we must suspend.
-	if (sem->locked & resources){
+	if (waitqueue_active(&sem->wait) || sem->logically_locked & resources ||
+			sem->logically_boosted[task_cpu(t)]){
 
-		STRACE("Resources locked, suspending\n");
+		STRACE("Resources unavailable, suspending\n");
 
 		init_waitqueue_entry(&wait, t);
 
@@ -678,6 +688,9 @@ int psnedf_dgl_dynamic_group_lock(struct litmus_lock* l, resource_mask_t resourc
 
 		__add_wait_queue_tail_exclusive(&sem->wait, &wait);
 
+		sem->logically_boosted[task_cpu(t)] = true;
+		sem->logically_locked |= resources;
+
 		TS_LOCK_SUSPEND;
 
 		spin_unlock_irqrestore(&sem->wait.lock, flags);
@@ -687,9 +700,10 @@ int psnedf_dgl_dynamic_group_lock(struct litmus_lock* l, resource_mask_t resourc
 		TS_LOCK_RESUME;
 	} else {
 
-		STRACE("Acquired a resource\n");
+		STRACE("Acquired resource(s)\n");
 
-		sem->locked = sem->locked | resources;
+		sem->locked |= resources;
+		sem->logically_locked |= resources;
 
 		// if a job requests a resource, then it was scheduled, and therefore
 		// there was not another boosted job, so this is safe.
@@ -698,10 +712,12 @@ int psnedf_dgl_dynamic_group_lock(struct litmus_lock* l, resource_mask_t resourc
 
 		boost_priority(t);
 		sem->boosted[task_cpu(t)] = true;
+		sem->logically_boosted[task_cpu(t)] = true;
 
 		spin_unlock_irqrestore(&sem->wait.lock, flags);
 	}
 
+	// tracing what resources are used when.
 	for_each_bit(resources, resource)
 		sched_trace_server_switch_to(resource, 0, t->pid, get_job_no(t),
 			get_partition(t));
@@ -709,23 +725,12 @@ int psnedf_dgl_dynamic_group_lock(struct litmus_lock* l, resource_mask_t resourc
 	return 0;
 }
 
-inline int num_boosted(struct dgl_semaphore *sem)
-{
-	int ret = 0;
-	int i;
-	for(i = 0; i < NR_CPUS; i++){
-		ret += sem->boosted[i];
-	}
-	return ret;
-}
-
 int psnedf_dgl_dynamic_group_unlock(struct litmus_lock* l, resource_mask_t resources)
 {
 	struct task_struct *t = current, *tsk;
 	struct dgl_semaphore *sem = dgl_from_lock(l);
 	unsigned long flags;
-	int err = 0, resource;
-	resource_mask_t logically_locked;
+	int err = 0, resource, i;
 	struct list_head *pos, *tmp;
 
 	TRACE_CUR("Trying to unlock a DGL\n");
@@ -754,31 +759,26 @@ int psnedf_dgl_dynamic_group_unlock(struct litmus_lock* l, resource_mask_t resou
 		STRACE("Released all resources\n");
 		unboost_priority(t);
 		sem->boosted[task_cpu(t)] = false;
+		//n.b., logically_boosted[task_cpu(t)] may be reset to true in the
+		//subsequent iteration.
+		sem->logically_boosted[task_cpu(t)] = false;
 	} else {
 		// update t->resources to reflect the resources currently owned.
 		STRACE("Unlocked a subset of locked resources\n");
 		t->resources = t->resources & ~resources;
 	}
 
-	logically_locked = sem->locked;
+	sem->logically_locked = sem->locked;
+	for(i = 0; i < NR_CPUS; i++)
+		sem->logically_boosted[i] = sem->boosted[i];
+
 	// iterate through the waitqueue and unlock ready tasks. Also recreate logically_locked.
 	list_for_each_safe(pos, tmp, &sem->wait.task_list) {
 		tsk = (struct task_struct*) list_entry(pos, wait_queue_t,
 						task_list)->private;
 
-		STRACE_TASK(tsk, "Evaluating\n");
-
-		if ( (logically_locked == -1) || (num_boosted(sem) == NR_CPUS) ){
-			STRACE_TASK(tsk, "All procs boosted, or all resources locked\n");
-			break;
-		}
-
-		//STRACE_TASK(tsk, "Logically locked: %o\n", logically_locked);
-		//STRACE_TASK(tsk, "tsk->resources: %o\n", tsk->resources);
-		//STRACE_TASK(tsk, "!(tsk->resources & logically_locked): %o\n", !(tsk->resources & logically_locked));
-		//STRACE_TASK(tsk, "!sem->boosted: %d\n", !sem->boosted[task_cpu(tsk)]);
 		// the resources requested are unlocked, tsk acquires its resources
-		if( !(tsk->resources & logically_locked) && !sem->boosted[task_cpu(tsk)]) {
+		if( !(tsk->resources & sem->logically_locked) && !sem->logically_boosted[task_cpu(tsk)]) {
 
 			STRACE_TASK(tsk, "Acquired a resource\n");
@@ -791,8 +791,8 @@ int psnedf_dgl_dynamic_group_unlock(struct litmus_lock* l, resource_mask_t resou
 
 			wake_up_process(tsk);
 		}
-
-		logically_locked = logically_locked | tsk->resources;
+		sem->logically_locked |= tsk->resources;
+		sem->logically_boosted[task_cpu(tsk)] = true;
 	}
 
 	for_each_bit(resources, resource)
@@ -847,10 +847,13 @@ static struct litmus_lock* psnedf_new_dgl(void)
 		return NULL;
 
 	sem->locked = 0;
+	sem->logically_locked = 0;
 	sem->dgl_resources = 0;
 
-	for(i = 0; i < NR_CPUS; i++)
+	for(i = 0; i < NR_CPUS; i++){
 		sem->boosted[i] = false;
+		sem->logically_boosted[i] = false;
+	}
 
 	init_waitqueue_head(&sem->wait);
 	sem->litmus_lock.ops = &psnedf_dgl_lock_ops;
-- 
cgit v1.2.2
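
For reference, the grant rule that the reworked unlock path enforces is easier to see outside the kernel. The sketch below is a minimal, self-contained C program; resource_mask_t is redefined locally, and struct waiter, NCPUS, and grant_ready() are hypothetical stand-ins rather than LITMUS^RT identifiers. It mirrors the accounting in the patch: rebuild logically_locked and logically_boosted from the currently held state, then walk the FIFO of waiters and grant only a waiter whose requested mask does not intersect logically_locked and whose processor carries no earlier logically boosted request, folding every examined request back into both masks so later waiters cannot overtake earlier ones.

/*
 * Standalone sketch (hypothetical stand-ins, not LITMUS^RT code) of the grant
 * rule applied when the unlock path scans the FIFO of waiting tasks.
 */
#include <stdbool.h>
#include <stdio.h>

#define NCPUS 2
typedef unsigned long resource_mask_t;

struct waiter {
	int cpu;                 /* processor the waiting task runs on */
	resource_mask_t request; /* resources requested as one group   */
	bool granted;
};

/*
 * Walk the waiters in FIFO order.  A waiter is granted only if none of its
 * requested resources is logically locked and no earlier-issued request is
 * already (logically) boosted on its processor.  Every examined request is
 * then folded into logically_locked/logically_boosted, so later entries in
 * the FIFO cannot overtake it.
 */
static void grant_ready(resource_mask_t locked, const bool boosted[NCPUS],
			struct waiter *w, int n)
{
	resource_mask_t logically_locked = locked;
	bool logically_boosted[NCPUS];
	int i;

	for (i = 0; i < NCPUS; i++)
		logically_boosted[i] = boosted[i];

	for (i = 0; i < n; i++) {
		if (!(w[i].request & logically_locked) &&
		    !logically_boosted[w[i].cpu])
			w[i].granted = true;	/* kernel would wake_up_process() */

		logically_locked |= w[i].request;
		logically_boosted[w[i].cpu] = true;
	}
}

int main(void)
{
	bool boosted[NCPUS] = { false, false };
	struct waiter w[3] = {
		/* FIFO order: w[0] was issued first. */
		{ .cpu = 0, .request = 0x3 }, /* resources {0,1}: granted */
		{ .cpu = 1, .request = 0x2 }, /* overlaps w[0]: waits */
		{ .cpu = 1, .request = 0x4 }, /* free, but CPU 1 has an earlier request: waits */
	};
	int i;

	grant_ready(0, boosted, w, 3);	/* nothing is currently locked */

	for (i = 0; i < 3; i++)
		printf("waiter %d: %s\n", i, w[i].granted ? "granted" : "waits");
	return 0;
}

Built with any C99 compiler, the sketch reports that only the first waiter is granted: the second conflicts on a requested resource, and the third waits solely because an earlier request on its processor is already logically boosted, which is the ordering the logically_boosted flag is meant to preserve.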