From 06051baff7db5e0c1b80d7b2a873b022191cdcec Mon Sep 17 00:00:00 2001
From: Jonathan Herman
Date: Wed, 21 Sep 2011 18:48:49 -0400
Subject: Formatting

---
 litmus/sched_mc.c | 100 +++++++++++++++++++-----------------------------------
 1 file changed, 35 insertions(+), 65 deletions(-)

diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 1f2b13c6a219..0ec03b3949d2 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -4,10 +4,6 @@
  * Implementation of the Mixed Criticality scheduling algorithm.
  *
  * (Per Mollison, Erickson, Anderson, Baruah, Scoredos 2010)
- *
- * This version uses the simple approach and serializes all scheduling
- * decisions by the use of a queue lock. This is probably not the
- * best way to do it, but it should suffice for now.
  */

 #include
@@ -75,7 +71,6 @@ typedef struct {
 } domain_data_t;

 static cpu_entry_t* cpus[NR_CPUS];
-static raw_spinlock_t global_lock;

 #define domain_data(dom) (container_of(dom, domain_data_t, domain))
 #define is_global(dom) (domain_data(dom)->heap)
@@ -86,8 +81,7 @@ static raw_spinlock_t global_lock;
 	(((e)->linked) ? tsk_mc_crit((e)->linked) : NUM_CRIT_LEVELS - 1)
 #define crit_cpu(ce) \
 	(container_of((void*)((ce) - (ce)->level), cpu_entry_t, crit_entries))
-
-/* useful debug macros */
+/* Useful debug macros */
 #define TS "(%s/%d:%d:%s)"
 #define TA(t) (t) ? (is_ghost(t)) ? "ghost" : t->comm : "NULL", (t) ? t->pid : 1, \
 	(t) ? t->rt_param.job_params.job_no : 1, \
@@ -115,7 +109,6 @@ static int cpu_lower_prio(struct bheap_node *a, struct bheap_node *b)
 	second = b->value;
 	first_link = first->linked;
 	second_link = second->linked;
-
 	if (!first->usable || !second->usable) {
 		return second->usable && first->usable;
 	} else if (!first_link || !second_link) {
@@ -134,7 +127,6 @@
 static noinline int mc_preempt_needed(domain_t *dom, struct task_struct* curr)
 {
 	struct task_struct *next = dom->peek_ready(dom);
-
 	if (!next || !curr) {
 		return next && !curr;
 	} else {
@@ -150,8 +142,7 @@ static noinline int mc_preempt_needed(domain_t *dom, struct task_struct* curr)
 static inline crit_entry_t* lowest_prio_cpu(domain_t *dom)
 {
 	struct bheap *heap = domain_data(dom)->heap;
-	struct bheap_node* hn;
-	hn = bheap_peek(cpu_lower_prio, heap);
+	struct bheap_node* hn = bheap_peek(cpu_lower_prio, heap);
 	return (hn) ? hn->value : NULL;
 }

@@ -161,11 +152,9 @@ static inline crit_entry_t* lowest_prio_cpu(domain_t *dom)
  */
 static void update_ghost_time(struct task_struct *p)
 {
-	u64 delta, clock;
-
+	u64 clock = litmus_clock();
+	u64 delta = clock - p->se.exec_start;
 	BUG_ON(!is_ghost(p));
-	clock = litmus_clock();
-	delta = clock - p->se.exec_start;
 	if (unlikely ((s64)delta < 0)) {
 		delta = 0;
 		TRACE_TASK(p, "WARNING: negative time delta");
@@ -236,7 +225,6 @@ static void link_task_to_crit(crit_entry_t *ce,
 }

 static void check_for_preempt(domain_t*);
-
 /**
  * job_arrival() - Called when a task re-enters the system.
  * Caller must hold no locks.
@@ -247,7 +235,6 @@ static void job_arrival(struct task_struct *task)

 	TRACE_TASK(task, "Job arriving");
 	BUG_ON(!task);
-
 	raw_spin_lock(dom->lock);
 	if (can_requeue(task)) {
 		dom->requeue(dom, task);
@@ -271,16 +258,16 @@ static void job_arrival(struct task_struct *task)
  */
 static void link_task_to_cpu(cpu_entry_t *entry, struct task_struct *task)
 {
-	int i;
+	int i = entry_level(entry);
 	TRACE_TASK(task, "Linking to P%d", entry->cpu);
 	BUG_ON(task && tsk_rt(task)->linked_on != entry->cpu);
 	BUG_ON(task && is_ghost(task));

-	i = entry_level(entry);
 	if (task){
 		set_rt_flags(task, RT_F_RUNNING);
 	}
 	entry->linked = task;
+	/* Higher criticality crit entries are now usable */
 	for (; i < entry_level(entry) + 1; i++) {
 		TRACE_CRIT_ENTRY(&entry->crit_entries[i], "now usable");
 		entry->crit_entries[i].usable = 1;
@@ -295,9 +282,8 @@ static void link_task_to_cpu(cpu_entry_t *entry, struct task_struct *task)
  * Caller must hold the lock for @dom and @ce's CPU lock. Returns 1 if
  * a physically preemption occurred.
  */
-static int preempt(domain_t *dom, crit_entry_t *ce)
+static void preempt(domain_t *dom, crit_entry_t *ce)
 {
-	int rv = 0;
 	struct task_struct *task = dom->take_ready(dom);
 	cpu_entry_t *entry = crit_cpu(ce);

@@ -309,14 +295,11 @@ static int preempt(domain_t *dom, crit_entry_t *ce)
 		dom->requeue(dom, ce->linked);
 	}
 	link_task_to_crit(ce, task);
-
 	/* Preempt actual execution if this is a running task */
 	if (!is_ghost(task)) {
 		link_task_to_cpu(entry, task);
 		preempt_if_preemptable(entry->scheduled, entry->cpu);
-		rv = 1;
 	}
-	return rv;
 }

 /**
@@ -335,7 +318,6 @@ static void update_crit_levels(cpu_entry_t *entry)
 	for (i = level + 1; i < NUM_CRIT_LEVELS; i++) {
 		ce = &entry->crit_entries[i];
 		tasks[i] = ce->linked;
-		TRACE_CRIT_ENTRY(ce, "not usable");
 		ce->usable = 0;
 		if (ce->linked) {
 			link_task_to_crit(ce, NULL);
@@ -380,7 +362,6 @@ static void check_for_preempt(domain_t *dom)
 	} else /* Partitioned */ {
 		ce = domain_data(dom)->crit_entry;
 		entry = crit_cpu(ce);
-
 		raw_spin_lock(&entry->lock);
 		if (ce->usable && dom->preempt_needed(dom, ce->linked)) {
 			preempt(dom, ce);
@@ -406,11 +387,12 @@ static void remove_from_all(struct task_struct* task)
 	BUG_ON(!task);

 	raw_spin_lock(dom->lock);
+
 	if (task->rt_param.linked_on != NO_CPU) {
 		entry = cpus[task->rt_param.linked_on];
-
 		raw_spin_lock(&entry->lock);
-		/* Unlink if task is still linked post lock */
+
+		/* Unlink only if task is still linked post lock */
 		ce = &entry->crit_entries[tsk_mc_crit(task)];
 		if (task->rt_param.linked_on != NO_CPU) {
 			BUG_ON(entry->linked != task);
@@ -420,13 +402,11 @@ static void remove_from_all(struct task_struct* task)
 				link_task_to_cpu(entry, NULL);
 			}
 		}

-		BUG_ON(is_queued(task));
-		if (update) {
+		if (update)
 			update_crit_levels(entry);
-		} else {
+		else
 			raw_spin_unlock(&entry->lock);
-		}
 	} else if (is_queued(task)) {
 		/* This is an interesting situation: t is scheduled,
 		 * but was just recently unlinked. It cannot be
@@ -437,6 +417,7 @@ static void remove_from_all(struct task_struct* task)
 		 */
 		remove((rt_domain_t*)get_task_domain(task)->data, task);
 	}
+	BUG_ON(is_queued(task));
 	raw_spin_unlock(dom->lock);
 }

@@ -468,9 +449,8 @@ static void job_completion(struct task_struct *task, int forced)
 	if (tsk_mc_data(task)->mc_job.ghost_budget == 0) {
 		tsk_mc_data(task)->mc_job.is_ghost = 0;
 		prepare_for_next_period(task);
-		if (is_released(task, litmus_clock())) {
+		if (is_released(task, litmus_clock()))
 			sched_trace_task_release(task);
-		}
 	}

 	/* Requeue non-blocking tasks */
@@ -484,18 +464,16 @@
 static enum hrtimer_restart mc_ghost_exhausted(struct hrtimer *timer)
 {
 	unsigned long flags;
-	crit_entry_t *ce;
+	crit_entry_t *ce = container_of(timer, crit_entry_t, timer);;
 	struct task_struct *tmp = NULL;

 	local_irq_save(flags);
-
-	ce = container_of(timer, crit_entry_t, timer);
 	TRACE_CRIT_ENTRY(ce, "Ghost exhausted firing");

 	/* Due to race conditions, we cannot just set the linked
 	 * task's budget to 0 as it may no longer be the task
 	 * for which this timer was armed. Instead, update the running
-	 * task time
+	 * task time and see if this causes exhaustion.
 	 */
 	raw_spin_lock(&crit_cpu(ce)->lock);
 	if (ce->linked && is_ghost(ce->linked)) {
@@ -524,11 +502,9 @@ static void mc_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 	domain_t *dom = get_task_domain(first);

 	raw_spin_lock_irqsave(dom->lock, flags);
-
 	TRACE_TASK(first, "Jobs released");
 	__merge_ready(rt, tasks);
 	check_for_preempt(dom);
-
 	raw_spin_unlock_irqrestore(dom->lock, flags);
 }

@@ -538,21 +514,18 @@ static void mc_release_jobs(rt_domain_t* rt, struct bheap* tasks)
  */
 static void mc_task_new(struct task_struct *t, int on_rq, int running)
 {
-	unsigned long flags;
-	cpu_entry_t* entry;
-	enum crit_level level;
-
-	TRACE("New mixed criticality task %d\n", t->pid);
+	unsigned long flags;
+	cpu_entry_t* entry;
+	enum crit_level level = tsk_mc_crit(t);

 	local_irq_save(flags);
+	TRACE("New mixed criticality task %d\n", t->pid);

 	/* Assign domain */
-	level = tsk_mc_crit(t);
-	if (level < CRIT_LEVEL_C) {
+	if (level < CRIT_LEVEL_C)
 		entry = cpus[get_partition(t)];
-	} else {
+	else
 		entry = cpus[task_cpu(t)];
-	}
 	t->rt_param._domain = entry->crit_entries[level].domain;

 	/* Setup job params */
@@ -579,12 +552,10 @@ static void mc_task_new(struct task_struct *t, int on_rq, int running)
 static void mc_task_wake_up(struct task_struct *task)
 {
 	unsigned long flags;
-	lt_t now;
-
+	lt_t now = litmus_clock();
 	local_irq_save(flags);

-	TRACE_TASK(task, "Wakes up");
-	now = litmus_clock();
+	TRACE_TASK(task, "Wakes up");
 	if (is_tardy(task, now)) {
 		/* Task missed its last release */
 		release_at(task, now);
@@ -648,8 +619,8 @@ static long mc_admit_task(struct task_struct* task)
 	return 0;
 }

-/*
- * Return next task which should be scheduled.
+/**
+ * mc_schedule() - Return next task which should be scheduled.
  */
 static struct task_struct* mc_schedule(struct task_struct * prev)
 {
@@ -694,7 +665,7 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 	if ((out_of_time || sleep) && !blocks && !preempt)
 		job_completion(entry->scheduled, !sleep);
 	/* Global scheduled tasks must wait for a deschedule before they
-	 * can rejoin a global domain. See comment in job_arrival.
+	 * can rejoin a global domain. Requeue them here.
 	 */
 	else if (global && preempt && !blocks)
 		job_arrival(entry->scheduled);
@@ -705,6 +676,9 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 		ce = &entry->crit_entries[i];
 		dom = ce->domain;

+		/* Swap locks. We cannot acquire a domain lock while
+		 * holding an entry lock or deadlocks will happen.
+		 */
 		raw_spin_unlock(&entry->lock);
 		raw_spin_lock(dom->lock);
 		raw_spin_lock(&entry->lock);
@@ -714,33 +688,33 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 				dom->take_ready(dom);
 			link_task_to_crit(ce, dtask);
 			ready_task = (is_ghost(dtask)) ? NULL : dtask;
+
+			/* Task found! */
 			if (ready_task) {
 				link_task_to_cpu(entry, ready_task);
 				raw_spin_unlock(dom->lock);
 				update_crit_levels(entry);
 				raw_spin_lock(&entry->lock);
-				continue;
+				goto picked;
 			}
 		}
 		raw_spin_unlock(dom->lock);
 	}

+picked:
 	/* Schedule next task */
 	next = entry->linked;
 	entry->scheduled = next;
 	if (entry->scheduled)
 		entry->scheduled->rt_param.scheduled_on = entry->cpu;
-
 	sched_state_task_picked();
 	raw_spin_unlock(&entry->lock);
 	local_irq_restore(flags);
-
 	if (next)
 		TRACE_TASK(next, "Scheduled at %llu", litmus_clock());
 	else if (exists && !next)
 		TRACE("Becomes idle at %llu\n", litmus_clock());
-
 	return next;
 }

@@ -784,7 +758,6 @@ static void init_crit_entry(crit_entry_t *ce, enum crit_level level,
 	ce->node = node;
 	ce->domain = &dom_data->domain;
 	ce->usable = 1;
-
 	hrtimer_init(&ce->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 	ce->timer.function = mc_ghost_exhausted;
 }
@@ -813,9 +786,7 @@ static void init_global_domain(domain_data_t *dom_data, enum crit_level level,
 		entry = cpus[cpu];
 		node = &nodes[cpu];
 		ce = &entry->crit_entries[level];
-
 		init_crit_entry(ce, level, dom_data, node);
-
 		bheap_node_init(&ce->node, ce);
 		bheap_insert(cpu_lower_prio, heap, node);
 	}
@@ -836,12 +807,11 @@ static int __init init_mc(void)
 	domain_data_t *dom_data;
 	raw_spinlock_t *a_dom, *b_dom, *c_dom; /* For lock debugger */

-	raw_spin_lock_init(&global_lock);
-
 	for_each_online_cpu(cpu) {
 		entry = &per_cpu(_mc_cpus, cpu);
 		cpus[cpu] = entry;

+		/* CPU */
 		entry->cpu = cpu;
 		entry->scheduled = NULL;
 		entry->linked = NULL;