From b26432a616a3aaad55ea404cd88d37fd1e345af1 Mon Sep 17 00:00:00 2001
From: Jonathan Herman
Date: Tue, 9 Oct 2012 01:27:45 -0400
Subject: A task's blocking time no longer takes from its server's budget.

---
 litmus/dgl.c      |   6 --
 litmus/domain.c   |   2 +-
 litmus/jobs.c     |   6 +-
 litmus/sched_mc.c | 171 +++++++++++++++++++++++++++++++++---------------------
 4 files changed, 109 insertions(+), 76 deletions(-)

(limited to 'litmus')

diff --git a/litmus/dgl.c b/litmus/dgl.c
index cced7c259735..7331855d43f7 100644
--- a/litmus/dgl.c
+++ b/litmus/dgl.c
@@ -199,12 +199,6 @@ static unsigned long try_acquire(struct dgl *dgl, struct dgl_resource *resource,
 		litmus_reschedule(greq->cpu);
 		dgl->running++;
 
-		if (greq->task) {
-			BUG_ON(tsk_rt(greq->task)->linked_on == NO_CPU);
-			set_rt_flags(greq->task, RT_F_RUNNING);
-			sched_trace_task_resume(greq->task);
-		}
-
 		dgl->cpu_acquired(greq->cpu);
 	}
 
diff --git a/litmus/domain.c b/litmus/domain.c
index 0852f30b428e..4fc8705c1e81 100644
--- a/litmus/domain.c
+++ b/litmus/domain.c
@@ -3,7 +3,7 @@
 
 #include
 
-void dummy_acquire(struct task_struct *t){};
+int dummy_acquire(struct task_struct *t){return 1;};
 void dummy_release(struct task_struct *t){};
 
 void domain_init(domain_t *dom,
diff --git a/litmus/jobs.c b/litmus/jobs.c
index 6ba40db9639f..fa55283e2134 100644
--- a/litmus/jobs.c
+++ b/litmus/jobs.c
@@ -1,7 +1,5 @@
 /* litmus/jobs.c - common job control code
- * TODO: modified heavily for sched_mc
  */
-
 #include
 
 #include
@@ -34,13 +32,15 @@ static inline void setup_release(struct task_struct *t, struct rt_job *job,
 
 static inline void setup_kernel_release(struct task_struct *t, lt_t release)
 {
+	lt_t now = litmus_clock();
+
 	BUG_ON(!t);
 
 	/* Record lateness before we set up the next job's
 	 * release and deadline. Lateness may be negative.
 	 */
 	t->rt_param.job_params.lateness =
-		(long long)litmus_clock() -
+		(long long)now -
 		(long long)t->rt_param.job_params.deadline;
 
 	t->rt.time_slice = 1;
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 065c767be846..b34ec4ee9e59 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -3,6 +3,7 @@
  * Implementation of the Mixed Criticality scheduling algorithm.
  *
  * (Per Mollison, Erickson, Anderson, Baruah, Scoredos 2010)
+ * TODO: optimize reschedule
  */
 #include
 #include
@@ -40,7 +41,7 @@
  */
 struct cpu_entry {
 	int cpu;
-	int lock_acquired;
+	enum crit_level crit_signal;
 	struct task_struct* scheduled;
 	struct task_struct* will_schedule;
 	struct task_struct* linked;
@@ -85,12 +86,11 @@ static int interrupt_cpu;
 #define get_crit_entry_for(cpu, level) (&per_cpu(cpus, cpu).crit_entries[level])
 
 /*
- * Put in requests for resources needed by @t. If @t is a server, this will
- * set @t's np flag to reflect resources held by @t's children.
+ * Put in requests for resources needed by @t.
  */
-static void acquire_resources(struct task_struct *t)
+static int acquire_resources(struct task_struct *t)
 {
-	int cpu;
+	int cpu, acquired;
 
 	/* Can't contend for resources if not logically running */
 	BUG_ON(tsk_rt(t)->linked_on == NO_CPU);
@@ -111,7 +111,10 @@
 		make_np(t);
 	}
 
+	acquired = has_resources(t, cpu);
 	raw_spin_unlock(&dgl_lock);
+
+	return acquired;
 }
 
 static void release_resources(struct task_struct *t)
@@ -244,17 +247,34 @@ static inline void cancel_ghost(struct crit_entry *ce)
 /*
  * Arm ghost timer. Will merge timers if the option is specified.
  */
-static inline void arm_ghost(struct crit_entry *ce, lt_t fire)
+static inline void start_crit(struct crit_entry *ce)
 {
+	lt_t fire;
+	struct task_struct *task = ce->linked;
+
+	BUG_ON(ce->state != CS_ACTIVE);
+
+	if (is_ghost(task) && CRIT_LEVEL_A != tsk_mc_crit(task)) {
+		/* There is a level-A timer that will force a
+		 * preemption, so we don't set this for level-A
+		 * tasks. Otherwise reset the budget timer
+		 */
+		task->se.exec_start = litmus_clock();
+		fire = task->se.exec_start + budget_remaining(task);
+
 #ifdef CONFIG_MERGE_TIMERS
-	add_event(crit_cpu(ce)->event_group, &ce->event, fire);
+		add_event(crit_cpu(ce)->event_group, &ce->event, fire);
 #else
-	__hrtimer_start_range_ns(&ce->timer,
-			ns_to_ktime(fire),
-			0 /* delta */,
-			HRTIMER_MODE_ABS_PINNED,
-			0 /* no wakeup */);
+		__hrtimer_start_range_ns(&ce->timer,
+				ns_to_ktime(fire),
+				0 /* delta */,
+				HRTIMER_MODE_ABS_PINNED,
+				0 /* no wakeup */);
 #endif
+	}
+
+	sched_trace_server_switch_to(ce_sid(ce), 0, -task->pid,
+			get_rt_job(task));
 }
 
 /*
@@ -313,21 +333,13 @@ static void link_task_to_crit(struct crit_entry *ce,
 	/* Actually link task */
 	ce->linked = task;
 	if (task) {
+		/* Block if task cannot acquire resources */
 		task->rt_param.linked_on = crit_cpu(ce)->cpu;
 
-		if (is_ghost(task) && CRIT_LEVEL_A != tsk_mc_crit(task)) {
-			/* There is a level-A timer that will force a
-			 * preemption, so we don't set this for level-A
-			 * tasks. Otherwise reset the budget timer
-			 */
-			task->se.exec_start = litmus_clock();
-			when_to_fire = task->se.exec_start + budget_remaining(task);
-			arm_ghost(ce, when_to_fire);
-		}
-		sched_trace_server_switch_to(ce_sid(ce), 0, -task->pid,
-				get_rt_job(ce->linked));
-		if (!is_ghost(task))
-			ce->domain->acquire_resources(task);
+		if (is_ghost(task) || ce->domain->acquire_resources(task))
+			start_crit(ce);
+		else
+			ce->state = CS_BLOCKED;
 	}
 }
 
@@ -356,7 +368,10 @@ static void job_arrival(struct task_struct *task)
 		 * causing the system to crash when the task is scheduled
 		 * in two places simultaneously.
 		 */
-		TRACE_MC_TASK(task, "Delayed arrival of scheduled task\n");
+		TRACE_MC_TASK(task, "Delayed arrival of scheduled task, "
+			      "linked: %d, sched: %d, queued: %d\n",
+			      tsk_rt(task)->linked_on, tsk_rt(task)->scheduled_on,
+			      is_queued(task));
 	}
 	raw_spin_unlock(dom->lock);
 }
@@ -592,9 +607,10 @@ static void check_for_preempt(struct domain *dom)
 			ce->linked != entry->linked;
 		higher_prio = mc_preempt_needed(dom, ce->linked);
 
-		if (was_ghost)
+		if (was_ghost) {
 			preempt_cpu(entry, ce->linked);
-		else if (higher_prio)
+			start_crit(ce);
+		} else if (higher_prio)
 			preempt_crit(dom, ce);
 
 		if (was_ghost || higher_prio) {
@@ -695,7 +711,8 @@ static void job_completion(struct task_struct *task, int forced)
 	}
 
 	now = litmus_clock();
-	if (lt_before(get_user_release(task), now) || forced) {
+	if (lt_before(get_user_release(task), now)) {
+		TRACE_TASK(task, "Executable task going back to running\n");
 		set_rt_flags(task, RT_F_RUNNING);
 	}
 
@@ -990,6 +1007,9 @@ out:
 	return ret;
 }
 
+/*
+ * Caller must hold the entry lock.
+ */
 void pick_next_task(struct cpu_entry *entry)
 {
 	int i;
@@ -997,6 +1017,8 @@ void pick_next_task(struct cpu_entry *entry)
 	struct domain *dom;
 	struct task_struct *dtask, *ready_task;
 
+	STRACE("Picking next task\n");
+
 	for (i = 0; i < NUM_CRIT_LEVELS && !entry->linked; i++) {
 		ce = &entry->crit_entries[i];
 		dom = ce->domain;
@@ -1043,10 +1065,10 @@ void pick_next_task(struct cpu_entry *entry)
 static struct task_struct* mc_schedule(struct task_struct* prev)
 {
 	unsigned long flags;
-	struct crit_entry *ce;
+	int out_of_time, sleep, preempt, exists, blocks, global, lower;
 	struct cpu_entry* entry = &__get_cpu_var(cpus);
-	int out_of_time, sleep, preempt, exists, blocks, global, lower, update;
 	struct task_struct *next = NULL;
+	struct crit_entry *ce;
 
 	local_irq_save(flags);
@@ -1061,7 +1083,6 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
 	raw_spin_lock(&entry->lock);
 
-	/* Sanity checking */
 	BUG_ON(entry->scheduled && entry->scheduled != prev);
 	BUG_ON(entry->scheduled && !is_realtime(prev));
 	BUG_ON(is_realtime(prev) && !entry->scheduled);
 
@@ -1069,15 +1090,14 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
 	/* Determine state */
 	exists = entry->scheduled != NULL;
 	blocks = exists && !is_running(entry->scheduled);
-	out_of_time = exists && budget_enforced(entry->scheduled) &&
-		      budget_exhausted(entry->scheduled);
+	out_of_time = exists && budget_exhausted(entry->scheduled);
 	sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP;
 	global = exists && is_global_task(entry->scheduled);
 	preempt = entry->scheduled != entry->linked;
 	lower = exists && preempt && entry->linked &&
 		tsk_mc_crit(entry->scheduled) > tsk_mc_crit(entry->linked);
 
-	TRACE(TS " blocks:%d out_of_time:%d sleep:%d preempt:%d, now: %llu\n",
+	TRACE(TS " block:%d oot:%d sleep:%d preempt:%d, now: %llu\n",
 	      TA(prev), blocks, out_of_time, sleep, preempt, litmus_clock());
 
 	if (exists)
@@ -1085,7 +1105,6 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
 
 	raw_spin_unlock(&entry->lock);
 
-
 #ifdef CONFIG_PLUGIN_MC_REDIRECT
 	if (smp_processor_id() == interrupt_cpu)
 		fix_global_levels();
@@ -1109,47 +1128,39 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
 		job_arrival(entry->scheduled);
 	}
 
+	/* Call before processing signals so any subsequent signal will cause
+	 * a reschedule.
+	 */
+	sched_state_task_picked();
+
+	/* A remote processor unblocked one of our crit levels */
+	if (entry->crit_signal != NUM_CRIT_LEVELS) {
+		ce = &entry->crit_entries[entry->crit_signal];
+		check_for_preempt(ce->domain);
+		entry->crit_signal = NUM_CRIT_LEVELS;
+	}
+
 	raw_spin_lock(&entry->lock);
 
 	/* Pick next task if none is linked */
 	if (!entry->linked)
 		pick_next_task(entry);
 
-	/* Set this now so that any reschedule signals received after this
-	 * point will cause another reschedule
-	 */
-	sched_state_task_picked();
-
-	/* Ghost task acquired lock, is no longer ghost */
-	update = 0;
-	if (entry->lock_acquired < NUM_CRIT_LEVELS) {
-		ce = &entry->crit_entries[entry->lock_acquired];
-		if (ce->linked && !is_ghost(ce->linked) &&
-		    ce->linked != entry->linked) {
-			link_task_to_cpu(entry, ce->linked);
-			update = 1;
-		}
-		entry->lock_acquired = NUM_CRIT_LEVELS;
-	}
-
 	/* Schedule next task */
 	next = entry->linked;
 	if (next)
 		next->rt_param.scheduled_on = entry->cpu;
 	entry->will_schedule = next;
 
-	if (update)
-		update_crit_levels(entry); /* Will release lock */
-	else
-		raw_spin_unlock(&entry->lock);
+	raw_spin_unlock_irqrestore(&entry->lock, flags);
 
-	local_irq_restore(flags);
 	if (next) {
-		BUG_ON(!get_rt_job(next));
+		BUG_ON(!is_released(next, litmus_clock()));
 		TRACE_MC_TASK(next, "Picked this task\n");
 	} else if (exists && !next)
 		STRACE("CPU %d becomes idle at %llu\n",
 		       entry->cpu, litmus_clock());
+
 	return next;
 }
 
@@ -1370,23 +1381,45 @@ static inline void init_edf_domain(struct domain *dom, rt_domain_t *rt,
 #endif
 }
 
+/*
+ * Setup and send signal to CPU for resource acquisition. To avoid touching
+ * CPU locks, all CPU state modifications are delayed until the signal is
+ * processed.
+ */
 static void cpu_acquired(int cpu)
 {
 	struct cpu_entry *entry = &per_cpu(cpus, cpu);
-	STRACE("Lock acquired for cpu %d\n", cpu);
-	entry->lock_acquired = CRIT_LEVEL_B;
-	litmus_reschedule(entry->cpu);
+	struct crit_entry *ce = &entry->crit_entries[CRIT_LEVEL_B];
+
+	TRACE_CRIT_ENTRY(ce, "Acquired lock\n");
+
+	BUG_ON(!ce->linked);
+	BUG_ON(get_rt_flags(ce->linked) & RT_F_SLEEP);
+
+	set_rt_flags(ce->linked, RT_F_RUNNING);
+	sched_trace_task_resume(ce->linked);
+
+	if (ce->state == CS_BLOCKED) {
+		entry->crit_signal = CRIT_LEVEL_B;
+		/* Yes this is ok for race conditions, but only because no other
+		 * state will ever apply to a partitioned crit entry
+		 */
+		ce->state = CS_ACTIVE;
+		litmus_reschedule(cpu);
+	}
 }
 
 struct domain_data *ce_domain_for(int);
 static int __init init_mc(void)
 {
-	int cpu;
-	struct cpu_entry *entry;
-	struct domain_data *dom_data;
+	int cpu, name_size;
+	char *lock_name;
 	rt_domain_t *rt;
 	raw_spinlock_t *a_dom_lock, *b_dom_lock, *c_dom_lock; /* For lock debugger */
+	struct cpu_entry *entry;
+	struct domain_data *dom_data;
 	struct ce_dom_data *ce_data;
+	struct lock_class_key *lock_key;
 
 	for_each_online_cpu(cpu) {
 		entry = &per_cpu(cpus, cpu);
@@ -1395,9 +1428,15 @@ static int __init init_mc(void)
 		entry->cpu = cpu;
 		entry->scheduled = NULL;
 		entry->linked = NULL;
-		entry->lock_acquired = NUM_CRIT_LEVELS;
+		entry->crit_signal = NUM_CRIT_LEVELS;
 
+		/* Trick lockdep for CPU locks */
+		name_size = sizeof(*lock_name) * LITMUS_LOCKDEP_NAME_MAX_LEN;
+		lock_name = kmalloc(name_size, GFP_ATOMIC);
+		lock_key = kmalloc(sizeof(*lock_key), GFP_ATOMIC);
 		raw_spin_lock_init(&entry->lock);
+		LOCKDEP_DYNAMIC_ALLOC(&entry->lock, lock_key, lock_name,
+				      "entry%d", cpu);
 
 #ifdef CONFIG_PLUGIN_MC_REDIRECT
 		raw_spin_lock_init(&entry->redir_lock);
-- 
cgit v1.2.2