From 2a4e168d2932ff470b898a57794cd87ee1a3d2a4 Mon Sep 17 00:00:00 2001
From: Jonathan Herman <hermanjl@cs.unc.edu>
Date: Fri, 23 Sep 2011 13:29:37 -0400
Subject: transferring machines

---
 include/litmus/sched_plugin.h |  2 +
 litmus/sched_mc.c             | 99 ++++++++++++++++++++++++++++---------------
 2 files changed, 67 insertions(+), 34 deletions(-)

diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
index 6e7cabdddae8..32c23974e45a 100644
--- a/include/litmus/sched_plugin.h
+++ b/include/litmus/sched_plugin.h
@@ -11,6 +11,8 @@
 #include <litmus/locking.h>
 #endif
 
+struct litmus_lock;
+
 /************************ setup/tear down ********************/
 
 typedef long (*activate_plugin_t) (void);
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 6ece904ff257..3e3aaa126d3f 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -260,7 +260,59 @@ static void job_arrival(struct task_struct *task)
 	}
 }
 
-static void 1
+/**
+ * low_prio_arrival() - If CONFIG_PLUGIN_MC_REDIRECT is enabled, will
+ * redirect lower priority job_arrival work to the interrupt_cpu.
+ */
+static void low_prio_arrival(struct task_struct *task)
+{
+	cpu_entry_t *entry;
+
+#ifdef CONFIG_PLUGIN_MC_REDIRECT
+#ifndef CONFIG_PLUGIN_MC_REDIRECT_ALL
+	if (!is_global_task(task))
+		goto arrive;
+#endif
+	if (smp_processor_id() != interrupt_cpu) {
+		entry = cpus[smp_processor_id()];
+		raw_spin_lock(&entry->redir_lock);
+		list_add(&tsk_rt(task)->list, &entry->redir);
+		raw_spin_unlock(&entry->redir_lock);
+		litmus_reschedule(interrupt_cpu);
+	} else
+#endif
+	{
+arrive:
+		job_arrival(task);
+	}
+}
+
+#ifdef CONFIG_PLUGIN_MC_REDIRECT
+/**
+ * fix_global_levels() - Execute redirected job arrivals on this cpu.
+ */
+static void fix_global_levels(void)
+{
+	int c;
+	cpu_entry_t *e;
+	struct list_head *pos, *safe;
+	struct task_struct *t;
+
+	TRACE("Fixing global levels\n");
+	for_each_online_cpu(c) {
+		e = cpus[c];
+		raw_spin_lock(&e->redir_lock);
+		list_for_each_safe(pos, safe, &e->redir) {
+			t = list_entry(pos, struct task_struct, rt_param.list);
+			TRACE_TASK(t, "Arriving yo");
+			BUG_ON(is_queued(t));
+			list_del_init(pos);
+			job_arrival(t);
+		}
+		raw_spin_unlock(&e->redir_lock);
+	}
+}
+#endif
 
 /**
  * link_task_to_cpu() - Logically run a task on a CPU.
@@ -312,33 +364,6 @@ static void preempt(domain_t *dom, crit_entry_t *ce)
 	}
 }
 
-#ifdef CONFIG_PLUGIN_MC_REDIRECT
-/**
- * fix_global_levels() - Execute redirected job arrivals on this cpu.
- */
-static void fix_global_levels(void)
-{
-	int c;
-	cpu_entry_t *e;
-	struct list_head *pos, *safe;
-	struct task_struct *t;
-
-	TRACE("Fixing global levels\n");
-	for_each_online_cpu(c) {
-		e = cpus[c];
-		raw_spin_lock(&e->redir_lock);
-		list_for_each_safe(pos, safe, &e->redir) {
-			t = list_entry(pos, struct task_struct, rt_param.list);
-			TRACE_TASK(t, "Arriving yo");
-			BUG_ON(is_queued(t));
-			list_del_init(pos);
-			job_arrival(t);
-		}
-		raw_spin_unlock(&e->redir_lock);
-	}
-}
-#endif
-
 /**
  * update_crit_levels() - Update criticality entries for the new cpu state.
  * This should be called after a new task has been linked to @entry.
@@ -366,8 +391,8 @@ static void update_crit_levels(cpu_entry_t *entry)
 	for (i = level + 1; i < NUM_CRIT_LEVELS; i++) {
 		ce = &entry->crit_entries[i];
 		TRACE("Checking %s\n", ce->domain->name);
-		if (!tasks[i]) continue;
-		job_arrival(tasks[i]);
+		if (tasks[i])
+			low_prio_arrival(tasks[i]);
 	}
 }
 
@@ -664,7 +689,7 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 	domain_t *dom;
 	crit_entry_t *ce;
 	cpu_entry_t* entry = cpus[smp_processor_id()];
-	int i, out_of_time, sleep, preempt, exists, blocks, global;
+	int i, out_of_time, sleep, preempt, exists, blocks, global, lower;
 	struct task_struct *dtask = NULL, *ready_task = NULL, *next = NULL;
 
 	local_irq_save(flags);
@@ -682,6 +707,8 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 	sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP;
 	global = exists && is_global_task(entry->scheduled);
 	preempt = entry->scheduled != entry->linked;
+	lower = preempt && entry->linked &&
+		tsk_mc_crit(entry->scheduled) < tsk_mc_crit(entry->linked);
 
 	if (exists) {
 		entry->scheduled->rt_param.scheduled_on = NO_CPU;
@@ -705,10 +732,14 @@ static struct task_struct* mc_schedule(struct task_struct * prev)
 	if ((out_of_time || sleep) && !blocks && !preempt)
 		job_completion(entry->scheduled, !sleep);
 	/* Global scheduled tasks must wait for a deschedule before they
-	 * can rejoin a global domain. Requeue them here.
+	 * can rejoin the global state. Rejoin them here.
 	 */
-	else if (global && preempt && !blocks)
-		job_arrival(entry->scheduled);
+	else if (global && preempt && !blocks) {
+		if (lower)
+			low_prio_arrival(entry->scheduled);
+		else
+			job_arrival(entry->scheduled);
+	}
 
 	/* Pick next task if none is linked */
 	raw_spin_lock(&entry->lock);
--
cgit v1.2.2
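The pattern this patch introduces is an enqueue-then-drain handoff: low_prio_arrival() pushes lower-priority arrival work onto the local CPU's redir list under redir_lock and pokes interrupt_cpu with litmus_reschedule(), and fix_global_levels() later walks every CPU's list and performs the deferred job_arrival() calls. Below is a minimal user-space sketch of the same pattern using pthreads instead of kernel primitives; every name in it (struct arrival, producer, drain_redirects) is an illustrative assumption, not LITMUS^RT API, which appears only in the comments.

/*
 * User-space analogue of the patch's redirect-queue pattern:
 * producer threads (standing in for other CPUs) enqueue arrival work
 * onto a shared locked list (cf. low_prio_arrival()), and a designated
 * handler drains it (cf. fix_global_levels()). Compile: gcc -pthread.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct arrival {
	int task_id;               /* stands in for struct task_struct */
	struct arrival *next;
};

static pthread_mutex_t redir_lock = PTHREAD_MUTEX_INITIALIZER;
static struct arrival *redir_head; /* analogue of entry->redir */

/* Analogue of job_arrival(): runs only on the draining thread. */
static void handle_arrival(int task_id)
{
	printf("arrival processed for task %d\n", task_id);
}

/* Analogue of low_prio_arrival(): defer the work instead of doing it. */
static void *producer(void *arg)
{
	struct arrival *a = malloc(sizeof(*a));

	if (!a)
		return NULL;
	a->task_id = (int)(long)arg;
	pthread_mutex_lock(&redir_lock);
	a->next = redir_head;      /* cf. list_add(..., &entry->redir) */
	redir_head = a;
	pthread_mutex_unlock(&redir_lock);
	/* A real plugin would now call litmus_reschedule(interrupt_cpu);
	 * here the handler simply drains after the producers finish. */
	return NULL;
}

/* Analogue of fix_global_levels(): take the pending work and run it. */
static void drain_redirects(void)
{
	struct arrival *a, *next;

	pthread_mutex_lock(&redir_lock);
	a = redir_head;            /* detach the whole list at once */
	redir_head = NULL;
	pthread_mutex_unlock(&redir_lock);

	for (; a; a = next) {      /* cf. list_for_each_safe() */
		next = a->next;
		handle_arrival(a->task_id);
		free(a);
	}
}

int main(void)
{
	pthread_t tid[4];
	long i;

	for (i = 0; i < 4; i++)
		pthread_create(&tid[i], NULL, producer, (void *)i);
	for (i = 0; i < 4; i++)
		pthread_join(tid[i], NULL);
	drain_redirects();
	return 0;
}

One deliberate deviation: the sketch detaches the whole list before processing it so the mutex hold time stays short, whereas fix_global_levels() drains each queue while holding redir_lock, which appears safe there because the deferred job_arrival() work does not touch the redirect lists themselves.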