From efe3e4c46fe86c815ada23a4cfbc89b6e55e14f1 Mon Sep 17 00:00:00 2001
From: Glenn Elliott
Date: Fri, 6 Dec 2013 13:20:27 -0500
Subject: Add PGM support to C-FL

This patch applies to C-FL the same PGM changes that were made to C-EDF.
---
 litmus/sched_cfl_split.c | 54 +++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 53 insertions(+), 1 deletion(-)

(limited to 'litmus/sched_cfl_split.c')

diff --git a/litmus/sched_cfl_split.c b/litmus/sched_cfl_split.c
index 7d9302eb296b..af2b28230c72 100644
--- a/litmus/sched_cfl_split.c
+++ b/litmus/sched_cfl_split.c
@@ -49,6 +49,10 @@
 #include
 #endif
 
+#ifdef CONFIG_SCHED_PGM
+#include
+#endif
+
 /* to configure the cluster size */
 #include
 #include
@@ -587,6 +591,49 @@ static struct task_struct* cflsplit_schedule(struct task_struct * prev)
 		TRACE_TASK(prev, "will be preempted by %s/%d\n",
 			   entry->linked->comm, entry->linked->pid);
 
+#ifdef CONFIG_SCHED_PGM
+	if (exists) {
+		if (is_pgm_sending(entry->scheduled)) {
+			if (!is_pgm_satisfied(entry->scheduled)) {
+				if (!is_priority_boosted(entry->scheduled)) {
+					TRACE_TASK(entry->scheduled, "is sending PGM tokens and needs boosting.\n");
+					BUG_ON(is_pgm_satisfied(entry->scheduled));
+
+					/* We are either sending tokens or waiting for tokens.
+					   If waiting: Boost priority so we'll be scheduled
+						immediately when needed tokens arrive.
+					   If sending: Boost priority so no one (specifically, our
+						consumers) will preempt us while signalling the token
+						transmission.
+					 */
+					tsk_rt(entry->scheduled)->priority_boosted = 1;
+					tsk_rt(entry->scheduled)->boost_start_time = litmus_clock();
+
+					if (likely(!blocks)) {
+						unlink(entry->scheduled);
+						cflsplit_job_arrival(entry->scheduled);
+					}
+				}
+			}
+			else { /* sending is satisfied */
+				tsk_rt(entry->scheduled)->ctrl_page->pgm_sending = 0;
+				tsk_rt(entry->scheduled)->ctrl_page->pgm_satisfied = 0;
+
+				if (is_priority_boosted(entry->scheduled)) {
+					TRACE_TASK(entry->scheduled,
+							"is done sending PGM tokens must relinquish boosting.\n");
+					/* clear boosting */
+					tsk_rt(entry->scheduled)->priority_boosted = 0;
+					if (likely(!blocks)) {
+						/* recheck priority */
+						unlink(entry->scheduled);
+						cflsplit_job_arrival(entry->scheduled);
+					}
+				}
+			}
+		}
+	}
+#endif
 
 	/* If a task blocks we have no choice but to reschedule.
 	 */
@@ -639,12 +686,13 @@ static struct task_struct* cflsplit_schedule(struct task_struct * prev)
 			entry->scheduled->rt_param.scheduled_on = NO_CPU;
 			TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n");
 		}
-	} else
+	} else {
 		/* Only override Linux scheduler if we have a real-time task
 		 * scheduled that needs to continue.
 		 */
 		if (exists)
 			next = prev;
+	}
 
 	sched_state_task_picked();
 	raw_spin_unlock(&cluster->cluster_lock);
@@ -751,6 +799,10 @@ static void cflsplit_task_wake_up(struct task_struct *task)
 		cflsplit_release_at(task, now);
 		sched_trace_task_release(task);
 	}
+	if (is_pgm_waiting(task)) {
+		/* shift out release/deadline, if needed */
+		setup_pgm_release(task);
+	}
 	cflsplit_job_arrival(task);
 	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
 }
-- 
cgit v1.2.2
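
Editor's note: the hunk added to cflsplit_schedule() boosts a PGM producer's priority while it is sending tokens and drops the boost once the transmission is satisfied. The standalone sketch below is not part of the patch; it only distills that boost/unboost decision so it can be compiled and stepped through in isolation. All names here (struct pgm_task, requeue(), pgm_adjust_boost()) are hypothetical stand-ins for the LITMUS^RT fields and helpers the patch touches, under the assumption that the real scheduler re-evaluates a task's priority by unlinking and re-adding it.

/*
 * Illustrative sketch only -- NOT part of the patch above.
 */
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical per-task PGM state, mirroring the fields the patch touches. */
struct pgm_task {
	bool sending;          /* task is emitting tokens (ctrl_page->pgm_sending) */
	bool satisfied;        /* token transmission finished (ctrl_page->pgm_satisfied) */
	bool priority_boosted; /* tsk_rt(t)->priority_boosted */
};

/* Stand-in for unlink() + cflsplit_job_arrival(): recheck the task's priority. */
static void requeue(struct pgm_task *t)
{
	printf("requeue: boosted=%d\n", t->priority_boosted);
}

/*
 * Decision made on the scheduling path: boost a sender until its token
 * transmission is satisfied, then clear the boost and recheck priority.
 * 'blocks' mirrors the patch: a blocking task is not requeued here.
 */
static void pgm_adjust_boost(struct pgm_task *t, bool blocks)
{
	if (!t->sending)
		return;

	if (!t->satisfied) {
		if (!t->priority_boosted) {
			t->priority_boosted = true;   /* boost: consumers must not preempt us */
			if (!blocks)
				requeue(t);
		}
	} else {
		t->sending = false;                   /* transmission done */
		t->satisfied = false;
		if (t->priority_boosted) {
			t->priority_boosted = false;  /* relinquish boosting */
			if (!blocks)
				requeue(t);
		}
	}
}

int main(void)
{
	struct pgm_task t = { .sending = true };

	pgm_adjust_boost(&t, false);  /* sending, unsatisfied -> boosted */
	t.satisfied = true;
	pgm_adjust_boost(&t, false);  /* satisfied -> boost cleared */
	return 0;
}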