From 7bbf3205ae1979cb41fd2a0dfdd103656bf8e84e Mon Sep 17 00:00:00 2001
From: Glenn Elliott
Date: Thu, 21 Mar 2013 18:43:43 -0400
Subject: SOBLIV draining support for C-EDF.

Adds support for suspension-oblivious budget draining to C-EDF.
Also changes how jobs with exhausted budget in C-EDF are treated:
jobs are early-released until they catch up.
---
 include/litmus/budget.h      |  27 ++++--
 include/litmus/litmus.h      |   6 +-
 include/litmus/locking.h     |   8 ++
 include/litmus/rt_param.h    |  30 +++++--
 include/litmus/sched_trace.h |   9 +-
 litmus/budget.c              |  77 +++++++++++++++--
 litmus/edf_common.c          |   2 +
 litmus/litmus.c              |   2 -
 litmus/sched_cedf.c          | 196 ++++++++++++++++++++++++++++++++++++++-----
 litmus/sched_gsn_edf.c       |  13 ++-
 litmus/sched_pfp.c           |  13 ++-
 litmus/sched_psn_edf.c       |  13 ++-
 litmus/sched_task_trace.c    |   4 +-
 13 files changed, 337 insertions(+), 63 deletions(-)

diff --git a/include/litmus/budget.h b/include/litmus/budget.h
index 2a3511245f7a..72f04777e0b0 100644
--- a/include/litmus/budget.h
+++ b/include/litmus/budget.h
@@ -54,15 +54,17 @@ struct enforcement_timer
 
 typedef void (*scheduled_t)(struct task_struct* t);
 typedef void (*blocked_t)(struct task_struct* t);
-typedef void (*preempt_or_sleep_t)(struct task_struct* t);
-typedef void (*exhausted_t)(struct task_struct* t);
+typedef void (*preempt_t)(struct task_struct* t);
+typedef void (*sleep_t)(struct task_struct* t);
+typedef enum hrtimer_restart (*exhausted_t)(struct task_struct* t);
 typedef void (*exit_t)(struct task_struct* t);
 
 struct budget_tracker_ops
 {
     scheduled_t on_scheduled;   /* called from litmus_schedule(). */
     blocked_t   on_blocked;     /* called from plugin::schedule() */
-    preempt_or_sleep_t on_preempt_or_sleep; /* called from plugin::schedule() */
+    preempt_t   on_preempt;     /* called from plugin::schedule() */
+    sleep_t     on_sleep;       /* called from plugin::schedule() */
 
     exit_t      on_exit;        /* task exiting rt mode */
 
@@ -84,15 +86,30 @@ enum BT_FLAGS
 };
 
 /* Functions for simple DRAIN_SIMPLE policy common
- * to every scheduler. Scheduler must provided
+ * to every scheduler. Scheduler must provide
  * implementation for simple_on_exhausted().
  */
 void simple_on_scheduled(struct task_struct* t);
 void simple_on_blocked(struct task_struct* t);
-void simple_on_preempt_or_sleep(struct task_struct* t);
+void simple_on_preempt(struct task_struct* t);
+void simple_on_sleep(struct task_struct* t);
 void simple_on_exit(struct task_struct* t);
 
+/* Functions for DRAIN_SOBLIV policy common
+ * to every scheduler. Scheduler must provide
+ * implementation for sobliv_on_exhausted().
+ *
+ * Limitation: Quantum budget tracking is unsupported.
+ */
+void sobliv_on_scheduled(struct task_struct* t);
+void sobliv_on_blocked(struct task_struct* t);
+void sobliv_on_sleep(struct task_struct* t);
+/* Use the DRAIN_SIMPLE implementations */
+#define sobliv_on_preempt simple_on_preempt
+#define sobliv_on_exit simple_on_exit
+
+
 void init_budget_tracker(struct budget_tracker* bt, const struct budget_tracker_ops* ops);
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index f6ea5f6e80ee..ce24e62eee81 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -70,7 +70,11 @@ void litmus_exit_task(struct task_struct *tsk);
 #define get_period(t)       (tsk_rt(t)->task_params.period)
 #define get_release(t)      (tsk_rt(t)->job_params.release)
 #define get_lateness(t)     (tsk_rt(t)->job_params.lateness)
-#define get_budget_timer(t) (tsk_rt(t)->job_params.budget_timer)
+#define get_backlog(t)      (tsk_rt(t)->job_params.backlog)
+
+#define has_backlog(t)      (get_backlog(t) != 0)
+
+#define get_budget_timer(t) (tsk_rt(t)->budget)
 
 #define effective_priority(t) ((!(tsk_rt(t)->inh_task)) ? t : tsk_rt(t)->inh_task)
 #define base_priority(t) (t)
diff --git a/include/litmus/locking.h b/include/litmus/locking.h
index 3ae6692dbe95..962ad5e6726a 100644
--- a/include/litmus/locking.h
+++ b/include/litmus/locking.h
@@ -229,5 +229,13 @@ struct litmus_lock_ops {
 void suspend_for_lock(void);
 int wake_up_for_lock(struct task_struct* t);
 
+/* TODO: is this check thread safe? */
+#ifndef CONFIG_LITMUS_NESTED_LOCKING
+#define holds_locks(tsk) \
+    (tsk_rt(tsk)->num_locks_held || tsk_rt(tsk)->num_local_locks_held)
+#else
+#define holds_locks(tsk) \
+    (tsk_rt(tsk)->num_locks_held || tsk_rt(tsk)->num_local_locks_held || !binheap_empty(&tsk_rt(tsk)->hp_blocked_tasks))
 #endif
+#endif
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 887075b908ca..499ecd899fcd 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -257,6 +257,19 @@ struct rt_job {
      * Increase this sequence number when a job is released.
      */
     unsigned int job_no;
+
+    /* Incremented each time a job is forced to complete by
+     * budget exhaustion. While backlog is non-zero, the next
+     * job is early-released _without_ pushing back its
+     * deadline, and backlog is decremented once per early
+     * release. This behavior continues until backlog == 0.
+     */
+    unsigned int backlog;
+
+    /* denotes if the current job is a backlogged job that was caused
+     * by an earlier budget exhaustion */
+    unsigned int is_backlogged_job:1;
 };
 
 struct pfair_param;
@@ -387,6 +400,14 @@ struct rt_param {
     unsigned int num_local_locks_held;
 #endif
 
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+    raw_spinlock_t hp_blocked_tasks_lock;
+    struct binheap hp_blocked_tasks;
+
+    /* pointer to lock upon which is currently blocked */
+    struct litmus_lock* blocked_lock;
+#endif
+
     /* user controlled parameters */
     struct rt_task task_params;
 
@@ -401,15 +422,6 @@ struct rt_param {
      */
     struct task_struct* inh_task;
 
-#ifdef CONFIG_LITMUS_NESTED_LOCKING
-    raw_spinlock_t hp_blocked_tasks_lock;
-    struct binheap hp_blocked_tasks;
-
-    /* pointer to lock upon which is currently blocked */
-    struct litmus_lock* blocked_lock;
-#endif
-
-
 #ifdef CONFIG_REALTIME_AUX_TASKS
     unsigned int is_aux_task:1;
     unsigned int aux_ready:1;
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
index 0785db39b2fc..9a7e6fa1e6b6 100644
--- a/include/litmus/sched_trace.h
+++ b/include/litmus/sched_trace.h
@@ -52,11 +52,10 @@ struct st_switch_away_data { /* A process was switched away from on a given CPU.
 struct st_completion_data { /* A job completed. */
     u64 when;
-    u8  forced:1;   /* Set to 1 if job overran and kernel advanced to the
-                     * next task automatically; set to 0 otherwise.
-                     */
-    u8  __uflags:7;
-    u8  __unused[7];
+    u64 backlog_remaining:62;
+    u64 was_backlog_job:1;
+    u64 forced:1;   /* Set to 1 if job overran and kernel advanced to the
+                     * next task automatically; set to 0 otherwise. */
 } __attribute__((packed));
 
 struct st_block_data { /* A task blocks. */
diff --git a/litmus/budget.c b/litmus/budget.c
index 559c54709acc..15de83bc584e 100644
--- a/litmus/budget.c
+++ b/litmus/budget.c
@@ -38,10 +38,13 @@ inline static void cancel_enforcement_timer(struct task_struct* t)
     }
 }
 
+
+
 inline static void arm_enforcement_timer(struct task_struct* t)
 {
     struct enforcement_timer* et;
-    lt_t when_to_fire;
+    lt_t when_to_fire, remaining_budget;
+    lt_t now;
     unsigned long flags;
 
     BUG_ON(!t);
@@ -80,9 +83,11 @@ inline static void arm_enforcement_timer(struct task_struct* t)
         goto out;
     }
 
-    when_to_fire = litmus_clock() + budget_remaining(t);
+    now = litmus_clock();
+    remaining_budget = budget_remaining(t);
+    when_to_fire = now + remaining_budget;
 
-    TRACE_TASK(t, "bremaining: %ld, when_to_fire: %ld\n", budget_remaining(t), when_to_fire);
+    TRACE_TASK(t, "budget remaining: %ld, when_to_fire: %ld\n", remaining_budget, when_to_fire);
 
     __hrtimer_start_range_ns(&et->timer,
         ns_to_ktime(when_to_fire),
@@ -107,6 +112,9 @@ void send_sigbudget(struct task_struct* t)
     }
 }
 
+/*
+ * DRAIN_SIMPLE
+ */
 
 void simple_on_scheduled(struct task_struct* t)
 {
@@ -118,7 +126,7 @@ void simple_on_scheduled(struct task_struct* t)
     }
 }
 
-static void __simple_on_unscheduled(struct task_struct* t)
+inline static void __simple_on_unscheduled(struct task_struct* t)
 {
     BUG_ON(!t);
 
@@ -132,7 +140,12 @@ void simple_on_blocked(struct task_struct* t)
     __simple_on_unscheduled(t);
 }
 
-void simple_on_preempt_or_sleep(struct task_struct* t)
+void simple_on_preempt(struct task_struct* t)
+{
+    __simple_on_unscheduled(t);
+}
+
+void simple_on_sleep(struct task_struct* t)
 {
     __simple_on_unscheduled(t);
 }
@@ -142,12 +155,53 @@ void simple_on_exit(struct task_struct* t)
     __simple_on_unscheduled(t);
 }
 
+/*
+ * DRAIN_SOBLIV
+ */
+void sobliv_on_scheduled(struct task_struct* t)
+{
+    BUG_ON(!t);
+
+    if (!bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)) {
+        if (tsk_rt(t)->budget.timer.armed) {
+            TRACE_TASK(t, "budget timer already armed.\n");
+        }
+        else {
+            arm_enforcement_timer(t);
+        }
+    }
+}
+
+void sobliv_on_blocked(struct task_struct* t)
+{
+    /* NOOP */
+    TRACE_TASK(t, "sobliv: budget drains while suspended.\n");
+}
+
+void sobliv_on_sleep(struct task_struct* t)
+{
+    if (budget_precisely_tracked(t)) {
+        /* Kludge: this callback runs before the job_completion logic, so
+         * we must determine for ourselves whether a backlog remains once
+         * this job completes (it is completing, since sleep is asserted).
+         */
+        int no_backlog = (!has_backlog(t) || /* no backlog */
+            /* the last backlogged job is completing */
+            (get_backlog(t) == 1 && tsk_rt(t)->job_params.is_backlogged_job));
+        if (no_backlog)
+            cancel_enforcement_timer(t);
+        else
+            TRACE_TASK(t, "not cancelling timer because there is time for backlogged work.\n");
+    }
+}
 
 static enum hrtimer_restart __on_timeout(struct hrtimer *timer)
 {
+    enum hrtimer_restart restart;
     unsigned long flags;
+
+
     struct budget_tracker* bt =
         container_of(
             container_of(timer,
@@ -168,9 +222,13 @@ static enum hrtimer_restart __on_timeout(struct hrtimer *timer)
     tsk_rt(t)->budget.timer.armed = 0;
     raw_spin_unlock_irqrestore(&bt->timer.lock, flags);
 
-    bt->ops->on_exhausted(t);
+    restart = bt->ops->on_exhausted(t);
+
+    raw_spin_lock_irqsave(&bt->timer.lock, flags);
+    tsk_rt(t)->budget.timer.armed = (restart == HRTIMER_RESTART);
+    raw_spin_unlock_irqrestore(&bt->timer.lock, flags);
 
-    return HRTIMER_NORESTART;
+    return restart;
 }
 
 
@@ -181,7 +239,8 @@ void init_budget_tracker(struct budget_tracker* bt, const struct budget_tracker_
 
     BUG_ON(!ops->on_scheduled);
     BUG_ON(!ops->on_blocked);
-    BUG_ON(!ops->on_preempt_or_sleep);
+    BUG_ON(!ops->on_preempt);
+    BUG_ON(!ops->on_sleep);
     BUG_ON(!ops->on_exhausted);
 
     memset(bt, 0, sizeof(*bt));
@@ -189,4 +248,4 @@ void init_budget_tracker(struct budget_tracker* bt, const struct budget_tracker_
     hrtimer_init(&bt->timer.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
     bt->timer.timer.function = __on_timeout;
     bt->ops = ops;
-}
\ No newline at end of file
+}
diff --git a/litmus/edf_common.c b/litmus/edf_common.c
index 52ccac998142..76ed1056ef6f 100644
--- a/litmus/edf_common.c
+++ b/litmus/edf_common.c
@@ -214,6 +214,8 @@ klmirqd_tie_break:
      */
     int pid_break;
 
+    /* TODO: INCORPORATE job_params::backlog INTO TIE-BREAKING */
+
 #if defined(CONFIG_EDF_TIE_BREAK_LATENESS)
     /* Tie break by lateness. Jobs with greater lateness get
      * priority. This should spread tardiness across all tasks,
diff --git a/litmus/litmus.c b/litmus/litmus.c
index e8130e362c84..10d9e545a831 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -442,8 +442,6 @@ static void reinit_litmus_state(struct task_struct* p, int restore)
     binheap_order_t prio_order = NULL;
 #endif
 
-    TRACE_TASK(p, "reinit_litmus_state: restore = %d\n", restore);
-
     if (restore) {
         /* Save user-space provided configuration data
          * and allocated page.
          */
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 8fe646f1f0c5..fd1b80ac6090 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -315,7 +315,8 @@ static noinline void requeue(struct task_struct* task)
     /* sanity check before insertion */
     BUG_ON(is_queued(task));
 
-    if (is_early_releasing(task) || is_released(task, litmus_clock())) {
+    if (is_early_releasing(task) || is_released(task, litmus_clock()) ||
+        tsk_rt(task)->job_params.is_backlogged_job) {
 #ifdef CONFIG_REALTIME_AUX_TASKS
         if (unlikely(tsk_rt(task)->is_aux_task && task->state != TASK_RUNNING && !tsk_rt(task)->aux_ready)) {
             /* aux_task probably transitioned to real-time while it was blocked */
@@ -327,7 +328,7 @@ static noinline void requeue(struct task_struct* task)
         __add_ready(&cluster->domain, task);
     }
     else {
-        TRACE_TASK(task, "not requeueing non-yet-released job\n");
+        TRACE_TASK(task, "not requeueing not-yet-released job\n");
     }
 }
 
@@ -413,18 +414,59 @@ static void cedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 static noinline void job_completion(struct task_struct *t, int forced)
 {
     int do_release = 0;
+    int do_backlogged_job = 0;
     lt_t now;
 
+    BUG_ON(!t);
+
+    now = litmus_clock();
+
+    /* DO BACKLOG TRACKING */
+
+    if (get_release_policy(t) != SPORADIC) {
+        /* Only jobs that we know will call sleep_next_job() can use
+         * backlogging. */
+        if (!forced) {
+            /* Job completed with budget remaining: was it a backlogged
+             * job that completed? */
+            if (tsk_rt(t)->job_params.is_backlogged_job) {
+                BUG_ON(!get_backlog(t));
+                --get_backlog(t);
+
+                TRACE_TASK(t, "completed backlogged job\n");
+            }
+        }
+        else {
+            /* budget was exhausted - force early release */
+            ++get_backlog(t);
+            TRACE_TASK(t, "adding backlogged job\n");
+        }
+        do_backlogged_job = has_backlog(t);
+        TRACE_TASK(t, "number of backlogged jobs: %u\n",
+            get_backlog(t));
+    }
+
+
+    /* SETUP FOR THE NEXT JOB */
+
     sched_trace_task_completion(t, forced);
 
-    now = litmus_clock();
-    TRACE_TASK(t, "job_completion() at %llu.\n", now);
+    TRACE_TASK(t, "job_completion() at %llu (forced = %d).\n",
+        now, forced);
 
     /* set flags */
     tsk_rt(t)->completed = 1;
-    /* prepare for next period */
-    prepare_for_next_period(t);
+
+    if (!forced && do_backlogged_job) {
+        /* don't advance deadline/refresh budget. use the remaining budget
+         * for the backlogged job. */
+    }
+    else {
+        if (do_backlogged_job) {
+            TRACE_TASK(t, "refreshing budget with early release for backlogged job.\n");
+        }
+
+        prepare_for_next_period(t);
+    }
 
     do_release = (is_early_releasing(t) || is_released(t, now));
     if (do_release) {
@@ -437,14 +479,30 @@ static noinline void job_completion(struct task_struct *t, int forced)
     /* release or arm next job */
     tsk_rt(t)->completed = 0;
     if (is_running(t)) {
-        if (!do_release)
-            add_release(&task_cpu_cluster(t)->domain, t);
-        else
+        /* is our next job a backlogged job? */
+        if (do_backlogged_job) {
+            TRACE_TASK(t, "next job is a backlogged job.\n");
+            tsk_rt(t)->job_params.is_backlogged_job = 1;
+        }
+        else {
+            TRACE_TASK(t, "next job is a regular job.\n");
+            tsk_rt(t)->job_params.is_backlogged_job = 0;
+        }
+
+        if (do_release || do_backlogged_job) {
             cedf_job_arrival(t);
+        }
+        else {
+            add_release(&task_cpu_cluster(t)->domain, t);
+        }
+    }
+    else {
+        BUG_ON(!forced);
+        TRACE_TASK(t, "job exhausted budget while sleeping\n");
     }
 }
 
-static void cedf_simple_on_exhausted(struct task_struct *t)
+static enum hrtimer_restart cedf_simple_on_exhausted(struct task_struct *t)
 {
     /* Assumption: t is scheduled on the CPU executing this callback */
 
@@ -458,18 +516,94 @@ static void cedf_simple_on_exhausted(struct task_struct *t)
             /* np tasks will be preempted when they become
              * preemptable again
              */
+            TRACE_TASK(t, "is preemptable => FORCE_RESCHED\n");
+
+            litmus_reschedule_local();
             set_will_schedule();
-            TRACE("cedf_scheduler_tick: "
-                "%d is preemptable "
-                " => FORCE_RESCHED\n", t->pid);
         }
         else if (is_user_np(t)) {
-            TRACE("cedf_scheduler_tick: "
-                "%d is non-preemptable, "
-                "preemption delayed.\n", t->pid);
+            TRACE_TASK(t, "is non-preemptable, preemption delayed.\n");
+
+            request_exit_np(t);
         }
     }
+
+    return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart cedf_sobliv_on_exhausted(struct task_struct *t)
+{
+    enum hrtimer_restart restart = HRTIMER_NORESTART;
+
+    /* t may or may not be scheduled */
+
+    if (budget_signalled(t) && !bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)) {
+        /* signal exhaustion */
+
+        /* Tasks should block SIG_BUDGET if they cannot gracefully respond to
+         * the signal while suspended. SIG_BUDGET is an rt-signal, so it will
+         * be queued and received when SIG_BUDGET is unblocked */
+        send_sigbudget(t); /* will set BTF_SIG_BUDGET_SENT */
+    }
+
+    if (budget_enforced(t) && !bt_flag_is_set(t, BTF_BUDGET_EXHAUSTED)) {
+        if (is_np(t) && is_user_np(t)) {
+            TRACE_TASK(t, "is non-preemptable, preemption delayed.\n");
+
+            bt_flag_set(t, BTF_BUDGET_EXHAUSTED);
+            request_exit_np(t);
+        }
+        else {
+            /* where do we need to call resched? */
+            int cpu = (tsk_rt(t)->linked_on != NO_CPU) ?
+                tsk_rt(t)->linked_on : tsk_rt(t)->scheduled_on;
+            if (cpu == smp_processor_id()) {
+                TRACE_TASK(t, "is preemptable => FORCE_RESCHED\n");
+
+                bt_flag_set(t, BTF_BUDGET_EXHAUSTED);
+                litmus_reschedule_local();
+                set_will_schedule();
+            }
+            else if (cpu != NO_CPU) {
+                TRACE_TASK(t, "is preemptable on remote cpu (%d) => FORCE_RESCHED\n", cpu);
+
+                bt_flag_set(t, BTF_BUDGET_EXHAUSTED);
+                litmus_reschedule(cpu);
+            }
+            else {
+                BUG_ON(cpu != NO_CPU);
+#ifdef CONFIG_LITMUS_LOCKING
+                if (holds_locks(t)) {
+                    /* TODO: Integration with Litmus locking protocols */
+                    TRACE_TASK(t, "prevented lock holder from postponing deadline.\n");
+                }
+                else {
+#endif
+                    /* force job completion */
+                    cedf_domain_t* cluster = task_cpu_cluster(t);
+                    unsigned long flags;
+                    lt_t remaining;
+
+                    TRACE_TASK(t, "blocked, postponing deadline\n");
+
+                    raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
+                    job_completion(t, 1); /* refreshes budget */
+
+                    hrtimer_forward_now(&get_budget_timer(t).timer.timer,
+                        ns_to_ktime(budget_remaining(t)));
+                    remaining = hrtimer_get_expires_ns(&get_budget_timer(t).timer.timer);
+
+                    raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
+
+                    TRACE_TASK(t, "rearmed timer to %ld\n", remaining);
+                    restart = HRTIMER_RESTART;
+#ifdef CONFIG_LITMUS_LOCKING
+                }
+#endif
+            }
+        }
+    }
+
+    return restart;
+}
 
 
@@ -873,8 +1007,10 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
     if (tsk_rt(prev)->budget.ops) {
         if (blocks)
             tsk_rt(prev)->budget.ops->on_blocked(prev);
-        else if (preempt || sleep)
-            tsk_rt(prev)->budget.ops->on_preempt_or_sleep(prev);
+        else if (sleep)
+            tsk_rt(prev)->budget.ops->on_sleep(prev);
+        else if (preempt)
+            tsk_rt(prev)->budget.ops->on_preempt(prev);
     }
 
     /* If a task blocks we have no choice but to reschedule.
@@ -1156,12 +1292,23 @@
 static struct budget_tracker_ops cedf_drain_simple_ops =
 {
     .on_scheduled = simple_on_scheduled,
     .on_blocked = simple_on_blocked,
-    .on_preempt_or_sleep = simple_on_preempt_or_sleep,
+    .on_preempt = simple_on_preempt,
+    .on_sleep = simple_on_sleep,
     .on_exit = simple_on_exit,
 
     .on_exhausted = cedf_simple_on_exhausted,
 };
 
+static struct budget_tracker_ops cedf_drain_sobliv_ops =
+{
+    .on_scheduled = sobliv_on_scheduled,
+    .on_blocked = sobliv_on_blocked,
+    .on_preempt = sobliv_on_preempt,
+    .on_sleep = sobliv_on_sleep,
+    .on_exit = sobliv_on_exit,
+
+    .on_exhausted = cedf_sobliv_on_exhausted,
+};
+
 static long cedf_admit_task(struct task_struct* tsk)
 {
@@ -1173,6 +1320,17 @@ static long cedf_admit_task(struct task_struct* tsk)
     case DRAIN_SIMPLE:
         init_budget_tracker(&tsk_rt(tsk)->budget, &cedf_drain_simple_ops);
         break;
+    case DRAIN_SOBLIV:
+        /* budget_policy and budget_signal_policy cannot be quantum-based */
+        if (!budget_quantum_tracked(tsk) && budget_precisely_tracked(tsk)) {
+            init_budget_tracker(&tsk_rt(tsk)->budget, &cedf_drain_sobliv_ops);
+        }
+        else {
+            TRACE_TASK(tsk, "QUANTUM_ENFORCEMENT and QUANTUM_SIGNALS are "
+                "unsupported with DRAIN_SOBLIV.\n");
+            return -EINVAL;
+        }
+        break;
     default:
         TRACE_TASK(tsk, "Unsupported budget draining mode.\n");
         return -EINVAL;
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 15ac94038702..2950e39b054e 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -421,7 +421,7 @@ static noinline void job_completion(struct task_struct *t, int forced)
     gsnedf_job_arrival(t);
 }
 
-static void gsnedf_simple_on_exhausted(struct task_struct *t)
+static enum hrtimer_restart gsnedf_simple_on_exhausted(struct task_struct *t)
 {
     /* Assumption: t is scheduled on the CPU executing this callback */
@@ -446,6 +446,8 @@ static void gsnedf_simple_on_exhausted(struct task_struct *t)
             request_exit_np(t);
         }
     }
+
+    return HRTIMER_NORESTART;
 }
 
 /* gsnedf_tick - this function is called for every local timer
@@ -849,8 +851,10 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
     if (tsk_rt(prev)->budget.ops) {
         if (blocks)
             tsk_rt(prev)->budget.ops->on_blocked(prev);
-        else if (preempt || sleep)
-            tsk_rt(prev)->budget.ops->on_preempt_or_sleep(prev);
+        else if (sleep)
+            tsk_rt(prev)->budget.ops->on_sleep(prev);
+        else if (preempt)
+            tsk_rt(prev)->budget.ops->on_preempt(prev);
     }
 
     /* If a task blocks we have no choice but to reschedule.
@@ -1122,7 +1126,8 @@
 static struct budget_tracker_ops gsnedf_drain_simple_ops =
 {
     .on_scheduled = simple_on_scheduled,
     .on_blocked = simple_on_blocked,
-    .on_preempt_or_sleep = simple_on_preempt_or_sleep,
+    .on_preempt = simple_on_preempt,
+    .on_sleep = simple_on_sleep,
     .on_exit = simple_on_exit,
 
     .on_exhausted = gsnedf_simple_on_exhausted,
diff --git a/litmus/sched_pfp.c b/litmus/sched_pfp.c
index 4a8b8e084f6e..33f861ab0056 100644
--- a/litmus/sched_pfp.c
+++ b/litmus/sched_pfp.c
@@ -132,7 +132,7 @@ static void job_completion(struct task_struct* t, int forced)
     sched_trace_task_release(t);
 }
 
-static void pfp_simple_on_exhausted(struct task_struct *t)
+static enum hrtimer_restart pfp_simple_on_exhausted(struct task_struct *t)
 {
     /* Assumption: t is scheduled on the CPU executing this callback */
@@ -157,6 +157,8 @@ static void pfp_simple_on_exhausted(struct task_struct *t)
             request_exit_np(t);
         }
     }
+
+    return HRTIMER_NORESTART;
 }
 
 static void pfp_tick(struct task_struct *t)
@@ -214,8 +216,10 @@ static struct task_struct* pfp_schedule(struct task_struct * prev)
     if (tsk_rt(prev)->budget.ops) {
         if (blocks)
             tsk_rt(prev)->budget.ops->on_blocked(prev);
-        else if (preempt || sleep)
-            tsk_rt(prev)->budget.ops->on_preempt_or_sleep(prev);
+        else if (sleep)
+            tsk_rt(prev)->budget.ops->on_sleep(prev);
+        else if (preempt)
+            tsk_rt(prev)->budget.ops->on_preempt(prev);
     }
 
     /* If a task blocks we have no choice but to reschedule.
@@ -1713,7 +1717,8 @@
 static struct budget_tracker_ops pfp_drain_simple_ops =
 {
     .on_scheduled = simple_on_scheduled,
     .on_blocked = simple_on_blocked,
-    .on_preempt_or_sleep = simple_on_preempt_or_sleep,
+    .on_preempt = simple_on_preempt,
+    .on_sleep = simple_on_sleep,
     .on_exit = simple_on_exit,
 
     .on_exhausted = pfp_simple_on_exhausted,
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index 3b3edfe908ff..c06db8b434cd 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -164,7 +164,7 @@ static void job_completion(struct task_struct* t, int forced)
     prepare_for_next_period(t);
 }
 
-static void psnedf_simple_on_exhausted(struct task_struct *t)
+static enum hrtimer_restart psnedf_simple_on_exhausted(struct task_struct *t)
 {
     /* Assumption: t is scheduled on the CPU executing this callback */
@@ -189,6 +189,8 @@ static void psnedf_simple_on_exhausted(struct task_struct *t)
             request_exit_np(t);
         }
     }
+
+    return HRTIMER_NORESTART;
 }
 
 static void psnedf_tick(struct task_struct *t)
@@ -246,8 +248,10 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
     if (tsk_rt(prev)->budget.ops) {
         if (blocks)
             tsk_rt(prev)->budget.ops->on_blocked(prev);
-        else if (preempt || sleep)
-            tsk_rt(prev)->budget.ops->on_preempt_or_sleep(prev);
+        else if (sleep)
+            tsk_rt(prev)->budget.ops->on_sleep(prev);
+        else if (preempt)
+            tsk_rt(prev)->budget.ops->on_preempt(prev);
     }
 
     /* If a task blocks we have no choice but to reschedule.
@@ -647,7 +651,8 @@
 static struct budget_tracker_ops psnedf_drain_simple_ops =
 {
     .on_scheduled = simple_on_scheduled,
     .on_blocked = simple_on_blocked,
-    .on_preempt_or_sleep = simple_on_preempt_or_sleep,
+    .on_preempt = simple_on_preempt,
+    .on_sleep = simple_on_sleep,
     .on_exit = simple_on_exit,
 
     .on_exhausted = psnedf_simple_on_exhausted,
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c
index 8d75437e7771..e243b8007826 100644
--- a/litmus/sched_task_trace.c
+++ b/litmus/sched_task_trace.c
@@ -192,7 +192,9 @@ feather_callback void do_sched_trace_task_completion(unsigned long id,
     struct task_struct *t = (struct task_struct*) _task;
     struct st_event_record* rec = get_record(ST_COMPLETION, t);
     if (rec) {
-        rec->data.completion.when = now();
+        rec->data.completion.when = now();
+        rec->data.completion.backlog_remaining = tsk_rt(t)->job_params.backlog;
+        rec->data.completion.was_backlog_job = tsk_rt(t)->job_params.is_backlogged_job;
         rec->data.completion.forced = forced;
         put_record(rec);
     }
--
cgit v1.2.2
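
Usage note: the sketch below illustrates how a userspace task might opt into the
new DRAIN_SOBLIV mode. It is a minimal example, not code from this patch. The
liblitmus calls (init_litmus(), init_rt_task_param(), set_rt_task_param(),
task_mode(), ms2ns()) are the standard LITMUS^RT userspace API, but the rt_task
field name drain_policy is an assumption inferred from the draining-mode switch
in cedf_admit_task(); this patch does not show how the policy is configured from
userspace.

    /* Hypothetical userspace sketch: opting a task into SOBLIV draining.
     * Assumption: the rt_task parameter block exposes the draining mode as
     * "drain_policy" and DRAIN_SOBLIV is visible to userspace. */
    #include <stdio.h>
    #include <litmus.h>

    int main(void)
    {
        struct rt_task param;

        init_litmus();                      /* set up liblitmus */

        init_rt_task_param(&param);
        param.exec_cost = ms2ns(10);        /* 10 ms budget */
        param.period    = ms2ns(100);       /* 100 ms period */

        /* DRAIN_SOBLIV requires precise (non-quantum) budget tracking;
         * cedf_admit_task() returns -EINVAL otherwise. */
        param.budget_policy = PRECISE_ENFORCEMENT;
        param.drain_policy  = DRAIN_SOBLIV; /* assumed field name */

        if (set_rt_task_param(gettid(), &param) < 0) {
            perror("set_rt_task_param");
            return 1;
        }
        if (task_mode(LITMUS_RT_TASK) != 0) {
            perror("task_mode");
            return 1;
        }

        /* Real-time sections go here. Under SOBLIV draining, the budget
         * keeps draining while the task self-suspends; an overrun bumps
         * job_params::backlog and later jobs are early-released (without
         * deadline postponement) until the backlog reaches zero. */

        task_mode(BACKGROUND_TASK);
        return 0;
    }

Pairing DRAIN_SOBLIV with PRECISE_ENFORCEMENT mirrors the admit-task check in
sched_cedf.c above: quantum-based enforcement or signaling is rejected because
the suspension-oblivious enforcement timer must fire at precise instants rather
than on quantum boundaries.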