From 3324865fc5792b9d755d46cafa42c74b5037bba5 Mon Sep 17 00:00:00 2001
From: Glenn Elliott
Date: Mon, 1 Apr 2013 18:58:46 -0400
Subject: SOBLIV: Drain budget while task is in top-m only.

Also fixed numerous bugs...
---
 include/litmus/bheap.h        |    4 +
 include/litmus/budget.h       |   46 ++++-
 include/litmus/litmus.h       |    2 +-
 include/litmus/nvidia_info.h  |    4 +
 include/litmus/rt_param.h     |    1 -
 include/litmus/sched_plugin.h |    5 -
 litmus/bheap.c                |   23 +++
 litmus/budget.c               |  149 ++++++++------
 litmus/nvidia_info.c          |   17 +-
 litmus/sched_cedf.c           |  454 +++++++++++++++++++++++++++++-------------
 litmus/sched_gsn_edf.c        |    8 +-
 litmus/sched_litmus.c         |    4 +-
 litmus/sched_plugin.c         |    6 -
 13 files changed, 501 insertions(+), 222 deletions(-)

diff --git a/include/litmus/bheap.h b/include/litmus/bheap.h
index 49f7e44bc0a5..4fded5724b28 100644
--- a/include/litmus/bheap.h
+++ b/include/litmus/bheap.h
@@ -48,6 +48,10 @@ static inline int bheap_empty(struct bheap* heap)
 //	return heap->size;
 //}
 
+typedef void (*bheap_for_all_t)(struct bheap_node* node, void* args);
+
+void bheap_for_all(struct bheap* heap, bheap_for_all_t fn, void* args);
+
 /* insert (and reinitialize) a node into the heap */
 void bheap_insert(bheap_prio_t higher_prio,
 		struct bheap* heap,
diff --git a/include/litmus/budget.h b/include/litmus/budget.h
index 8e426a71f03d..08d5e0970d1d 100644
--- a/include/litmus/budget.h
+++ b/include/litmus/budget.h
@@ -4,6 +4,8 @@
 #include <linux/spinlock.h>
 #include <linux/hrtimer.h>
 
+#include <litmus/binheap.h>
+
 struct enforcement_timer
 {
	raw_spinlock_t lock;
@@ -15,17 +17,22 @@ typedef void (*scheduled_t)(struct task_struct* t);
 typedef void (*blocked_t)(struct task_struct* t);
 typedef void (*preempt_t)(struct task_struct* t);
 typedef void (*sleep_t)(struct task_struct* t);
+typedef void (*wakeup_t)(struct task_struct* t);
 typedef enum hrtimer_restart (*exhausted_t)(struct task_struct* t);
 typedef void (*exit_t)(struct task_struct* t);
 typedef void (*inherit_t)(struct task_struct* t, struct task_struct* prio_inh);
 typedef void (*disinherit_t)(struct task_struct* t, struct task_struct* prio_inh);
 
+typedef void (*enter_top_m_t)(struct task_struct* t);
+typedef void (*exit_top_m_t)(struct task_struct* t);
+
 struct budget_tracker_ops
 {
	scheduled_t on_scheduled;	/* called from litmus_schedule(). */
	blocked_t on_blocked;		/* called from plugin::schedule() */
	preempt_t on_preempt;		/* called from plugin::schedule() */
	sleep_t on_sleep;		/* called from plugin::schedule() */
+	wakeup_t on_wakeup;
 
	exit_t on_exit;			/* task exiting rt mode */
 
@@ -33,6 +40,9 @@ struct budget_tracker_ops
	inherit_t on_inherit;
	disinherit_t on_disinherit;
+
+	enter_top_m_t on_enter_top_m;
+	exit_top_m_t on_exit_top_m;
 };
 
 struct budget_tracker
 {
	struct enforcement_timer timer;
	const struct budget_tracker_ops* ops;
	unsigned long flags;
+
+	struct binheap_node top_m_node;
+	lt_t suspend_timestamp;
 };
 
 /* budget tracker flags */
 enum BT_FLAGS
 {
-	BTF_BUDGET_EXHAUSTED = 0,
-	BTF_SIG_BUDGET_SENT = 1,
+	BTF_BUDGET_EXHAUSTED	= 0,
+	BTF_SIG_BUDGET_SENT	= 1,
+	BTF_IS_TOP_M		= 2,
 };
 
 /* Functions for simple DRAIN_SIMPLE policy common
@@ -66,16 +80,38 @@ void simple_on_exit(struct task_struct* t);
  *
  * Limitation: Quantum budget tracking is unsupported.
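  *
  * Under DRAIN_SOBLIV, budget is drained only while the task is among the
  * top-m highest-base-priority tasks of its cluster (m = the number of CPUs
  * in the cluster), and it drains whether the task is running or suspended.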
*/ -void sobliv_on_scheduled(struct task_struct* t); +//void sobliv_on_scheduled(struct task_struct* t); void sobliv_on_blocked(struct task_struct* t); -void sobliv_on_sleep(struct task_struct* t); +void sobliv_on_wakeup(struct task_struct* t); +//void sobliv_on_sleep(struct task_struct* t); +//void sobliv_on_preempt(struct task_struct* t); /* Use the DRAIN_SIMPLE implementations */ -#define sobliv_on_preempt simple_on_preempt #define sobliv_on_exit simple_on_exit void sobliv_on_inherit(struct task_struct* t, struct task_struct* prio_inh); void sobliv_on_disinherit(struct task_struct* t, struct task_struct* prio_inh); +void sobliv_on_enter_top_m(struct task_struct* t); +void sobliv_on_exit_top_m(struct task_struct* t); + void sobliv_revaluate_task(struct task_struct* t); +#define budget_state_machine(t, evt) \ + do { \ + if (get_budget_timer(t).ops && \ + get_budget_timer(t).ops->evt != NULL) { \ + get_budget_timer(t).ops->evt(t); \ + } \ + }while(0) + +#define budget_state_machine2(a, b, evt) \ + do { \ + if (get_budget_timer(a).ops && \ + get_budget_timer(b).ops && \ + get_budget_timer(a).ops->evt != NULL && \ + get_budget_timer(b).ops->evt != NULL) {\ + get_budget_timer(a).ops->evt(a, b); \ + } \ + }while(0) + void init_budget_tracker(struct budget_tracker* bt, const struct budget_tracker_ops* ops); diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h index 4fa705e65f0c..4e74101a5619 100644 --- a/include/litmus/litmus.h +++ b/include/litmus/litmus.h @@ -166,7 +166,7 @@ static inline void set_inh_task_linkback(struct task_struct* t, struct task_stru while(!success) { int b = find_first_zero_bit(&tsk_rt(linkto)->used_linkback_slots, - sizeof(tsk_rt(linkto)->used_linkback_slots)); + BITS_PER_BYTE*sizeof(tsk_rt(linkto)->used_linkback_slots)); BUG_ON(b > MAX_IDX); diff --git a/include/litmus/nvidia_info.h b/include/litmus/nvidia_info.h index 7db4a32af734..f1477fb9dc33 100644 --- a/include/litmus/nvidia_info.h +++ b/include/litmus/nvidia_info.h @@ -45,6 +45,10 @@ long enable_gpu_owner(struct task_struct *t); /* call when the GPU-holding task, t, resumes */ long disable_gpu_owner(struct task_struct *t); +/* call when the GPU-holding task, t, had a priority change due to budget + * exhaustion */ +long recheck_gpu_owner(struct task_struct* t); + /* call when the GPU-holding task, t, increases its priority */ int gpu_owner_increase_priority(struct task_struct *t); diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h index 43a7e2126bf4..3f3aa240778f 100644 --- a/include/litmus/rt_param.h +++ b/include/litmus/rt_param.h @@ -427,7 +427,6 @@ struct rt_param { struct task_struct** inh_task_linkbacks; /* array. BITS_PER_LONG elements. */ unsigned long used_linkback_slots; - #ifdef CONFIG_REALTIME_AUX_TASKS unsigned int is_aux_task:1; unsigned int aux_ready:1; diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h index d9e3a46129f4..82e62e8283e9 100644 --- a/include/litmus/sched_plugin.h +++ b/include/litmus/sched_plugin.h @@ -35,9 +35,6 @@ typedef struct task_struct* (*schedule_t)(struct task_struct * prev); */ typedef void (*finish_switch_t)(struct task_struct *prev); -/* trigger a reschedule of 't' if 't' is running. */ -typedef void (*check_schedule_t)(struct task_struct *t); - /********************* task state changes ********************/ /* Called to setup a new real-time task. 
@@ -132,8 +129,6 @@ struct sched_plugin { scheduler_tick_t tick; schedule_t schedule; finish_switch_t finish_switch; - check_schedule_t check_schedule; - /* syscall backend */ complete_job_t complete_job; diff --git a/litmus/bheap.c b/litmus/bheap.c index 45e1db36fa36..403c09cc9e81 100644 --- a/litmus/bheap.c +++ b/litmus/bheap.c @@ -21,6 +21,29 @@ void bheap_node_init(struct bheap_node** _h, void* value) } +static void __bheap_for_all(struct bheap_node *h, bheap_for_all_t fn, void* args) +{ + /* pre-order */ + fn(h, args); + + /* depth-first */ + if (h->child) + __bheap_for_all(h->child, fn, args); + if (h->next) + __bheap_for_all(h->next, fn, args); +} + +void bheap_for_all(struct bheap* heap, bheap_for_all_t fn, void* args) +{ + struct bheap_node *head; + + BUG_ON(!heap); + BUG_ON(!fn); + + head = heap->head; + __bheap_for_all(head, fn, args); +} + /* make child a subtree of root */ static void __bheap_link(struct bheap_node* root, struct bheap_node* child) diff --git a/litmus/budget.c b/litmus/budget.c index 779506abf119..718458925fb4 100644 --- a/litmus/budget.c +++ b/litmus/budget.c @@ -159,63 +159,112 @@ void simple_on_exit(struct task_struct* t) * DRAIN_SOBLIV */ -void sobliv_on_scheduled(struct task_struct* t) +void sobliv_on_blocked(struct task_struct* t) { - BUG_ON(!t); - - if (!bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)) { - if (tsk_rt(t)->budget.timer.armed) - TRACE_TASK(t, "budget timer already armed.\n"); - else - arm_enforcement_timer(t); + if (bt_flag_is_set(t, BTF_IS_TOP_M)) { + if (tsk_rt(t)->budget.timer.armed) { + /* there is a fraction of time where we're double-counting the + * time tracked by the rq and suspension time. + * TODO: Do this recording closer to suspension time. */ + tsk_rt(t)->budget.suspend_timestamp = litmus_clock(); + + TRACE_TASK(t, "budget drains while suspended.\n"); + } + else { + TRACE_TASK(t, "budget timer not armed?\n"); + WARN_ON(1); + } } +} - if (tsk_rt(t)->inh_task) - BUG_ON(is_running(tsk_rt(t)->inh_task)); +void sobliv_on_wakeup(struct task_struct* t) +{ + if (bt_flag_is_set(t, BTF_IS_TOP_M)) { + /* we're waking up while in top-m. record the time spent + * suspended while draining in exec_cost. suspend_timestamp was + * either set when we entered top-m while asleep, or when we + * blocked. */ + lt_t suspend_cost; + BUG_ON(!tsk_rt(t)->budget.suspend_timestamp); + suspend_cost = litmus_clock() - tsk_rt(t)->budget.suspend_timestamp; + TRACE_TASK(t, "budget consumed while suspended: %llu\n", suspend_cost); + get_exec_time(t) += suspend_cost; + } } -void sobliv_on_blocked(struct task_struct* t) +void sobliv_on_inherit(struct task_struct* t, struct task_struct* prio_inh) { - /* NOOP */ - TRACE_TASK(t, "sobliv: budget drains while suspended.\n"); + /* TODO: Budget credit accounting. */ + + BUG_ON(!prio_inh); + TRACE_TASK(t, "called %s\n", __FUNCTION__); } -void sobliv_on_sleep(struct task_struct* t) +void sobliv_on_disinherit(struct task_struct* t, struct task_struct* prio_inh) { - if (budget_precisely_tracked(t)) { - /* kludge. callback called before job_completion logic runs, so - * we need to do some logic of our own to figure out if there is a - * backlog after this job (it is completing since sleep is asserted) - * completes. 
-		 */
-		int no_backlog = (!has_backlog(t) || /* no backlog */
-			/* the last backlogged job is completing */
-			(get_backlog(t) == 1 && tsk_rt(t)->job_params.is_backlogged_job));
-		if (no_backlog)
-			cancel_enforcement_timer(t);
-		else
-			TRACE_TASK(t, "not cancelling timer because there is time for backlogged work.\n");
-	}
+	/* TODO: Budget credit accounting. */
+	TRACE_TASK(t, "called %s\n", __FUNCTION__);
 }
 
-void sobliv_on_inherit(struct task_struct* t, struct task_struct* prio_inh)
+void sobliv_on_enter_top_m(struct task_struct* t)
 {
-//	BUG_ON(!prio_inh);
-//
-//	if (budget_precisely_tracked(t)) {
-//		TRACE_TASK(t, "inheriting from %s/%d. stop draining own budget.\n",
-//			prio_inh->comm, prio_inh->pid);
-//		cancel_enforcement_timer(t);
-//	}
+	if (!bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)) {
+		if (tsk_rt(t)->budget.timer.armed)
+			TRACE_TASK(t, "budget timer already armed.\n");
+		else {
+			/* if we're blocked, then record the time at which we started measuring */
+			if (!is_running(t))
+				tsk_rt(t)->budget.suspend_timestamp = litmus_clock();
+
+			/* the callback will handle it if it is executing */
+			if (!hrtimer_callback_running(&tsk_rt(t)->budget.timer.timer))
+				arm_enforcement_timer(t);
+			else
+				TRACE_TASK(t, "within callback context. deferring timer arm.\n");
+		}
+	}
 }
 
-void sobliv_on_disinherit(struct task_struct* t, struct task_struct* prio_inh)
+void sobliv_on_exit_top_m(struct task_struct* t)
 {
-//	if (!prio_inh && budget_precisely_tracked(t)) {
-//		TRACE_TASK(t, "assuming base priority. start draining own budget.\n");
-//		arm_enforcement_timer(t);
-//	}
+	if (budget_precisely_tracked(t)) {
+		if (tsk_rt(t)->budget.timer.armed) {
+
+			if (!is_running(t)) {
+				/* the time at which we started draining budget while suspended
+				 * is recorded in suspend_timestamp. suspend_timestamp was set
+				 * either when 't' entered the top-m while suspended or when
+				 * 't' blocked. */
+				lt_t suspend_cost;
+				BUG_ON(!tsk_rt(t)->budget.suspend_timestamp);
+				suspend_cost = litmus_clock() - tsk_rt(t)->budget.suspend_timestamp;
+				TRACE_TASK(t, "budget consumed while suspended: %llu\n", suspend_cost);
+				get_exec_time(t) += suspend_cost;
+
+				/* timer should have fired before now */
+				if (get_exec_time(t) + 1000000/10 > get_exec_cost(t)) {
+					TRACE_TASK(t, "budget overrun while suspended by over 1/10 "
+						"millisecond! timer should have already fired!\n");
+					WARN_ON(1);
+				}
+			}
+
+			TRACE_TASK(t, "stops draining budget\n");
+			/* the callback will handle it if it is executing */
+			if (!hrtimer_callback_running(&tsk_rt(t)->budget.timer.timer)) {
+				/* TODO: record a timestamp if the task isn't running */
+				cancel_enforcement_timer(t);
+			}
+			else
+				TRACE_TASK(t, "within callback context. skipping operation.\n");
+		}
+		else {
+			TRACE_TASK(t, "was not draining budget\n");
+		}
+	}
 }
 
+
 void sobliv_revaluate_task(struct task_struct* t)
 {
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
@@ -256,18 +305,6 @@ void sobliv_revaluate_task(struct task_struct* t)
 
	/* TODO: If we hold an OMLP-family outermost lock, then we may
	 * need to move a task into a fifo queue */
-
-
-
-//	/* anyone who inherits from me may need to be rescheduled */
-//	linkback = tsk_rt(t)->inh_task_linkback;
-//	if (linkback) {
-//		/* TODO: IS THIS THREAD SAFE????
-//		 */
-//		TRACE_TASK(t, "Checking if inheritor %s/%d needs to be rescheduled.\n",
-//			linkback->comm,
-//			linkback->pid);
-//		litmus->check_schedule(linkback);
-//	}
 }
 
 
@@ -311,17 +348,11 @@ static enum hrtimer_restart __on_timeout(struct hrtimer *timer)
 void init_budget_tracker(struct budget_tracker* bt, const struct budget_tracker_ops* ops)
 {
	BUG_ON(!bt);
-	BUG_ON(!ops);
-
-	BUG_ON(!ops->on_scheduled);
-	BUG_ON(!ops->on_blocked);
-	BUG_ON(!ops->on_preempt);
-	BUG_ON(!ops->on_sleep);
-	BUG_ON(!ops->on_exhausted);
 
	memset(bt, 0, sizeof(*bt));
	raw_spin_lock_init(&bt->timer.lock);
	hrtimer_init(&bt->timer.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	bt->timer.timer.function = __on_timeout;
	bt->ops = ops;
+	INIT_BINHEAP_NODE(&bt->top_m_node);
 }
diff --git a/litmus/nvidia_info.c b/litmus/nvidia_info.c
index e87e56542a23..c96a209231a2 100644
--- a/litmus/nvidia_info.c
+++ b/litmus/nvidia_info.c
@@ -785,7 +785,7 @@ long enable_gpu_owner(struct task_struct *t)
 
	BUG_ON(!is_realtime(t));
 
-	gpu = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus));
+	gpu = find_first_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus));
 
	if (binheap_is_in_heap(&tsk_rt(t)->gpu_owner_node)) {
		TRACE_CUR("task %s/%d is already active on GPU %d\n", t->comm, t->pid, gpu);
@@ -853,7 +853,7 @@ long disable_gpu_owner(struct task_struct *t)
 
	BUG_ON(!is_realtime(t));
 
-	gpu = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus));
+	gpu = find_first_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus));
 
	if (!binheap_is_in_heap(&tsk_rt(t)->gpu_owner_node)) {
//		TRACE_CUR("task %s/%d is not active on GPU %d\n", t->comm, t->pid, gpu);
@@ -916,7 +916,14 @@ out:
 }
 
-
+long recheck_gpu_owner(struct task_struct* t)
+{
+	/* TODO: blend implementation of disable/enable */
+	int retval = disable_gpu_owner(t);
+	if (!retval)
+		retval = enable_gpu_owner(t);
+	return retval;
+}
 
 
@@ -940,7 +947,7 @@ int gpu_owner_increase_priority(struct task_struct *t)
 
	BUG_ON(!is_realtime(t));
	BUG_ON(!tsk_rt(t)->held_gpus);
 
-	gpu = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus));
+	gpu = find_first_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus));
 
	if (!binheap_is_in_heap(&tsk_rt(t)->gpu_owner_node)) {
		TRACE_CUR("nv klmirqd may not inherit from %s/%d on GPU %d\n",
@@ -1013,7 +1020,7 @@ int gpu_owner_decrease_priority(struct task_struct *t)
 
	BUG_ON(!is_realtime(t));
	BUG_ON(!tsk_rt(t)->held_gpus);
 
-	gpu = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus));
+	gpu = find_first_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus));
 
	if (!binheap_is_in_heap(&tsk_rt(t)->gpu_owner_node)) {
		TRACE_CUR("nv klmirqd may not inherit from %s/%d on GPU %d\n",
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 4551fb851dbd..fc174c464a17 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -143,6 +143,11 @@ typedef struct clusterdomain {
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
	raw_spinlock_t dgl_lock;
 #endif
+
+	int top_m_size;
+	struct binheap top_m;
+	struct binheap not_top_m;
+
 } cedf_domain_t;
 
 /* a cedf_domain per cluster; allocation is done at init/activation time */
@@ -164,6 +169,141 @@ static int num_gpu_clusters;
 static unsigned int gpu_cluster_size;
 #endif
 
+static inline struct task_struct* binheap_node_to_task(struct binheap_node *bn)
+{
+	struct budget_tracker *bt = binheap_entry(bn, struct budget_tracker, top_m_node);
+	struct task_struct *t =
+		container_of(
+			container_of(bt, struct rt_param, budget),
+			struct task_struct,
rt_param); + return t; +} + +static int cedf_max_heap_base_priority_order(struct binheap_node *a, + struct binheap_node *b) +{ + struct task_struct* t_a = binheap_node_to_task(a); + struct task_struct* t_b = binheap_node_to_task(b); + return __edf_higher_prio(t_a, BASE, t_b, BASE); +} + +static int cedf_min_heap_base_priority_order(struct binheap_node *a, + struct binheap_node *b) +{ + struct task_struct* t_a = binheap_node_to_task(a); + struct task_struct* t_b = binheap_node_to_task(b); + return __edf_higher_prio(t_b, BASE, t_a, BASE); +} + +static void cedf_track_in_top_m(struct task_struct *t) +{ + /* cluster lock must be held */ + cedf_domain_t *cluster = task_cpu_cluster(t); + struct budget_tracker *bt; + struct task_struct *mth_highest; + + //BUG_ON(binheap_is_in_heap(&tsk_rt(t)->budget.top_m_node)); + if (binheap_is_in_heap(&tsk_rt(t)->budget.top_m_node)) { + TRACE_TASK(t, "apparently already being tracked. top-m?: %s\n", + (bt_flag_is_set(t, BTF_IS_TOP_M)) ? "Yes":"No"); + return; + } + + /* TODO: do cluster_size-1 if release master is in this cluster */ + if (cluster->top_m_size < cluster_size) { + TRACE_TASK(t, "unconditionally adding task to top-m.\n"); + binheap_add(&tsk_rt(t)->budget.top_m_node, &cluster->top_m, + struct budget_tracker, top_m_node); + ++cluster->top_m_size; + bt_flag_set(t, BTF_IS_TOP_M); + budget_state_machine(t,on_enter_top_m); + + return; + } + bt = binheap_top_entry(&cluster->top_m, struct budget_tracker, top_m_node); + mth_highest = + container_of( + container_of(bt, struct rt_param, budget), + struct task_struct, + rt_param); + + if (__edf_higher_prio(t, BASE, mth_highest, BASE)) { + + TRACE_TASK(t, "adding to top-m (evicting %s/%d)\n", + mth_highest->comm, mth_highest->pid); + + binheap_delete_root(&cluster->top_m, struct budget_tracker, top_m_node); + INIT_BINHEAP_NODE(&tsk_rt(mth_highest)->budget.top_m_node); + binheap_add(&tsk_rt(mth_highest)->budget.top_m_node, + &cluster->not_top_m, + struct budget_tracker, top_m_node); + budget_state_machine(mth_highest,on_exit_top_m); + bt_flag_clear(mth_highest, BTF_IS_TOP_M); + + binheap_add(&tsk_rt(t)->budget.top_m_node, &cluster->top_m, + struct budget_tracker, top_m_node); + bt_flag_set(t, BTF_IS_TOP_M); + budget_state_machine(t,on_enter_top_m); + } + else { + TRACE_TASK(t, "adding to not-top-m\n"); + binheap_add(&tsk_rt(t)->budget.top_m_node, + &cluster->not_top_m, + struct budget_tracker, top_m_node); + } +} + +static void cedf_untrack_in_top_m(struct task_struct *t) +{ + /* cluster lock must be held */ + cedf_domain_t *cluster = task_cpu_cluster(t); + + if (!binheap_is_in_heap(&tsk_rt(t)->budget.top_m_node)) { + TRACE_TASK(t, "is not being tracked\n"); /* BUG() on this case? 
		 */
+		return;
+	}
+
+	if (bt_flag_is_set(t, BTF_IS_TOP_M)) {
+
+		TRACE_TASK(t, "removing task from top-m\n");
+
+		/* delete t's entry */
+		binheap_delete(&tsk_rt(t)->budget.top_m_node, &cluster->top_m);
+		budget_state_machine(t,on_exit_top_m);
+		bt_flag_clear(t, BTF_IS_TOP_M);
+
+		/* move a task over from the overflow heap */
+		if(!binheap_empty(&cluster->not_top_m)) {
+			struct budget_tracker *bt =
+				binheap_top_entry(&cluster->not_top_m, struct budget_tracker, top_m_node);
+			struct task_struct *to_move =
+				container_of(
+					container_of(bt, struct rt_param, budget),
+					struct task_struct,
+					rt_param);
+
+			TRACE_TASK(to_move, "being promoted to top-m\n");
+
+			binheap_delete_root(&cluster->not_top_m, struct budget_tracker, top_m_node);
+			INIT_BINHEAP_NODE(&tsk_rt(to_move)->budget.top_m_node);
+
+			binheap_add(&tsk_rt(to_move)->budget.top_m_node,
+					&cluster->top_m,
+					struct budget_tracker, top_m_node);
+			bt_flag_set(to_move, BTF_IS_TOP_M);
+			budget_state_machine(to_move,on_enter_top_m);
+		}
+		else {
+			--cluster->top_m_size;
+		}
+	}
+	else {
+		TRACE_TASK(t, "removing task from not-top-m\n");
+		binheap_delete(&tsk_rt(t)->budget.top_m_node, &cluster->not_top_m);
+	}
+}
+
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
 static raw_spinlock_t* cedf_get_dgl_spinlock(struct task_struct *t)
@@ -230,6 +370,11 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
	/* Currently linked task is set to be unlinked. */
	if (entry->linked) {
		entry->linked->rt_param.linked_on = NO_CPU;
+
+#ifdef CONFIG_LITMUS_LOCKING
+		if (tsk_rt(entry->linked)->inh_task)
+			clear_inh_task_linkback(entry->linked, tsk_rt(entry->linked)->inh_task);
+#endif
	}
 
	/* Link new task to CPU. */
@@ -258,8 +403,14 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
			linked = tmp;
		}
	}
-	if (linked) /* might be NULL due to swap */
+	if (linked) { /* might be NULL due to swap */
		linked->rt_param.linked_on = entry->cpu;
+
+#ifdef CONFIG_LITMUS_LOCKING
+		if (tsk_rt(linked)->inh_task)
+			set_inh_task_linkback(linked, tsk_rt(linked)->inh_task);
+#endif
+	}
	}
	entry->linked = linked;
 #ifdef WANT_ALL_SCHED_EVENTS
@@ -397,6 +548,14 @@ static noinline void cedf_job_arrival(struct task_struct* task)
	check_for_preemptions(cluster);
 }
 
+static void cedf_track_on_release(struct bheap_node* n, void* dummy)
+{
+	struct task_struct* t = bheap2task(n);
+	TRACE_TASK(t, "released\n");
+
+	cedf_track_in_top_m(t);
+}
+
 static void cedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 {
	cedf_domain_t* cluster = container_of(rt, cedf_domain_t, domain);
	unsigned long flags;
 
	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
 
+	bheap_for_all(tasks, cedf_track_on_release, NULL);
+
	__merge_ready(&cluster->domain, tasks);
	check_for_preemptions(cluster);
 
@@ -447,6 +608,7 @@ static noinline void job_completion(struct task_struct *t, int forced)
	}
 
+
	/* SETUP FOR THE NEXT JOB */
 
	sched_trace_task_completion(t, forced);
@@ -466,6 +628,7 @@ static noinline void job_completion(struct task_struct *t, int forced)
		 */
	}
	else {
+		cedf_untrack_in_top_m(t);
		prepare_for_next_period(t);
 
		if (do_backlogged_job) {
@@ -496,6 +659,7 @@ static noinline void job_completion(struct task_struct *t, int forced)
		}
 
		if (do_release || do_backlogged_job) {
+			cedf_track_in_top_m(t);
			cedf_job_arrival(t);
		}
		else {
@@ -504,7 +668,9 @@ static noinline void job_completion(struct task_struct *t, int forced)
	}
	else {
		BUG_ON(!forced);
+		/* budget was refreshed and job early released */
		TRACE_TASK(t, "job exhausted budget while sleeping\n");
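+		/* resume top-m accounting below under the refreshed job
+		 * parameters */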
+ cedf_track_in_top_m(t); } } @@ -536,26 +702,6 @@ static enum hrtimer_restart cedf_simple_on_exhausted(struct task_struct *t) return HRTIMER_NORESTART; } -static void cedf_check_schedule(struct task_struct* t) -{ - int cpu; - - cpu = (tsk_rt(t)->linked_on != NO_CPU) ? - tsk_rt(t)->linked_on : tsk_rt(t)->scheduled_on; - if (cpu == smp_processor_id()) { - TRACE_TASK(t, "is preemptable => FORCE_RESCHED\n"); - litmus_reschedule_local(); - set_will_schedule(); - } - else if (cpu != NO_CPU) { - TRACE_TASK(t, "is preemptable on remote cpu (%d) => FORCE_RESCHED\n", cpu); - litmus_reschedule(cpu); - } - else { - TRACE_TASK(t, "is not running, so no rescheduling necessary.\n"); - } -} - static enum hrtimer_restart cedf_sobliv_on_exhausted(struct task_struct *t) { enum hrtimer_restart restart = HRTIMER_NORESTART; @@ -598,7 +744,6 @@ static enum hrtimer_restart cedf_sobliv_on_exhausted(struct task_struct *t) else { lt_t remaining; cedf_domain_t* cluster = task_cpu_cluster(t); - int do_prio_reeval = 0; unsigned long flags; BUG_ON(cpu != NO_CPU); @@ -611,47 +756,74 @@ static enum hrtimer_restart cedf_sobliv_on_exhausted(struct task_struct *t) TRACE_TASK(t, "blocked, postponing deadline\n"); raw_spin_lock_irqsave(&cluster->cluster_lock, flags); - job_completion(t, 1); /* refreshes budget */ + job_completion(t, 1); /* refreshes budget and pushes out deadline */ #ifdef CONFIG_LITMUS_LOCKING - /* Decrease in base-priority is masked by inheritance, so - * we do not need to recheck any prior scheduling decisions - * or established inheritance relations. */ - do_prio_reeval = (tsk_rt(t)->inh_task == NULL); - - /* drop the lock to make prio propagation easy... may need to - * do this all within cluster lock if there are races... */ - raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags); - - if (do_prio_reeval) - sobliv_revaluate_task(t); - else - TRACE_TASK(t, "skipping reevaluation since inheritance " - "masks change in base-priority.\n"); - - - /* push any changed state... */ - if (do_prio_reeval && tsk_rt(t)->used_linkback_slots) { + { int i; - raw_spin_lock_irqsave(&cluster->cluster_lock, flags); - /* any running task that inherits from t may need to be rescheduled */ - for (i = find_first_bit(&tsk_rt(t)->used_linkback_slots, - sizeof(tsk_rt(t)->used_linkback_slots)); + /* any linked task that inherits from 't' needs to have their + * cpu-position re-evaluated. we have to do this in two passes. + * pass 1: remove nodes from heap s.t. heap is in known good state. + * pass 2: re-add nodes. 
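+	 *    (a cpu entry's heap key depends on the priority that 't'
+	 *    propagates to its linked inheritor; deleting every affected
+	 *    node before the keys change, and re-adding afterwards, keeps
+	 *    the binheap invariant intact throughout)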
+	 *
+	 */
+	for (i = find_first_bit(&tsk_rt(t)->used_linkback_slots, BITS_PER_BYTE*sizeof(tsk_rt(t)->used_linkback_slots));
+		i < BITS_PER_LONG;
+		i = find_next_bit(&tsk_rt(t)->used_linkback_slots, BITS_PER_BYTE*sizeof(tsk_rt(t)->used_linkback_slots), i+1))
+	{
+		struct task_struct *to_update = tsk_rt(t)->inh_task_linkbacks[i];
+		BUG_ON(!to_update);
+		if (tsk_rt(to_update)->linked_on != NO_CPU) {
+			cpu_entry_t *entry = &per_cpu(cedf_cpu_entries, tsk_rt(to_update)->linked_on);
+			BUG_ON(!binheap_is_in_heap(&entry->hn));
+			binheap_delete(&entry->hn, &cluster->cpu_heap);
+		}
+	}
+	for (i = find_first_bit(&tsk_rt(t)->used_linkback_slots, BITS_PER_BYTE*sizeof(tsk_rt(t)->used_linkback_slots));
		i < BITS_PER_LONG;
-		i = find_next_bit(&tsk_rt(t)->used_linkback_slots,
-			sizeof(tsk_rt(t)->used_linkback_slots), i+1)) {
-		cedf_check_schedule(tsk_rt(t)->inh_task_linkbacks[i]);
+		i = find_next_bit(&tsk_rt(t)->used_linkback_slots, BITS_PER_BYTE*sizeof(tsk_rt(t)->used_linkback_slots), i+1))
+	{
+		struct task_struct *to_update = tsk_rt(t)->inh_task_linkbacks[i];
+		BUG_ON(!to_update);
+		if (tsk_rt(to_update)->linked_on != NO_CPU) {
+			cpu_entry_t *entry = &per_cpu(cedf_cpu_entries, tsk_rt(to_update)->linked_on);
+			binheap_add(&entry->hn, &cluster->cpu_heap, cpu_entry_t, hn);
+		}
	}
-	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
	}
 #endif
+	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
 
-	hrtimer_forward_now(&get_budget_timer(t).timer.timer,
-		ns_to_ktime(budget_remaining(t)));
-	remaining = hrtimer_get_expires_ns(&get_budget_timer(t).timer.timer);
+#ifdef CONFIG_LITMUS_LOCKING
+	/* Check our inheritance and propagate any changes forward. */
+	sobliv_revaluate_task(t);
+#endif
+	/* No need to recheck priority of AUX tasks. They will always
+	 * inherit from 't' if they are enabled. Their prio change was
+	 * captured by the cpu-heap operations above. */
 
-	TRACE_TASK(t, "rearmed timer to %ld\n", remaining);
-	restart = HRTIMER_RESTART;
+#ifdef CONFIG_LITMUS_NVIDIA
+	/* Re-eval priority of GPU interrupt threads. */
+	if(tsk_rt(t)->held_gpus && !tsk_rt(t)->hide_from_gpu)
+		recheck_gpu_owner(t);
+#endif
+
+#ifdef CONFIG_LITMUS_LOCKING
+	/* double-check that everything is okay */
+	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
+	check_for_preemptions(cluster);
+	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
+#endif
+
+	/* we need to set up the budget timer since we're within the callback.
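+	 * (hrtimer_start() must not be called on a timer from within that
+	 * timer's own expiry callback; instead the expiry is pushed forward
+	 * with hrtimer_forward_now() and __on_timeout() returns
+	 * HRTIMER_RESTART to re-arm it.)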
*/ + if (bt_flag_is_set(t, BTF_IS_TOP_M)) { + hrtimer_forward_now(&get_budget_timer(t).timer.timer, + ns_to_ktime(budget_remaining(t))); + remaining = hrtimer_get_expires_ns(&get_budget_timer(t).timer.timer); + + TRACE_TASK(t, "rearmed timer to %ld\n", remaining); + restart = HRTIMER_RESTART; + } } } } @@ -672,7 +844,7 @@ static void cedf_tick(struct task_struct* t) tsk_rt(t)->budget.ops && budget_quantum_tracked(t) && budget_exhausted(t)) { TRACE_TASK(t, "budget exhausted\n"); - tsk_rt(t)->budget.ops->on_exhausted(t); + budget_state_machine(t,on_exhausted); } } @@ -1057,14 +1229,12 @@ static struct task_struct* cedf_schedule(struct task_struct * prev) #endif /* Do budget stuff */ - if (tsk_rt(prev)->budget.ops) { - if (blocks) - tsk_rt(prev)->budget.ops->on_blocked(prev); - else if (sleep) - tsk_rt(prev)->budget.ops->on_sleep(prev); - else if (preempt) - tsk_rt(prev)->budget.ops->on_preempt(prev); - } + if (blocks) + budget_state_machine(prev,on_blocked); + else if (sleep) + budget_state_machine(prev,on_sleep); + else if (preempt) + budget_state_machine(prev,on_preempt); /* If a task blocks we have no choice but to reschedule. */ @@ -1137,24 +1307,24 @@ static struct task_struct* cedf_schedule(struct task_struct * prev) out_set_state: #endif -#ifdef CONFIG_LITMUS_LOCKING - /* Update priority inheritance linkbacks. - * A blocked task may have multiple tasks that inherit from it, but only - * one of those tasks should be runnable. Provide a link-back between the - * blocked task and the one that inherits from it. */ - - /* TODO: Support klmirqd and aux tasks */ - /* TODO: MOVE THESE CALLS TO __increase AND __decrease TO CATCH ALL CASES. - PAY ATTENTION TO RUN-STATE OF INHERITOR & INHERITEE */ - if (next != prev) { - if (prev && tsk_rt(prev)->inh_task) { - clear_inh_task_linkback(prev, tsk_rt(prev)->inh_task); - } - if (next && tsk_rt(next)->inh_task) { - set_inh_task_linkback(next, tsk_rt(next)->inh_task); - } - } -#endif +//#ifdef CONFIG_LITMUS_LOCKING +// /* Update priority inheritance linkbacks. +// * A blocked task may have multiple tasks that inherit from it, but only +// * one of those tasks should be runnable. Provide a link-back between the +// * blocked task and the one that inherits from it. */ +// +// /* TODO: Support klmirqd and aux tasks */ +// /* TODO: MOVE THESE CALLS TO __increase AND __decrease TO CATCH ALL CASES. 
+// PAY ATTENTION TO RUN-STATE OF INHERITOR & INHERITEE */ +// if (next != prev) { +// if (prev && tsk_rt(prev)->inh_task) { +// clear_inh_task_linkback(prev, tsk_rt(prev)->inh_task); +// } +// if (next && tsk_rt(next)->inh_task) { +// set_inh_task_linkback(next, tsk_rt(next)->inh_task); +// } +// } +//#endif sched_state_task_picked(); raw_spin_unlock(&cluster->cluster_lock); @@ -1226,50 +1396,53 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running) } if (is_running(t)) { + cedf_track_in_top_m(t); cedf_job_arrival(t); } raw_spin_unlock_irqrestore(&(cluster->cluster_lock), flags); } -static void cedf_task_wake_up(struct task_struct *task) +static void cedf_task_wake_up(struct task_struct *t) { unsigned long flags; cedf_domain_t *cluster; lt_t now; - cluster = task_cpu_cluster(task); + cluster = task_cpu_cluster(t); raw_spin_lock_irqsave(&cluster->cluster_lock, flags); now = litmus_clock(); - TRACE_TASK(task, "wake_up at %llu\n", now); + TRACE_TASK(t, "wake_up at %llu\n", now); - if (is_sporadic(task) && is_tardy(task, now)) { - release_at(task, now); - sched_trace_task_release(task); + if (is_sporadic(t) && is_tardy(t, now)) { + release_at(t, now); + sched_trace_task_release(t); } else { /* periodic task model. don't force job to end. * rely on user to say when jobs complete or when budget expires. */ - tsk_rt(task)->completed = 0; + tsk_rt(t)->completed = 0; } #ifdef CONFIG_REALTIME_AUX_TASKS - if (tsk_rt(task)->has_aux_tasks && !tsk_rt(task)->hide_from_aux_tasks) { - TRACE_CUR("%s/%d is ready so aux tasks may not inherit.\n", task->comm, task->pid); - disable_aux_task_owner(task); + if (tsk_rt(t)->has_aux_tasks && !tsk_rt(t)->hide_from_aux_tasks) { + TRACE_CUR("%s/%d is ready so aux tasks may not inherit.\n", t->comm, t->pid); + disable_aux_task_owner(t); } #endif #ifdef CONFIG_LITMUS_NVIDIA - if (tsk_rt(task)->held_gpus && !tsk_rt(task)->hide_from_gpu) { - TRACE_CUR("%s/%d is ready so gpu klmirqd tasks may not inherit.\n", task->comm, task->pid); - disable_gpu_owner(task); + if (tsk_rt(t)->held_gpus && !tsk_rt(t)->hide_from_gpu) { + TRACE_CUR("%s/%d is ready so gpu klmirqd tasks may not inherit.\n", t->comm, t->pid); + disable_gpu_owner(t); } #endif - cedf_job_arrival(task); + budget_state_machine(t,on_wakeup); + cedf_job_arrival(t); + raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags); } @@ -1321,9 +1494,15 @@ static void cedf_task_exit(struct task_struct * t) /* unlink if necessary */ raw_spin_lock_irqsave(&cluster->cluster_lock, flags); + if (tsk_rt(t)->inh_task) { + WARN_ON(1); + clear_inh_task_linkback(t, tsk_rt(t)->inh_task); + } + /* disable budget enforcement */ + cedf_untrack_in_top_m(t); if (tsk_rt(t)->budget.ops) - tsk_rt(t)->budget.ops->on_exit(t); + budget_state_machine(t,on_exit); #ifdef CONFIG_REALTIME_AUX_TASKS /* make sure we clean up on our way out */ @@ -1368,40 +1547,48 @@ static struct budget_tracker_ops cedf_drain_simple_ops = .on_sleep = simple_on_sleep, .on_exit = simple_on_exit, - .on_exhausted = cedf_simple_on_exhausted, - + .on_wakeup = NULL, .on_inherit = NULL, .on_disinherit = NULL, + .on_enter_top_m = NULL, + .on_exit_top_m = NULL, + + .on_exhausted = cedf_simple_on_exhausted, }; static struct budget_tracker_ops cedf_drain_sobliv_ops = { - .on_scheduled = sobliv_on_scheduled, + .on_scheduled = NULL, + .on_preempt = NULL, + .on_sleep = NULL, + .on_blocked = sobliv_on_blocked, - .on_preempt = sobliv_on_preempt, - .on_sleep = sobliv_on_sleep, + .on_wakeup = sobliv_on_wakeup, .on_exit = sobliv_on_exit, - - .on_exhausted = 
cedf_sobliv_on_exhausted,
-
	.on_inherit = sobliv_on_inherit,
	.on_disinherit = sobliv_on_disinherit,
+
+	.on_enter_top_m = sobliv_on_enter_top_m,
+	.on_exit_top_m = sobliv_on_exit_top_m,
+
+	.on_exhausted = cedf_sobliv_on_exhausted,
 };
 
 static long cedf_admit_task(struct task_struct* tsk)
 {
+	struct budget_tracker_ops* ops = NULL;
+
	if (remote_cluster(task_cpu(tsk)) != task_cpu_cluster(tsk))
		return -EINVAL;
 
	if (budget_enforced(tsk) || budget_signalled(tsk)) {
		switch(get_drain_policy(tsk)) {
		case DRAIN_SIMPLE:
-			init_budget_tracker(&tsk_rt(tsk)->budget, &cedf_drain_simple_ops);
+			ops = &cedf_drain_simple_ops;
			break;
		case DRAIN_SOBLIV:
			/* budget_policy and budget_signal_policy cannot be quantum-based */
			if (!budget_quantum_tracked(tsk) && budget_precisely_tracked(tsk)) {
-				init_budget_tracker(&tsk_rt(tsk)->budget, &cedf_drain_sobliv_ops);
+				ops = &cedf_drain_sobliv_ops;
			}
			else {
				TRACE_TASK(tsk, "QUANTUM_ENFORCEMENT and QUANTUM_SIGNALS is "
@@ -1415,6 +1602,8 @@ static long cedf_admit_task(struct task_struct* tsk)
		}
	}
 
+	init_budget_tracker(&tsk_rt(tsk)->budget, ops);
+
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
	INIT_BINHEAP_HANDLE(&tsk_rt(tsk)->hp_blocked_tasks,
		edf_max_heap_base_priority_order);
@@ -1491,24 +1680,18 @@ static int __increase_priority_inheritance(struct task_struct* t,
			sched_trace_eff_prio_change(t, prio_inh);
 
			/* clear out old inheritance relation */
-			if (NULL != old_prio_inh &&
-			    NULL != get_budget_timer(t).ops->on_disinherit &&
-			    NULL != get_budget_timer(old_prio_inh).ops->on_disinherit) {
-				get_budget_timer(t).ops->on_disinherit(t, old_prio_inh);
-			}
-			if (old_prio_inh)
+			if (old_prio_inh) {
+				budget_state_machine2(t,old_prio_inh,on_disinherit);
				clear_inh_task_linkback(t, old_prio_inh);
+			}
 
			TRACE_TASK(t, "inherits priority from %s/%d\n",
				prio_inh->comm, prio_inh->pid);
			tsk_rt(t)->inh_task = prio_inh;
 
			/* update inheritance relation */
-			if (prio_inh &&
-			    NULL != get_budget_timer(t).ops->on_inherit &&
-			    NULL != get_budget_timer(prio_inh).ops->on_inherit) {
-				get_budget_timer(t).ops->on_inherit(t, prio_inh);
-			}
+			if (prio_inh)
+				budget_state_machine2(t,prio_inh,on_inherit);
 
			linked_on = tsk_rt(t)->linked_on;
@@ -1582,8 +1765,14 @@ static int __increase_priority_inheritance(struct task_struct* t,
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
	}
	else {
+		/* Occurrence is okay under two scenarios:
+		 * 1. Fine-grained nested locks (no compiled DGL support): Concurrent
+		 *    updates are chasing each other through the wait-for chain.
+		 * 2. Budget exhaustion caused the HP waiter to lose its priority, but
+		 *    the lock structure hasn't yet been updated (but soon will be).
+		 */
		TRACE_TASK(t, "Spurious invalid priority increase. "
-			"Inheritance request: %s/%d [eff_prio = %s/%d] to inherit from %s/%d\n"
+			"Inheritance request: %s/%d [eff_prio = %s/%d] to inherit from %s/%d. "
			"Occurrence is likely okay: probably due to (hopefully safe) concurrent priority updates.\n",
			t->comm, t->pid,
			effective_priority(t)->comm, effective_priority(t)->pid,
@@ -1614,9 +1803,9 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str
 #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA)
	if(tsk_rt(t)->held_gpus) {
		int i;
-		for(i = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus));
+		for(i = find_first_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus));
			i < NV_DEVICE_NUM;
-			i = find_next_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus), i+1)) {
+			i = find_next_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus), i+1)) {
			pai_check_priority_increase(t, i);
		}
	}
@@ -1688,13 +1877,10 @@ static int __decrease_priority_inheritance(struct task_struct* t,
		}
 
		/* clear out old inheritance relation */
-		if (NULL != old_prio_inh &&
-		    NULL != get_budget_timer(t).ops->on_disinherit &&
-		    NULL != get_budget_timer(old_prio_inh).ops->on_disinherit) {
-			get_budget_timer(t).ops->on_disinherit(t, old_prio_inh);
-		}
-		if (old_prio_inh)
+		if (old_prio_inh) {
+			budget_state_machine2(t,old_prio_inh,on_disinherit);
			clear_inh_task_linkback(t, old_prio_inh);
+		}
 
		/* A job only stops inheriting a priority when it releases a
		 * resource. Thus we can make the following assumption. */
@@ -1707,11 +1893,8 @@ static int __decrease_priority_inheritance(struct task_struct* t,
		/* set up new inheritance relation */
		tsk_rt(t)->inh_task = prio_inh;
-		if (prio_inh &&
-		    NULL != get_budget_timer(t).ops->on_inherit &&
-		    NULL != get_budget_timer(prio_inh).ops->on_inherit) {
-			get_budget_timer(t).ops->on_inherit(t, prio_inh);
-		}
+		if (prio_inh)
+			budget_state_machine2(t,prio_inh,on_inherit);
 
		if(tsk_rt(t)->scheduled_on != NO_CPU) {
			TRACE_TASK(t, "is scheduled.\n");
@@ -1792,9 +1975,9 @@ static void decrease_priority_inheritance(struct task_struct* t,
 #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA)
	if(tsk_rt(t)->held_gpus) {
		int i;
-		for(i = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus));
+		for(i = find_first_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus));
			i < NV_DEVICE_NUM;
-			i = find_next_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus), i+1)) {
+			i = find_next_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus), i+1)) {
			pai_check_priority_decrease(t, i);
		}
	}
@@ -2255,6 +2438,10 @@ static long cedf_activate_plugin(void)
		raw_spin_lock_init(&cedf[i].dgl_lock);
 #endif
 
+		cedf[i].top_m_size = 0;
+		INIT_BINHEAP_HANDLE(&cedf[i].top_m, cedf_min_heap_base_priority_order);
+		INIT_BINHEAP_HANDLE(&cedf[i].not_top_m, cedf_max_heap_base_priority_order);
+
		for_each_online_cpu(cpu) {
			/* check if the cpu is already in a cluster */
			for (j = 0; j < num_clusters; j++)
@@ -2320,7 +2507,6 @@ static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = {
	.plugin_name		= "C-EDF",
	.finish_switch		= cedf_finish_switch,
	.tick			= cedf_tick,
-	.check_schedule		= cedf_check_schedule,
	.task_new		= cedf_task_new,
	.complete_job		= complete_job,
	.task_exit		= cedf_task_exit,
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 0756aaddb390..ab97d59c9587 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -1284,9 +1284,9 @@ static void increase_priority_inheritance(struct task_struct* t,
struct task_str #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA) if(tsk_rt(t)->held_gpus) { int i; - for(i = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus)); + for(i = find_first_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus)); i < NV_DEVICE_NUM; - i = find_next_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus), i+1)) { + i = find_next_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus), i+1)) { pai_check_priority_increase(t, i); } } @@ -1394,9 +1394,9 @@ static void decrease_priority_inheritance(struct task_struct* t, #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA) if(tsk_rt(t)->held_gpus) { int i; - for(i = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus)); + for(i = find_first_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus)); i < NV_DEVICE_NUM; - i = find_next_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus), i+1)) { + i = find_next_bit(&tsk_rt(t)->held_gpus, BITS_PER_BYTE*sizeof(tsk_rt(t)->held_gpus), i+1)) { pai_check_priority_decrease(t, i); } } diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c index 60b58bb29ac4..eadd4fb8e5a4 100644 --- a/litmus/sched_litmus.c +++ b/litmus/sched_litmus.c @@ -151,8 +151,8 @@ litmus_schedule(struct rq *rq, struct task_struct *prev) next->rt_param.stack_in_use = rq->cpu; next->se.exec_start = rq->clock; - if (is_realtime(next) && tsk_rt(next)->budget.ops) - tsk_rt(next)->budget.ops->on_scheduled(next); + if (is_realtime(next)) + budget_state_machine(next,on_scheduled); } return next; diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c index 74bf6b1d2ce4..0d6cb534be8b 100644 --- a/litmus/sched_plugin.c +++ b/litmus/sched_plugin.c @@ -92,10 +92,6 @@ static void litmus_dummy_tick(struct task_struct* tsk) { } -static void litmus_dummy_check_schedule(struct task_struct* tsk) -{ -} - static long litmus_dummy_admit_task(struct task_struct* tsk) { printk(KERN_CRIT "LITMUS^RT: Linux plugin rejects %s/%d.\n", @@ -247,7 +243,6 @@ struct sched_plugin linux_sched_plugin = { .complete_job = litmus_dummy_complete_job, .schedule = litmus_dummy_schedule, .finish_switch = litmus_dummy_finish_switch, - .check_schedule = litmus_dummy_check_schedule, .activate_plugin = litmus_dummy_activate_plugin, .deactivate_plugin = litmus_dummy_deactivate_plugin, .compare = litmus_dummy_compare, @@ -305,7 +300,6 @@ int register_sched_plugin(struct sched_plugin* plugin) CHECK(finish_switch); CHECK(schedule); CHECK(tick); - CHECK(check_schedule); CHECK(task_wake_up); CHECK(task_exit); CHECK(task_block); -- cgit v1.2.2
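A note on the dispatch pattern above: budget_state_machine() turns the
budget_tracker_ops table into an optional-callback state machine, where a
NULL entry simply means "no action for this event." The stand-alone sketch
below reproduces that pattern outside the kernel. All names in it are
illustrative only and are not part of the LITMUS^RT API; only the
NULL-check-then-call shape of budget_state_machine() is taken from the patch.

#include <stdio.h>

struct task;                            /* stand-in for struct task_struct */
typedef void (*bt_event_fn)(struct task *t);

struct bt_ops {
	bt_event_fn on_blocked;         /* NULL => event is a no-op */
	bt_event_fn on_wakeup;
	bt_event_fn on_enter_top_m;
	bt_event_fn on_exit_top_m;
};

struct task {
	const char *name;
	const struct bt_ops *ops;       /* may itself be NULL */
};

/* same shape as budget_state_machine(t, evt) in include/litmus/budget.h:
 * dispatch only if an ops table is present and the event slot is filled */
#define BT_STATE_MACHINE(t, evt)                        \
	do {                                            \
		if ((t)->ops && (t)->ops->evt != NULL)  \
			(t)->ops->evt(t);               \
	} while (0)

static void demo_enter_top_m(struct task *t)
{
	printf("%s: starts draining budget (entered top-m)\n", t->name);
}

static void demo_exit_top_m(struct task *t)
{
	printf("%s: stops draining budget (left top-m)\n", t->name);
}

static const struct bt_ops sobliv_like_ops = {
	.on_blocked     = NULL,         /* unused events stay NULL */
	.on_wakeup      = NULL,
	.on_enter_top_m = demo_enter_top_m,
	.on_exit_top_m  = demo_exit_top_m,
};

int main(void)
{
	struct task t = { "demo/1234", &sobliv_like_ops };

	BT_STATE_MACHINE(&t, on_enter_top_m);   /* dispatches */
	BT_STATE_MACHINE(&t, on_blocked);       /* NULL entry: silently skipped */
	BT_STATE_MACHINE(&t, on_exit_top_m);    /* dispatches */
	return 0;
}

Because unhandled events cost only a NULL check, a plugin such as C-EDF can
fill in exactly the events its drain policy cares about (compare
cedf_drain_simple_ops vs. cedf_drain_sobliv_ops in the diff) without stub
functions for the rest.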