#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/signal.h>
#include <litmus/litmus.h>
#include <litmus/preempt.h>
#include <litmus/sched_plugin.h>
#include <litmus/budget.h>
#include <litmus/signal.h>
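
/*
 * Budget enforcement helpers: per-task enforcement timers, the SIG_BUDGET
 * notification signal, and the callback sets for the DRAIN_SIMPLE and
 * DRAIN_SOBLIV budget-draining policies, which plugins hook up through
 * struct budget_tracker_ops.
 */

/* Disarm t's budget enforcement timer, tolerating a race with the timer
 * firing concurrently (hence the re-check of et->armed under et->lock). */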
inline static void cancel_enforcement_timer(struct task_struct* t)
{
	struct enforcement_timer* et;
	int ret;
	unsigned long flags;

	BUG_ON(!t);
	BUG_ON(!is_realtime(t));

	et = &tsk_rt(t)->budget.timer;

	TRACE("cancelling enforcement timer.\n");

	if (et->armed) {
		raw_spin_lock_irqsave(&et->lock, flags);
		if (et->armed) {
			ret = hrtimer_try_to_cancel(&et->timer);
			et->armed = 0;
		}
		else {
			TRACE("timer was not armed (race).\n");
		}
		raw_spin_unlock_irqrestore(&et->lock, flags);
	}
	else {
		TRACE("timer was not armed.\n");
	}
}
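
/* Arm t's enforcement timer to fire once the remaining budget is consumed.
 * Bails out early if the timer is already armed, if no budget remains, or if
 * exhaustion has already been fully handled (enforced and/or signalled). */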
inline static void arm_enforcement_timer(struct task_struct* t)
{
	struct enforcement_timer* et;
	lt_t when_to_fire, remaining_budget;
	lt_t now;
	unsigned long flags;

	BUG_ON(!t);
	BUG_ON(!is_realtime(t));

	et = &tsk_rt(t)->budget.timer;

	if (et->armed) {
		TRACE_TASK(t, "timer already armed!\n");
		return;
	}

	/* Calling this when there is no budget left for the task
	 * makes no sense, unless the task is non-preemptive. */
	if (budget_exhausted(t)) {
		TRACE_TASK(t, "can't arm timer because no budget remaining\n");
		return;
	}

	if ( (!budget_enforced(t) ||
			(budget_enforced(t) && bt_flag_is_set(t, BTF_BUDGET_EXHAUSTED)))
		&&
		(!budget_signalled(t) ||
			(budget_signalled(t) && bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)))) {
		TRACE_TASK(t, "trying to arm timer when budget has already been exhausted.\n");
		return;
	}

	TRACE_TASK(t, "arming enforcement timer.\n");

	/* __hrtimer_start_range_ns() cancels the timer
	 * anyway, so we don't have to check whether it is still armed */
	raw_spin_lock_irqsave(&et->lock, flags);

	if (et->armed) {
		TRACE_TASK(t, "timer already armed (race)!\n");
		goto out;
	}

	now = litmus_clock();
	remaining_budget = budget_remaining(t);
	when_to_fire = now + remaining_budget;
TRACE_TASK(t, "budget remaining: %ld, when_to_fire: %ld\n", remaining_budget, when_to_fire);
	__hrtimer_start_range_ns(&et->timer,
			ns_to_ktime(when_to_fire),
			0 /* delta */,
			HRTIMER_MODE_ABS_PINNED, /* TODO: need to use non-pinned? */
			0 /* no wakeup */);
	et->armed = 1;

out:
	raw_spin_unlock_irqrestore(&et->lock, flags);
}
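
/* Deliver SIG_BUDGET to t at most once while BTF_SIG_BUDGET_SENT remains set:
 * the test-and-set makes whoever flips the bit responsible for sending. */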
void send_sigbudget(struct task_struct* t)
{
	if (!bt_flag_test_and_set(t, BTF_SIG_BUDGET_SENT)) {
		/* signal has not yet been sent and we are responsible for sending
		 * since we just set the sent-bit when it was previously 0. */
		TRACE_TASK(t, "SIG_BUDGET being sent!\n");
		send_sig(SIG_BUDGET, t, 1); /* '1' denotes signal sent from kernel */
	}
}

/*
 * DRAIN_SIMPLE
 */
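
/* Simple draining: budget is charged only while the task is actually
 * scheduled. The enforcement timer is armed when the task goes on a CPU and
 * cancelled whenever it comes off (block, preempt, sleep, exit). */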
void simple_on_scheduled(struct task_struct* t)
{
	BUG_ON(!t);

	if (budget_precisely_tracked(t) && !bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)) {
		BUG_ON(tsk_rt(t)->budget.timer.armed);
		arm_enforcement_timer(t);
	}
}
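
/* Common deschedule path for DRAIN_SIMPLE: stop enforcement when a precisely
 * tracked task leaves the CPU, regardless of why (see the wrappers below). */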
inline static void __simple_on_unscheduled(struct task_struct* t)
{
	BUG_ON(!t);

	if (budget_precisely_tracked(t)) {
		cancel_enforcement_timer(t);
	}
}

void simple_on_blocked(struct task_struct* t)
{
	__simple_on_unscheduled(t);
}

void simple_on_preempt(struct task_struct* t)
{
	__simple_on_unscheduled(t);
}

void simple_on_sleep(struct task_struct* t)
{
	__simple_on_unscheduled(t);
}

void simple_on_exit(struct task_struct* t)
{
	__simple_on_unscheduled(t);
}

/*
 * DRAIN_SOBLIV
 */
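
/* Suspension-oblivious draining: budget continues to drain while the task is
 * suspended, and a task stops draining its own budget while it inherits a
 * priority (see sobliv_on_inherit()/sobliv_on_disinherit() below). */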
void sobliv_on_scheduled(struct task_struct* t)
{
	BUG_ON(!t);

	if (!bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)) {
		if (tsk_rt(t)->budget.timer.armed) {
			TRACE_TASK(t, "budget timer already armed.\n");
		}
		else {
			arm_enforcement_timer(t);
		}
	}

	if (tsk_rt(t)->inh_task) {
		BUG_ON(is_running(tsk_rt(t)->inh_task));
		tsk_rt(tsk_rt(t)->inh_task)->inh_task_linkback = t;
	}
}

void sobliv_on_blocked(struct task_struct* t)
{
	/* NOOP */
	TRACE_TASK(t, "sobliv: budget drains while suspended.\n");
}
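
/* Job completion (sleep): only cancel the enforcement timer if no backlogged
 * work remains to be charged against this task's budget. */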
void sobliv_on_sleep(struct task_struct* t)
{
	if (budget_precisely_tracked(t)) {
		/* Kludge: this callback runs before the job_completion logic,
		 * so we have to work out ourselves whether any backlog remains
		 * once this job (which is completing, since sleep is asserted)
		 * finishes. */
		int no_backlog = (!has_backlog(t) || /* no backlog */
			/* the last backlogged job is completing */
			(get_backlog(t) == 1 &&
				tsk_rt(t)->job_params.is_backlogged_job));
		if (no_backlog)
			cancel_enforcement_timer(t);
		else
			TRACE_TASK(t, "not cancelling timer because there is time for backlogged work.\n");
	}
}
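
/* While t inherits a priority, its own enforcement timer is stopped; it is
 * re-armed once t returns to its base priority in sobliv_on_disinherit(). */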
void sobliv_on_inherit(struct task_struct* t, struct task_struct* prio_inh)
{
	BUG_ON(!prio_inh);

	if (budget_precisely_tracked(t)) {
		TRACE_TASK(t, "inheriting from %s/%d. stop draining own budget.\n",
			prio_inh->comm, prio_inh->pid);
		cancel_enforcement_timer(t);
	}
}

void sobliv_on_disinherit(struct task_struct* t, struct task_struct* prio_inh)
{
	if (!prio_inh && budget_precisely_tracked(t)) {
		TRACE_TASK(t, "assuming base priority. start draining own budget.\n");
		arm_enforcement_timer(t);
	}
}
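
/* hrtimer callback: mark the timer disarmed, let the tracker's on_exhausted()
 * handler run, and record whether the handler asked for the timer to restart. */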
static enum hrtimer_restart __on_timeout(struct hrtimer *timer)
{
	enum hrtimer_restart restart;
	unsigned long flags;

	struct budget_tracker* bt =
		container_of(
			container_of(timer,
				struct enforcement_timer,
				timer),
			struct budget_tracker,
			timer);

	struct task_struct* t =
		container_of(
			container_of(bt, struct rt_param, budget),
			struct task_struct,
			rt_param);
TRACE_TASK(t, "budget timer interrupt fired at time %lu\n", litmus_clock());

	raw_spin_lock_irqsave(&bt->timer.lock, flags);
	tsk_rt(t)->budget.timer.armed = 0;
	raw_spin_unlock_irqrestore(&bt->timer.lock, flags);

	restart = bt->ops->on_exhausted(t);

	raw_spin_lock_irqsave(&bt->timer.lock, flags);
	tsk_rt(t)->budget.timer.armed = (restart == HRTIMER_RESTART);
	raw_spin_unlock_irqrestore(&bt->timer.lock, flags);

	return restart;
}
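
/* Initialize a task's budget tracker with the given callback set. All
 * callbacks checked below are mandatory. */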
void init_budget_tracker(struct budget_tracker* bt, const struct budget_tracker_ops* ops)
{
	BUG_ON(!bt);
	BUG_ON(!ops);
	BUG_ON(!ops->on_scheduled);
	BUG_ON(!ops->on_blocked);
	BUG_ON(!ops->on_preempt);
	BUG_ON(!ops->on_sleep);
	BUG_ON(!ops->on_exhausted);

	memset(bt, 0, sizeof(*bt));
	raw_spin_lock_init(&bt->timer.lock);
	hrtimer_init(&bt->timer.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	bt->timer.timer.function = __on_timeout;
	bt->ops = ops;
}