#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>

#include <litmus/litmus.h>
#include <litmus/preempt.h>
#include <litmus/budget.h>
#include <litmus/sched_trace.h>

struct enforcement_timer {
        /* The enforcement timer is used to accurately police
         * slice budgets. */
        struct hrtimer timer;
        int armed;
};
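
/* One enforcement timer per processor; each timer is armed and
 * cancelled only from its own CPU with interrupts disabled, so no
 * locking is required. */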
DEFINE_PER_CPU(struct enforcement_timer, budget_timer);
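
/* Expiry path: runs in hrtimer (interrupt) context on the CPU that
 * armed the timer. The handler only clears the armed flag and requests
 * a local reschedule; the plugin is expected to observe the exhausted
 * budget when its schedule() callback subsequently runs. */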
static enum hrtimer_restart on_enforcement_timeout(struct hrtimer *timer)
{
        struct enforcement_timer* et = container_of(timer,
                                                    struct enforcement_timer,
                                                    timer);
        unsigned long flags;

        local_irq_save(flags);
        TRACE("enforcement timer fired.\n");
        et->armed = 0;
        /* activate scheduler */
        litmus_reschedule_local();
        local_irq_restore(flags);

        return HRTIMER_NORESTART;
}

/* assumes called with IRQs off */
static void cancel_enforcement_timer(struct enforcement_timer* et)
{
        int ret;

        TRACE("cancelling enforcement timer.\n");

        /* Since interrupts are disabled and et->armed is only
         * modified locally, we do not need any locks.
         */
        if (et->armed) {
                ret = hrtimer_try_to_cancel(&et->timer);
                /* Should never be inactive. */
                BUG_ON(ret == 0);
                /* Should never be running concurrently. */
                BUG_ON(ret == -1);

                et->armed = 0;
        }
}

/* assumes called with IRQs off */
static void arm_enforcement_timer(struct enforcement_timer* et,
                                  struct task_struct* t)
{
        lt_t when_to_fire;

        TRACE_TASK(t, "arming enforcement timer.\n");

        /* Calling this when there is no budget left for the task
         * makes no sense, unless the task is non-preemptive. */
        /* BUG_ON(budget_exhausted(t) && (!is_np(t))); */

        /* __hrtimer_start_range_ns() cancels the timer
         * anyway, so we don't have to check whether it is still armed */
        if (likely(!is_np(t))) {
                when_to_fire = litmus_clock() + budget_remaining(t);
                __hrtimer_start_range_ns(&et->timer,
                                         ns_to_ktime(when_to_fire),
                                         0 /* delta */,
                                         HRTIMER_MODE_ABS_PINNED,
                                         0 /* no wakeup */);
                et->armed = 1;
        }
}
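
/* Arm or disarm this CPU's enforcement timer for the task that is
 * about to run: the timer is armed when t's budget is precisely
 * enforced, and a pending timer is cancelled otherwise so that no
 * spurious interrupt fires. */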
/* expects to be called with IRQs off */
void update_enforcement_timer(struct task_struct* t)
{
        struct enforcement_timer* et = &__get_cpu_var(budget_timer);

        if (t && budget_precisely_enforced(t)) {
                /* Make sure we call into the scheduler when this budget
                 * expires. */
                arm_enforcement_timer(et, t);
        } else if (et->armed) {
                /* Make sure we don't cause unnecessary interrupts. */
                cancel_enforcement_timer(et);
        }
}

static int __init init_budget_enforcement(void)
{
        int cpu;
        struct enforcement_timer* et;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                et = &per_cpu(budget_timer, cpu);
                hrtimer_init(&et->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
                et->timer.function = on_enforcement_timeout;
        }
        return 0;
}
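
/* Job vs. server accounting: task_release() advances the job's logical
 * ("real") release time and deadline and increments the job number;
 * server_release() replenishes the budget by resetting the consumed
 * execution time and pushing the scheduling release and deadline one
 * period forward. */
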
void task_release(struct task_struct *t)
{
        t->rt_param.job_params.real_release = t->rt_param.job_params.real_deadline;
        t->rt_param.job_params.real_deadline += get_rt_period(t);
        t->rt_param.job_params.job_no++;
        TRACE_TASK(t, "Releasing task, rr=%llu rd=%llu\n",
                   t->rt_param.job_params.real_release,
                   t->rt_param.job_params.real_deadline);
        sched_trace_task_release(t);
}

void server_release(struct task_struct *t)
{
        t->rt_param.job_params.exec_time = 0;
        t->rt_param.job_params.release = t->rt_param.job_params.deadline;
        t->rt_param.job_params.deadline += get_rt_period(t);
        TRACE_TASK(t, "Releasing server, r=%llu d=%llu\n",
                   t->rt_param.job_params.release,
                   t->rt_param.job_params.deadline);
        /* don't confuse linux */
        t->rt.time_slice = 1;
}
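
/* Advance server and/or task state at a job boundary: the server is
 * replenished if its budget was forcibly exhausted (forced) or the job
 * is running behind, while the logical task release only happens when
 * the job completed on its own (!forced). */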
void prepare_for_next_server(struct task_struct *t, int forced)
{
        if (forced || job_behind(t)) {
                server_release(t);
        }
        if (!forced) {
                task_release(t);
        }
}

module_init(init_budget_enforcement);
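
/*
 * Minimal usage sketch for the budget-enforcement API above. This is
 * illustrative only: demo_schedule() and pick_next() are hypothetical
 * stand-ins for a plugin's dispatching logic, not real LITMUS^RT APIs.
 *
 *      static struct task_struct* demo_schedule(struct task_struct *prev)
 *      {
 *              struct task_struct *next = pick_next();
 *
 *              (arms the budget timer if next is precisely enforced,
 *               cancels any stale timer otherwise)
 *              update_enforcement_timer(next);
 *              return next;
 *      }
 *
 * In LITMUS^RT, update_enforcement_timer() is typically called from the
 * core scheduling path once the next task has been chosen, so plugins
 * normally do not need to call it themselves.
 */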