/*
* litmus/sched_edzl.c
*
* Implementation of the EDZL scheduling algorithm.
*
* This version uses the simple approach and serializes all scheduling
* decisions by the use of a queue lock. This is probably not the
* best way to do it, but it should suffice for now.
*/
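
/* EDZL in a nutshell: jobs are scheduled by EDF until a job's laxity
 * (deadline - now - remaining budget) reaches zero; a zero-laxity job is
 * then prioritized over every job that still has positive laxity. Thus
 * edzl_higher_prio() (declared in edzl_common.h) behaves, in effect, like:
 *
 *   if (get_zerolaxity(a) != get_zerolaxity(b))
 *           return get_zerolaxity(a);   // zero-laxity job wins
 *   return edf_higher_prio(a, b);       // otherwise plain EDF order
 *
 * (Sketch only; the actual comparison lives alongside edzl_common.h.)
 */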
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/module.h>

#include <litmus/litmus.h>
#include <litmus/jobs.h>
#include <litmus/sched_global_plugin.h>
#include <litmus/edzl_common.h>
#include <litmus/sched_trace.h>
#include <litmus/preempt.h>
#include <litmus/bheap.h>
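
/* forward declarations of the EDZL-specific operations wired up below */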
static struct task_struct* __edzl_take_ready(rt_domain_t* rt);
static void __edzl_add_ready(rt_domain_t* rt, struct task_struct *new);
static void edzl_job_arrival(struct task_struct* task);
static void edzl_task_new(struct task_struct * t, int on_rq, int running);
static void edzl_task_wake_up(struct task_struct *task);
static void edzl_task_exit(struct task_struct * t);
static int edzl_preemption_needed(struct task_struct *t);
/* EDZL plugin object: callbacks prefixed gbl_/gblv_ are inherited from the
 * generic global plugin; the rest are EDZL-specific. */
static struct sched_global_plugin edzl_plugin __cacheline_aligned_in_smp = {
.plugin = {
.finish_switch = gblv_finish_switch,
.tick = gblv_tick,
.complete_job = complete_job,
.schedule = gblv_schedule,
.task_block = gblv_task_block,
.admit_task = gblv_admit_task,
.activate_plugin = gbl_activate_plugin,
.plugin_name = "EDZL",
.task_new = edzl_task_new,
.task_wake_up = edzl_task_wake_up,
.task_exit = edzl_task_exit,
},
.job_completion = gbl_job_completion,
.prio_order = edzl_higher_prio,
.take_ready = __edzl_take_ready,
.add_ready = __edzl_add_ready,
.job_arrival = edzl_job_arrival,
.preemption_needed = edzl_preemption_needed
};
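
/* shorthands for the ready-queue domain of the active global plugin */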
#define active_gbl_domain (active_gbl_plugin->domain)
#define active_gbl_domain_lock (active_gbl_domain.ready_lock)
DEFINE_PER_CPU(cpu_entry_t, edzl_cpu_entries);
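
/* Timer callback: fires when a queued job reaches its zero-laxity point.
 * Flags the job as zero-laxity and updates its queue position to reflect
 * its raised priority. */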
static enum hrtimer_restart on_zero_laxity(struct hrtimer *timer)
{
unsigned long flags;
struct task_struct* t;
lt_t now = litmus_clock();
TRACE("Zero-laxity timer went off!\n");
raw_spin_lock_irqsave(&active_gbl_domain_lock, flags);
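	/* the timer is embedded in the task's rt_param; recover the task */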
t = container_of(container_of(timer, struct rt_param, zl_timer),
struct task_struct,
rt_param);
TRACE_TASK(t, "Reached zero-laxity. (now: %llu, zl-pt: %lld, time remaining (now): %lld)\n",
now,
get_deadline(t) - budget_remaining(t),
get_deadline(t) - now);
set_zerolaxity(t);
gbl_update_queue_position(t);
raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags);
return HRTIMER_NORESTART;
}

/* __edzl_take_ready - calls __take_ready, cancelling the dequeued task's
 * zero-laxity timer as a side effect. */
static struct task_struct* __edzl_take_ready(rt_domain_t* rt)
{
	struct task_struct* t = __take_ready(rt);

	if (t) {
		if (get_zerolaxity(t) == 0) {
			if (hrtimer_active(&tsk_rt(t)->zl_timer)) {
				int cancel_ret;

				TRACE_TASK(t, "Canceling zero-laxity timer.\n");
				cancel_ret = hrtimer_try_to_cancel(&tsk_rt(t)->zl_timer);
				WARN_ON(cancel_ret == 0); /* should never be inactive. */
			}
		} else {
			TRACE_TASK(t, "Task already has zero-laxity flagged.\n");
		}
	}

	return t;
}

/* __edzl_add_ready - calls __add_ready, arming the zero-laxity timer as a
 * side effect. */
static void __edzl_add_ready(rt_domain_t* rt, struct task_struct *new)
{
	__add_ready(rt, new);

	if (get_zerolaxity(new) == 0) {
		/* zero-laxity point: deadline minus remaining budget */
		lt_t when_to_fire = get_deadline(new) - budget_remaining(new);

		TRACE_TASK(new, "Setting zero-laxity timer for %llu. (deadline: %llu, remaining: %llu)\n",
			when_to_fire,
			get_deadline(new),
			budget_remaining(new));

		__hrtimer_start_range_ns(&tsk_rt(new)->zl_timer,
					 ns_to_ktime(when_to_fire),
					 0,
					 HRTIMER_MODE_ABS_PINNED,
					 0);
	} else {
		TRACE_TASK(new, "Already has zero-laxity when added to ready queue. (deadline: %llu, remaining: %llu)\n",
			get_deadline(new),
			budget_remaining(new));
	}
}

/* edzl_job_arrival: task is either resumed or released */
static void edzl_job_arrival(struct task_struct* task)
{
	BUG_ON(!task);

	/* clear a stale laxity flag, or tag zero-laxity upon release */
	if (laxity_remaining(task))
		clear_zerolaxity(task);
	else
		set_zerolaxity(task);

	gbl_requeue(task);
	gbl_check_for_preemptions();
}

/* Prepare a task for running in RT mode. */
static void edzl_task_new(struct task_struct * t, int on_rq, int running)
{
unsigned long flags;
cpu_entry_t* entry;
TRACE("edzl: task new %d\n", t->pid);
raw_spin_lock_irqsave(&active_gbl_domain_lock, flags);
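	/* set up this task's zero-laxity timer */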
hrtimer_init(&t->rt_param.zl_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
t->rt_param.zl_timer.function = on_zero_laxity;
/* setup job params */
release_at(t, litmus_clock());
if (running) {
entry = active_gbl_plugin->cpus[task_cpu(t)];
BUG_ON(entry->scheduled);
#ifdef CONFIG_RELEASE_MASTER
if (entry->cpu != active_gbl_domain.release_master) {
#endif
entry->scheduled = t;
tsk_rt(t)->scheduled_on = task_cpu(t);
#ifdef CONFIG_RELEASE_MASTER
} else {
/* do not schedule on release master */
gbl_preempt(entry); /* force resched */
tsk_rt(t)->scheduled_on = NO_CPU;
}
#endif
} else {
t->rt_param.scheduled_on = NO_CPU;
}
t->rt_param.linked_on = NO_CPU;
active_gbl_plugin->job_arrival(t);
raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags);
}
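
/* edzl_task_wake_up: a suspended job has resumed. A wake-up after the
 * job's deadline is treated as a new sporadic release. */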
static void edzl_task_wake_up(struct task_struct *task)
{
unsigned long flags;
lt_t now;
TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
raw_spin_lock_irqsave(&active_gbl_domain_lock, flags);
/* We need to take suspensions because of semaphores into
* account! If a job resumes after being suspended due to acquiring
* a semaphore, it should never be treated as a new job release.
*/
if (get_rt_flags(task) == RT_F_EXIT_SEM) {
set_rt_flags(task, RT_F_RUNNING);
} else {
now = litmus_clock();
if (is_tardy(task, now)) {
/* new sporadic release */
release_at(task, now);
sched_trace_task_release(task);
		} else if (task->rt.time_slice) {
			/* came back in time, before its deadline */
			set_rt_flags(task, RT_F_RUNNING);
		}
}
active_gbl_plugin->job_arrival(task);
raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags);
}
static void edzl_task_exit(struct task_struct * t)
{
unsigned long flags;
/* unlink if necessary */
raw_spin_lock_irqsave(&active_gbl_domain_lock, flags);
gbl_unlink(t);
if (tsk_rt(t)->scheduled_on != NO_CPU) {
active_gbl_plugin->cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
tsk_rt(t)->scheduled_on = NO_CPU;
}
	if (hrtimer_active(&tsk_rt(t)->zl_timer)) {
		/* BUG if reached? */
		TRACE_TASK(t, "Canceled armed timer while exiting.\n");
		hrtimer_cancel(&tsk_rt(t)->zl_timer);
	}
raw_spin_unlock_irqrestore(&active_gbl_domain_lock, flags);
BUG_ON(!is_realtime(t));
TRACE_TASK(t, "RIP\n");
}

/* edzl_preemption_needed - check whether the currently scheduled task t
 * needs to be preempted.
 * Call only with IRQs disabled and with the ready_lock acquired.
 * THIS DOES NOT TAKE NON-PREEMPTIVE SECTIONS INTO ACCOUNT!
 */
static int edzl_preemption_needed(struct task_struct *t)
{
	/* caller must hold active_gbl_domain_lock */

	/* no need to preempt if there is nothing pending */
if (!__jobs_pending(&active_gbl_domain))
return 0;
/* we need to reschedule if t doesn't exist */
if (!t)
return 1;
/* make sure to get non-rt stuff out of the way */
if (!is_realtime(t))
return 1;
/* NOTE: We cannot check for non-preemptibility since we
* don't know what address space we're currently in.
*/
	/* Detect zero-laxity as needed; easier to do here than in the tick.
	 * (No timer is used to detect zero-laxity while a job is running.) */
	if (unlikely(!get_zerolaxity(t) && laxity_remaining(t) == 0))
		set_zerolaxity(t);
return edzl_higher_prio(__next_ready(&active_gbl_domain), t);
}
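
/* Module init: initialize per-CPU state and register the EDZL plugin. */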
static int __init init_edzl(void)
{
int cpu;
cpu_entry_t *entry;
bheap_init(&edzl_plugin.cpu_heap);
/* initialize CPU state */
for (cpu = 0; cpu < NR_CPUS; cpu++) {
entry = &per_cpu(edzl_cpu_entries, cpu);
edzl_plugin.cpus[cpu] = entry;
entry->cpu = cpu;
entry->hn = &edzl_plugin.heap_node[cpu];
bheap_node_init(&entry->hn, entry);
}
gbl_domain_init(&edzl_plugin, NULL, gbl_release_jobs);
return register_sched_plugin(&edzl_plugin.plugin);
}
module_init(init_edzl);