author	Bjoern B. Brandenburg <bbb@cs.unc.edu>	2009-04-15 14:23:48 -0400
committer	Bjoern B. Brandenburg <bbb@cs.unc.edu>	2009-04-15 14:23:48 -0400
commit	c7b59d1e59d066c5b7f1b950b85b4e0314cc19b6 (patch)
tree	05ef9d358c6bbd7630817424a12cfea5e83ad130
parent	7c114618378b50e12592ab52713331af76e481c6 (diff)
add G-EDF, a version of GSN-EDF with synchronization support ripped out
This is a good base version for other schedulers.
-rw-r--r--  litmus/Makefile      |   3
-rw-r--r--  litmus/sched_gedf.c  | 502
2 files changed, 504 insertions(+), 1 deletion(-)
diff --git a/litmus/Makefile b/litmus/Makefile
index 340c43c84f..c2c88cc9f6 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -10,7 +10,8 @@ obj-y = sched_plugin.o litmus.o \
 	sched_psn_edf.o \
 	sched_cedf.o \
 	sched_pfair.o \
-	sched_gq_edf.o
+	sched_gq_edf.o \
+	sched_gedf.o
 
 obj-$(CONFIG_FEATHER_TRACE) += trace.o ft_event.o ftdev.o
 obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o
diff --git a/litmus/sched_gedf.c b/litmus/sched_gedf.c
new file mode 100644
index 0000000000..e22e847c5f
--- /dev/null
+++ b/litmus/sched_gedf.c
@@ -0,0 +1,502 @@
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/sched.h>

#include <litmus/litmus.h>
#include <litmus/jobs.h>
#include <litmus/sched_plugin.h>
#include <litmus/edf_common.h>
#include <litmus/sched_trace.h>

#include <litmus/heap.h>

#include <linux/module.h>

/* cpu_entry_t - maintain the linked and scheduled state of each CPU.
 * 'linked' is the real-time task that G-EDF has decided should run on this
 * CPU; 'scheduled' is the real-time task that is actually running there.
 * The two can differ until the CPU passes through schedule().
 */
typedef struct {
	int			cpu;
	struct task_struct*	linked;		/* only RT tasks */
	struct task_struct*	scheduled;	/* only RT tasks */
	atomic_t		will_schedule;	/* prevent unneeded IPIs */
	struct heap_node*	hn;
} cpu_entry_t;
DEFINE_PER_CPU(cpu_entry_t, gedf_cpu_entries);

cpu_entry_t* gedf_cpus[NR_CPUS];

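/* will_schedule is set once a CPU has committed to rescheduling (e.g., from
 * the tick when a budget is exhausted) and cleared at the top of
 * gedf_schedule(); preempt() uses it to avoid sending redundant IPIs.
 */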
#define set_will_schedule() \
	(atomic_set(&__get_cpu_var(gedf_cpu_entries).will_schedule, 1))
#define clear_will_schedule() \
	(atomic_set(&__get_cpu_var(gedf_cpu_entries).will_schedule, 0))
#define test_will_schedule(cpu) \
	(atomic_read(&per_cpu(gedf_cpu_entries, cpu).will_schedule))


#define NO_CPU			0xffffffff

/* the cpus queue themselves according to priority in here */
static struct heap_node	gedf_heap_node[NR_CPUS];
static struct heap	gedf_cpu_heap;

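/* There is a single, global G-EDF domain: its ready queue and the CPU heap
 * above are both protected by gedf_lock (an alias for gedf.ready_lock), and
 * all linking and preemption decisions are made while holding that lock.
 */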
static rt_domain_t gedf;
#define gedf_lock (gedf.ready_lock)


static int cpu_lower_prio(struct heap_node *_a, struct heap_node *_b)
{
	cpu_entry_t *a, *b;
	a = _a->value;
	b = _b->value;
	/* Note that a and b are inverted: we want the lowest-priority CPU at
	 * the top of the heap.
	 */
	return edf_higher_prio(b->linked, a->linked);
}

/* update_cpu_position - Move the cpu entry to the correct place to maintain
 * order in the cpu queue. Caller must hold gedf_lock.
 */
static void update_cpu_position(cpu_entry_t *entry)
{
	if (likely(heap_node_in_heap(entry->hn)))
		heap_delete(cpu_lower_prio, &gedf_cpu_heap, entry->hn);
	heap_insert(cpu_lower_prio, &gedf_cpu_heap, entry->hn);
}

/* caller must hold gedf_lock */
static cpu_entry_t* lowest_prio_cpu(void)
{
	struct heap_node* hn;
	hn = heap_peek(cpu_lower_prio, &gedf_cpu_heap);
	return hn->value;
}


/* link_task_to_cpu - Update the link of a CPU.
 *                    Handles the case where the to-be-linked task is already
 *                    scheduled on a different CPU.
 */
static noinline void link_task_to_cpu(struct task_struct* linked,
				      cpu_entry_t *entry)
{
	cpu_entry_t *sched;
	struct task_struct* tmp;
	int on_cpu;

	BUG_ON(linked && !is_realtime(linked));

	/* The currently linked task (if any) is about to be unlinked. */
	if (entry->linked) {
		entry->linked->rt_param.linked_on = NO_CPU;
	}

	/* Link new task to CPU. */
	if (linked) {
		set_rt_flags(linked, RT_F_RUNNING);
		/* handle the case where the task is already scheduled
		 * somewhere else! */
		on_cpu = linked->rt_param.scheduled_on;
		if (on_cpu != NO_CPU) {
			sched = &per_cpu(gedf_cpu_entries, on_cpu);
			/* this should only happen if not linked already */
			BUG_ON(sched->linked == linked);

			/* If we are already scheduled on the CPU to which we
			 * wanted to link, we don't need to do the swap --
			 * we just link ourselves to the CPU and depend on
			 * the caller to get things right.
			 */
			if (entry != sched) {
				TRACE_TASK(linked,
					   "already scheduled on %d, updating link.\n",
					   sched->cpu);
				tmp = sched->linked;
				linked->rt_param.linked_on = sched->cpu;
				sched->linked = linked;
				update_cpu_position(sched);
				linked = tmp;
			}
		}
		if (linked) /* might be NULL due to swap */
			linked->rt_param.linked_on = entry->cpu;
	}
	entry->linked = linked;
	if (linked)
		TRACE_TASK(linked, "linked to %d.\n", entry->cpu);
	else
		TRACE("NULL linked to %d.\n", entry->cpu);
	update_cpu_position(entry);
}

/* unlink - Make sure a task is not linked any longer to an entry
 *          where it was linked before. Must hold gedf_lock.
 */
static noinline void unlink(struct task_struct* t)
{
	cpu_entry_t *entry;

	if (unlikely(!t)) {
		TRACE_BUG_ON(!t);
		return;
	}

	if (t->rt_param.linked_on != NO_CPU) {
		/* unlink */
		entry = &per_cpu(gedf_cpu_entries, t->rt_param.linked_on);
		t->rt_param.linked_on = NO_CPU;
		link_task_to_cpu(NULL, entry);
	} else if (is_queued(t)) {
		/* This is an interesting situation: t is scheduled,
		 * but was just recently unlinked. It cannot be
		 * linked anywhere else (because then it would have
		 * been relinked to this CPU), thus it must be in some
		 * queue. We must remove it from the list in this
		 * case.
		 */
		remove(&gedf, t);
	}
}


/* preempt - force a CPU to reschedule
 */
static noinline void preempt(cpu_entry_t *entry)
{
	if (smp_processor_id() == entry->cpu) {
		set_tsk_need_resched(current);
	} else
		/* if it is a remote CPU, we have to defer the decision to
		 * the remote CPU itself
		 */
		if (!test_will_schedule(entry->cpu))
			smp_send_reschedule(entry->cpu);
}

/* requeue - Put an unlinked task into the G-EDF domain.
 *           Caller must hold gedf_lock.
 */
static noinline void requeue(struct task_struct* task)
{
	BUG_ON(!task);
	/* sanity check before insertion */
	BUG_ON(is_queued(task));

	if (is_released(task, litmus_clock()))
		__add_ready(&gedf, task);
	else {
		/* it has got to wait */
		add_release(&gedf, task);
	}
}

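/* Because cpu_lower_prio() inverts the comparison, the root of the CPU heap
 * is always the CPU whose linked task has the lowest priority (or an idle
 * CPU). It therefore suffices to compare the best ready job against that one
 * CPU: if no preemption is needed there, none is needed anywhere.
 */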
/* check for any necessary preemptions */
static void check_for_preemptions(void)
{
	struct task_struct *task;
	cpu_entry_t* last;

	for (last = lowest_prio_cpu();
	     edf_preemption_needed(&gedf, last->linked);
	     last = lowest_prio_cpu()) {
		/* preemption necessary */
		task = __take_ready(&gedf);
		TRACE("check_for_preemptions: attempting to link task %d to %d\n",
		      task->pid, last->cpu);
		if (last->linked)
			requeue(last->linked);
		link_task_to_cpu(task, last);
		preempt(last);
	}
}

/* gedf_job_arrival: task is either resumed or released */
static noinline void gedf_job_arrival(struct task_struct* task)
{
	BUG_ON(!task);

	requeue(task);
	check_for_preemptions();
}

static void gedf_release_jobs(rt_domain_t* rt, struct heap* tasks)
{
	unsigned long flags;

	spin_lock_irqsave(&gedf_lock, flags);

	__merge_ready(rt, tasks);
	check_for_preemptions();

	spin_unlock_irqrestore(&gedf_lock, flags);
}

/* caller holds gedf_lock */
static noinline void job_completion(struct task_struct *t, int forced)
{
	BUG_ON(!t);

	sched_trace_task_completion(t, forced);

	TRACE_TASK(t, "job_completion().\n");

	/* set flags */
	set_rt_flags(t, RT_F_SLEEP);
	/* prepare for next period */
	prepare_for_next_period(t);
	if (is_released(t, litmus_clock()))
		sched_trace_task_release(t);
	/* unlink */
	unlink(t);
	/* requeue
	 * But don't requeue a blocking task. */
	if (is_running(t))
		gedf_job_arrival(t);
}

/* gedf_tick - this function is called for every local timer
 *             interrupt.
 *
 * checks whether the currently scheduled task has exhausted its
 * budget and, if so, flags the CPU for rescheduling
 */
static void gedf_tick(struct task_struct* t)
{
	if (is_realtime(t) && budget_exhausted(t)) {
		set_tsk_need_resched(t);
		set_will_schedule();
	}
}

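/* gedf_schedule() - pick the next task for this CPU.
 *
 * The local situation is summarized in a few flags:
 *     exists      - a real-time task is currently scheduled here
 *     blocks      - that task is no longer runnable (it suspended)
 *     out_of_time - its budget is exhausted
 *     sleep       - it signalled job completion (RT_F_SLEEP)
 *     preempt     - the linked task differs from the scheduled one
 * Depending on these flags the task is unlinked, completed, and/or replaced
 * by whatever is currently linked to this CPU.
 */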
static struct task_struct* gedf_schedule(struct task_struct * prev)
{
	cpu_entry_t* entry = &__get_cpu_var(gedf_cpu_entries);
	int out_of_time, sleep, preempt, exists, blocks;
	struct task_struct* next = NULL;

	/* Released again below, before we return. */
	spin_lock(&gedf_lock);
	clear_will_schedule();

	/* sanity checking */
	BUG_ON(entry->scheduled && entry->scheduled != prev);
	BUG_ON(entry->scheduled && !is_realtime(prev));
	BUG_ON(is_realtime(prev) && !entry->scheduled);

	/* (0) Determine state */
	exists      = entry->scheduled != NULL;
	blocks      = exists && !is_running(entry->scheduled);
	out_of_time = exists && budget_exhausted(entry->scheduled);
	sleep       = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP;
	preempt     = entry->scheduled != entry->linked;

	TRACE_TASK(prev, "invoked gedf_schedule.\n");

	if (exists)
		TRACE_TASK(prev,
			   "blocks:%d out_of_time:%d sleep:%d preempt:%d "
			   "state:%d sig:%d\n",
			   blocks, out_of_time, sleep, preempt,
			   prev->state, signal_pending(prev));
	if (entry->linked && preempt)
		TRACE_TASK(prev, "will be preempted by %s/%d\n",
			   entry->linked->comm, entry->linked->pid);


	/* If a task blocks we have no choice but to reschedule.
	 */
	if (blocks)
		unlink(entry->scheduled);


	/* Any task that is preemptable and either exhausts its execution
	 * budget or wants to sleep completes. We may have to reschedule after
	 * this. Don't do a job completion if we block (can't have timers
	 * running for blocked jobs). Preemptions go first for the same reason.
	 */
	if ((out_of_time || sleep) && !blocks && !preempt)
		job_completion(entry->scheduled, !sleep);

	/* Link pending task if we became unlinked.
	 */
	if (!entry->linked)
		link_task_to_cpu(__take_ready(&gedf), entry);

	/* The final scheduling decision. Do we need to switch for some reason?
	 * If linked is different from scheduled, then select linked as next.
	 */
	if (entry->linked != entry->scheduled) {
		/* Schedule a linked job? */
		if (entry->linked) {
			entry->linked->rt_param.scheduled_on = entry->cpu;
			next = entry->linked;
		}
		if (entry->scheduled) {
			/* not gonna be scheduled soon */
			entry->scheduled->rt_param.scheduled_on = NO_CPU;
			TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n");
		}
	} else
		/* Only override Linux scheduler if we have a real-time task
		 * scheduled that needs to continue.
		 */
		if (exists)
			next = prev;

	spin_unlock(&gedf_lock);

	TRACE("gedf_lock released, next=0x%p\n", next);


	if (next)
		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
	else if (exists && !next)
		TRACE("becomes idle at %llu.\n", litmus_clock());


	return next;
}


/* _finish_switch - we just finished the switch away from prev
 */
static void gedf_finish_switch(struct task_struct *prev)
{
	cpu_entry_t* entry = &__get_cpu_var(gedf_cpu_entries);

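	/* Only now, after the context switch has completed, is 'current' the
	 * task that actually runs on this CPU; record it (or NULL if it is
	 * not a real-time task).
	 */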
	entry->scheduled = is_realtime(current) ? current : NULL;
	TRACE_TASK(prev, "switched away from\n");
}


/* Prepare a task for running in RT mode
 */
static void gedf_task_new(struct task_struct * t, int on_rq, int running)
{
	unsigned long flags;
	cpu_entry_t* entry;

	TRACE("gedf: task new %d\n", t->pid);

	spin_lock_irqsave(&gedf_lock, flags);
	if (running) {
		entry = &per_cpu(gedf_cpu_entries, task_cpu(t));
		BUG_ON(entry->scheduled);
		entry->scheduled = t;
		t->rt_param.scheduled_on = task_cpu(t);
	} else
		t->rt_param.scheduled_on = NO_CPU;
	t->rt_param.linked_on = NO_CPU;

	/* setup job params */
	release_at(t, litmus_clock());

	gedf_job_arrival(t);
	spin_unlock_irqrestore(&gedf_lock, flags);
}

static void gedf_task_wake_up(struct task_struct *task)
{
	unsigned long flags;
	lt_t now;

	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());

	spin_lock_irqsave(&gedf_lock, flags);
	/* We need to take suspensions because of semaphores into
	 * account! If a job resumes after being suspended due to acquiring
	 * a semaphore, it should never be treated as a new job release.
	 */
	if (get_rt_flags(task) == RT_F_EXIT_SEM) {
		set_rt_flags(task, RT_F_RUNNING);
	} else {
		now = litmus_clock();
		if (is_tardy(task, now)) {
			/* new sporadic release */
			release_at(task, now);
			sched_trace_task_release(task);
		}
		else if (task->time_slice)
			/* came back in time before deadline
			 */
			set_rt_flags(task, RT_F_RUNNING);
	}
	gedf_job_arrival(task);
	spin_unlock_irqrestore(&gedf_lock, flags);
}

static void gedf_task_block(struct task_struct *t)
{
	unsigned long flags;

	TRACE_TASK(t, "block at %llu\n", litmus_clock());

	/* unlink if necessary */
	spin_lock_irqsave(&gedf_lock, flags);
	unlink(t);
	spin_unlock_irqrestore(&gedf_lock, flags);

	BUG_ON(!is_realtime(t));
}


static void gedf_task_exit(struct task_struct * t)
{
	unsigned long flags;

	/* unlink if necessary */
	spin_lock_irqsave(&gedf_lock, flags);
	unlink(t);
	if (tsk_rt(t)->scheduled_on != NO_CPU) {
		gedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
		tsk_rt(t)->scheduled_on = NO_CPU;
	}
	spin_unlock_irqrestore(&gedf_lock, flags);

	BUG_ON(!is_realtime(t));
	TRACE_TASK(t, "RIP\n");
}

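/* G-EDF places no restrictions on the tasks it accepts: every real-time
 * task is admitted.
 */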
static long gedf_admit_task(struct task_struct* tsk)
{
	return 0;
}


/* Plugin object */
static struct sched_plugin gedf_plugin __cacheline_aligned_in_smp = {
	.plugin_name = "G-EDF",
	.finish_switch = gedf_finish_switch,
	.tick = gedf_tick,
	.task_new = gedf_task_new,
	.complete_job = complete_job,
	.task_exit = gedf_task_exit,
	.schedule = gedf_schedule,
	.task_wake_up = gedf_task_wake_up,
	.task_block = gedf_task_block,
	.admit_task = gedf_admit_task
};


static int __init init_gedf(void)
{
	int cpu;
	cpu_entry_t *entry;

	heap_init(&gedf_cpu_heap);
	/* initialize CPU state */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		entry = &per_cpu(gedf_cpu_entries, cpu);
		gedf_cpus[cpu] = entry;
		atomic_set(&entry->will_schedule, 0);
		entry->linked = NULL;
		entry->scheduled = NULL;
		entry->cpu = cpu;
		entry->hn = &gedf_heap_node[cpu];
		heap_node_init(&entry->hn, entry);
	}
	edf_domain_init(&gedf, NULL, gedf_release_jobs);
	return register_sched_plugin(&gedf_plugin);
}


module_init(init_gedf);
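
To see the linking policy in isolation, the following stand-alone sketch models the core of check_for_preemptions() in user space. It is only an illustration, not part of the patch or of the LITMUS^RT API: the names (struct job, struct cpu, job_arrival(), higher_prio()) are made up for this example, there is no heap, no linked/scheduled distinction, and no locking; an earlier absolute deadline simply wins.

/* gedf_model.c -- toy user-space model of the G-EDF linking decision */
#include <stdio.h>

#define NCPUS 2

struct job { int id; long long deadline; };
struct cpu { int id; struct job* linked; };	/* linked == NULL: idle */

/* earlier absolute deadline means higher priority; NULL is lowest */
static int higher_prio(struct job* a, struct job* b)
{
	if (!a)
		return 0;
	if (!b)
		return 1;
	return a->deadline < b->deadline;
}

/* stand-in for the CPU heap: scan for the lowest-priority CPU */
static struct cpu* lowest_prio_cpu(struct cpu cpus[])
{
	int i;
	struct cpu* worst = &cpus[0];
	for (i = 1; i < NCPUS; i++)
		if (higher_prio(worst->linked, cpus[i].linked))
			worst = &cpus[i];
	return worst;
}

/* link j if it beats the lowest-priority linked job; return the displaced job */
static struct job* job_arrival(struct cpu cpus[], struct job* j)
{
	struct cpu* last = lowest_prio_cpu(cpus);
	struct job* displaced = NULL;
	if (higher_prio(j, last->linked)) {
		displaced = last->linked;	/* the kernel would requeue this */
		last->linked = j;
		printf("job %d linked to cpu %d\n", j->id, last->id);
	} else
		printf("job %d stays queued (no preemption needed)\n", j->id);
	return displaced;
}

int main(void)
{
	struct cpu cpus[NCPUS] = { {0, NULL}, {1, NULL} };
	struct job j1 = {1, 100}, j2 = {2, 50}, j3 = {3, 75};

	job_arrival(cpus, &j1);	/* links to an idle CPU */
	job_arrival(cpus, &j2);	/* links to the other idle CPU */
	job_arrival(cpus, &j3);	/* deadline 75 < 100: displaces job 1 */
	return 0;
}

Compiled with any C compiler, the third arrival shows job 3 (deadline 75) displacing job 1 (deadline 100) on its CPU while job 2 keeps its CPU; the kernel version additionally requeues the displaced job and triggers a reschedule on the victim CPU via preempt().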