author		Bjoern B. Brandenburg <bbb@cs.unc.edu>	2009-04-27 18:30:19 -0400
committer	Bjoern B. Brandenburg <bbb@cs.unc.edu>	2009-04-27 18:30:19 -0400
commit		cc9555cec51e33d1590cabcedd35c30f11712b70 (patch)
tree		d0d524d8d11df518d4ee0b0ffcfaea6ec767b80f
parent		332257053f1e004a8f46d64bbea9ce8b2522e0df (diff)
GHQ-EDF: first shot at hierarchical queues implementation
seems to run in QEMU
-rw-r--r--	litmus/Makefile		|   3 +-
-rw-r--r--	litmus/sched_ghq_edf.c	| 717 ++++++++++++++++++++++++++++++++++
2 files changed, 719 insertions(+), 1 deletion(-)
diff --git a/litmus/Makefile b/litmus/Makefile
index b8188cc35a..28a6d79916 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -12,7 +12,8 @@ obj-y = sched_plugin.o litmus.o \
 	sched_cedf.o \
 	sched_pfair.o \
 	sched_gq_edf.o \
-	sched_gedf.o
+	sched_gedf.o \
+	sched_ghq_edf.o
 
 obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
 obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o
diff --git a/litmus/sched_ghq_edf.c b/litmus/sched_ghq_edf.c
new file mode 100644
index 0000000000..9018ca5903
--- /dev/null
+++ b/litmus/sched_ghq_edf.c
@@ -0,0 +1,717 @@
+#include <linux/spinlock.h>
+#include <linux/percpu.h>
+#include <linux/sched.h>
+
+#include <litmus/litmus.h>
+#include <litmus/jobs.h>
+#include <litmus/sched_plugin.h>
+#include <litmus/edf_common.h>
+#include <litmus/sched_trace.h>
+
+#include <litmus/heap.h>
+
+#include <linux/module.h>
+
+/* cpu_entry_t - maintain the linked and scheduled state
+ */
+typedef struct {
+        int cpu;
+        struct task_struct* linked;    /* only RT tasks */
+        int picked;                    /* linked was seen */
+        struct task_struct* scheduled; /* only RT tasks */
+        struct heap_node* hn;
+} cpu_entry_t;
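+/* The 'picked' flag records whether the scheduler on the entry's CPU has
+ * already committed to the linked task; a linked but not yet picked task
+ * may still be unlinked and requeued by check_for_preemptions().
+ */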
+DEFINE_PER_CPU(cpu_entry_t, ghqedf_cpu_entries);
+
+DEFINE_SPINLOCK(ghqedf_cpu_lock); /* synchronize access to cpu heap */
+
+cpu_entry_t* ghqedf_cpus[NR_CPUS];
+
+/* the cpus queue themselves according to priority in here */
+static struct heap_node ghqedf_heap_node[NR_CPUS];
+static struct heap ghqedf_cpu_heap;
+
+static rt_domain_t ghqedf; /* used only for the release queue */
+
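+/* GHQ-EDF: instead of a single global ready queue, each CPU owns a
+ * subqueue, and a master queue orders the subqueues by the deadline of
+ * their top element. CPUs releasing jobs mostly contend only for their
+ * local subqueue lock; the master queue must be touched only when a
+ * subqueue's top changes.
+ */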
+struct subqueue {
+        struct heap queue;
+        struct task_struct* top;
+        struct heap_node* hn;
+        spinlock_t lock;
+};
+
+/* per-cpu sub queue */
+//DEFINE_PER_CPU(struct subqueue, ghqedf_subqueue);
+
+struct subqueue ghqedf_subqueue[NR_CPUS];
+
+/* heap nodes for the subqueue::hn field */
+static struct heap_node ghqedf_subqueue_heap_node[NR_CPUS];
+
+/* queue of sub queues */
+struct heap master_queue;
+
+/* re-use the ready queue lock */
+#define master_lock (ghqedf.ready_lock)
+
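+/* Lock ordering: ghqedf_cpu_lock before master_lock before a subqueue
+ * lock (see take_if_preempt_required() and update_top()); never acquire
+ * them in the opposite order.
+ */
+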
+static int subqueue_higher_prio(struct heap_node *_a, struct heap_node *_b)
+{
+        struct subqueue *a, *b;
+        a = _a->value;
+        b = _b->value;
+        return edf_higher_prio(a->top, b->top);
+}
+
+static void subqueues_init(void)
+{
+        int cpu;
+        struct subqueue *q;
+
+        heap_init(&master_queue);
+
+        for_each_online_cpu(cpu) {
+//              q = &per_cpu(ghqedf_subqueue, cpu);
+                q = ghqedf_subqueue + cpu;
+                heap_init(&q->queue);
+                q->top = NULL;
+                q->hn = ghqedf_subqueue_heap_node + cpu;
+                heap_node_init(&q->hn, q);
+                spin_lock_init(&q->lock);
+                heap_insert(subqueue_higher_prio, &master_queue, q->hn);
+        }
+}
+
+static void __update_top(struct subqueue* q)
+{
+        struct heap_node *tmp;
+
+        tmp = heap_peek(edf_ready_order, &q->queue);
+        q->top = tmp ? tmp->value : NULL;
+}
+
+static void update_top(struct subqueue* q)
+{
+        spin_lock(&q->lock);
+        __update_top(q);
+        spin_unlock(&q->lock);
+}
+
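+/* merge_into_ready_queue - merge a heap of newly-ready tasks into the
+ * local CPU's subqueue and update the master queue only if the
+ * subqueue's top changed. Call with preemptions disabled (uses
+ * smp_processor_id()).
+ */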
+static void merge_into_ready_queue(struct heap *h)
+{
+//      struct subqueue *q = &__get_cpu_var(ghqedf_subqueue);
+        struct subqueue *q = ghqedf_subqueue + smp_processor_id();
+        struct heap_node *tmp;
+        void *old_top;
+        int changed_top = 0;
+
+        spin_lock(&q->lock);
+        tmp = heap_peek(edf_ready_order, &q->queue);
+        old_top = tmp ? tmp->value : NULL;
+        heap_union(edf_ready_order, &q->queue, h);
+        /* did the merge change the top of the subqueue? */
+        changed_top = !old_top ||
+                heap_peek(edf_ready_order, &q->queue)->value != old_top;
+        spin_unlock(&q->lock);
+        if (changed_top) {
+                /* need to update the master queue */
+                spin_lock(&master_lock);
+                /* If it is not in the heap, then it is already being
+                 * updated concurrently, so we skip it.
+                 */
+                if (likely(heap_node_in_heap(q->hn))) {
+                        heap_delete(subqueue_higher_prio, &master_queue, q->hn);
+                        update_top(q);
+                        heap_insert(subqueue_higher_prio, &master_queue, q->hn);
+                } else
+                        TRACE("not updating subqueue top\n");
+                spin_unlock(&master_lock);
+        }
+}
+
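+/* add_to_ready_queue - insert a single task by wrapping it into a
+ * temporary singleton heap, so that single insertions and bulk releases
+ * share merge_into_ready_queue().
+ */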
+static void add_to_ready_queue(struct task_struct *t)
+{
+        struct heap tmp;
+
+        TRACE_TASK(t, "adding to ready queue\n");
+        heap_init(&tmp);
+        heap_insert(edf_ready_order, &tmp, tsk_rt(t)->heap_node);
+        merge_into_ready_queue(&tmp);
+}
+
+
+static int cpu_lower_prio(struct heap_node *_a, struct heap_node *_b)
+{
+        cpu_entry_t *a, *b;
+        a = _a->value;
+        b = _b->value;
+        /* Note that a and b are inverted: we want the lowest-priority CPU at
+         * the top of the heap.
+         */
+        return edf_higher_prio(b->linked, a->linked);
+}
+
+static void remove_from_cpu_heap(cpu_entry_t* entry)
+{
+        if (likely(heap_node_in_heap(entry->hn)))
+                heap_delete(cpu_lower_prio, &ghqedf_cpu_heap, entry->hn);
+}
+
+/* update_cpu_position - Move the cpu entry to the correct place to maintain
+ * order in the cpu queue. Caller must hold ghqedf_cpu_lock.
+ */
+static void update_cpu_position(cpu_entry_t *entry)
+{
+        remove_from_cpu_heap(entry);
+        heap_insert(cpu_lower_prio, &ghqedf_cpu_heap, entry->hn);
+}
+
+/* caller must hold ghqedf_cpu_lock */
+static cpu_entry_t* lowest_prio_cpu(int take)
+{
+        struct heap_node* hn;
+        if (take)
+                hn = heap_take(cpu_lower_prio, &ghqedf_cpu_heap);
+        else
+                hn = heap_peek(cpu_lower_prio, &ghqedf_cpu_heap);
+        return hn ? hn->value : NULL;
+}
+
+
+/* link_task_to_cpu - Update the link of a CPU.
+ * Handles the case where the to-be-linked task is already
+ * scheduled on a different CPU.
+ */
+static noinline void link_task_to_cpu(struct task_struct* linked,
+                                      cpu_entry_t *entry)
+{
+        cpu_entry_t *sched = NULL;
+        struct task_struct* tmp;
+        int on_cpu;
+
+        BUG_ON(linked && !is_realtime(linked));
+
+        /* Currently linked task is set to be unlinked. */
+        if (entry->linked) {
+                entry->linked->rt_param.linked_on = NO_CPU;
+        }
+
+        /* Link new task to CPU. */
+        if (linked) {
+                set_rt_flags(linked, RT_F_RUNNING);
+                /* handle the case that the task is already scheduled
+                 * somewhere else! */
+                on_cpu = linked->rt_param.scheduled_on;
+                if (on_cpu != NO_CPU) {
+                        sched = &per_cpu(ghqedf_cpu_entries, on_cpu);
+                        /* this should only happen if it is not linked already */
+                        BUG_ON(sched->linked == linked);
+
+                        /* If we are already scheduled on the CPU to which we
+                         * wanted to link, we don't need to do the swap --
+                         * we just link ourselves to the CPU and depend on
+                         * the caller to get things right.
+                         *
+                         * But only swap if the other node is in the queue.
+                         * If it is not, then it is being updated
+                         * concurrently and some other task was already
+                         * picked for it.
+                         */
+                        if (entry != sched && heap_node_in_heap(sched->hn)) {
+                                TRACE_TASK(linked,
+                                           "already scheduled on %d, "
+                                           "updating link.\n",
+                                           sched->cpu);
+                                tmp = sched->linked;
+                                linked->rt_param.linked_on = sched->cpu;
+                                sched->linked = linked;
+                                sched->picked = 1;
+                                update_cpu_position(sched);
+                                linked = tmp;
+                        }
+                }
+                if (linked) /* might be NULL due to swap */
+                        linked->rt_param.linked_on = entry->cpu;
+        }
+        entry->linked = linked;
+        entry->picked = entry == sched; /* set to one if we linked to the
+                                         * CPU that the task is
+                                         * executing on
+                                         */
+        if (linked)
+                TRACE_TASK(linked, "linked to %d.\n", entry->cpu);
+        else
+                TRACE("NULL linked to %d.\n", entry->cpu);
+        update_cpu_position(entry);
+}
+
+/* unlink - Make sure a task is not linked any longer to an entry
+ * where it was linked before. Must hold ghqedf_cpu_lock.
+ */
+static noinline void unlink(struct task_struct* t)
+{
+        cpu_entry_t *entry;
+
+        if (t->rt_param.linked_on != NO_CPU) {
+                /* unlink */
+                entry = &per_cpu(ghqedf_cpu_entries, t->rt_param.linked_on);
+                t->rt_param.linked_on = NO_CPU;
+                link_task_to_cpu(NULL, entry);
+        }
+}
+
+
+/* preempt - force a CPU to reschedule
+ */
+static noinline void preempt(cpu_entry_t *entry)
+{
+        if (smp_processor_id() == entry->cpu)
+                set_tsk_need_resched(current);
+        else
+                smp_send_reschedule(entry->cpu);
+}
+
+/* requeue - Put an unlinked task back into the ghqedf domain.
+ *
+ * Call unlocked, but with preemptions disabled!
+ */
+static noinline void requeue(struct task_struct* task)
+{
+        if (is_released(task, litmus_clock()))
+                add_to_ready_queue(task);
+        else
+                /* it has got to wait */
+                add_release(&ghqedf, task);
+}
+
+
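+/* take_if_preempt_required - if the highest-priority ready task has
+ * higher priority than the linked task of the lowest-priority CPU
+ * 'last', remove it from its subqueue and return it; otherwise return
+ * NULL. Called with ghqedf_cpu_lock held; the lock is dropped if a
+ * preemption is required (the caller reacquires it).
+ */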
+static struct task_struct* take_if_preempt_required(cpu_entry_t* last)
+{
+        struct heap_node* tmp;
+        struct subqueue* q;
+        struct task_struct* t;
+        int preempt_required = 0;
+
+        spin_lock(&master_lock);
+        tmp = heap_peek(subqueue_higher_prio, &master_queue);
+        BUG_ON(!tmp); /* should be there */
+        q = tmp->value;
+
+        spin_lock(&q->lock);
+        tmp = heap_peek(edf_ready_order, &q->queue);
+        t = tmp ? tmp->value : NULL;
+        preempt_required = edf_higher_prio(t, last->linked);
+        if (preempt_required) {
+                /* take the CPU out of the heap */
+                last = lowest_prio_cpu(1);
+                spin_unlock(&ghqedf_cpu_lock);
+                heap_delete(subqueue_higher_prio, &master_queue, q->hn);
+        }
+        /* drop the master lock while we update the subqueue */
+        spin_unlock(&master_lock);
+
+        if (preempt_required) {
+                heap_delete(edf_ready_order, &q->queue, tmp);
+                /* precompute, so that the next lookup is O(1) */
+                __update_top(q);
+                spin_unlock(&q->lock);
+
+                /* re-insert with new priority */
+                spin_lock(&master_lock);
+                /* update, with the right locking order */
+                update_top(q);
+                heap_insert(subqueue_higher_prio, &master_queue, q->hn);
+                spin_unlock(&master_lock);
+
+                return t;
+        } else {
+                spin_unlock(&q->lock);
+                return NULL;
+        }
+}
+
+
+/* check_for_preemptions: repeatedly link the highest-priority ready task
+ * to the lowest-priority CPU until no further preemption is required.
+ */
+static void check_for_preemptions(void)
+{
+        int done = 0;
+        unsigned long flags;
+        struct task_struct *task, *unlinked;
+        cpu_entry_t* last;
+
+        local_irq_save(flags);
+        while (!done) {
+                unlinked = NULL;
+                spin_lock(&ghqedf_cpu_lock);
+                last = lowest_prio_cpu(0);
+                if (likely(last)) {
+                        task = take_if_preempt_required(last);
+                        if (task) {
+                                TRACE_TASK(task, "removed from ready Q\n");
+                                /* cpu lock was dropped, reacquire */
+                                spin_lock(&ghqedf_cpu_lock);
+                                if (last->linked && !last->picked)
+                                        /* can be requeued by us */
+                                        unlinked = last->linked;
+                                TRACE("check_for_preemptions: "
+                                      "attempting to link task %d to %d\n",
+                                      task->pid, last->cpu);
+                                link_task_to_cpu(task, last);
+                                update_cpu_position(last);
+                        } else
+                                /* no preemption required */
+                                done = 1;
+                } else
+                        /* all gone, being checked elsewhere? */
+                        done = 1;
+                spin_unlock(&ghqedf_cpu_lock);
+                if (unlinked)
+                        /* stick it back into the queue */
+                        requeue(unlinked);
+                if (last && !done)
+                        /* we have a preemption, send IPI */
+                        preempt(last);
+        }
+        TRACE("done with preemption checking\n");
+        local_irq_restore(flags);
+}
+
+/* ghqedf_job_arrival: task is either resumed or released
+ * call only unlocked!
+ */
+static noinline void ghqedf_job_arrival(struct task_struct* task)
+{
+        requeue(task);
+        check_for_preemptions();
+}
+
+static void ghqedf_release_jobs(rt_domain_t* rt, struct heap* tasks)
+{
+        unsigned long flags;
+
+        TRACE("release_jobs() invoked\n");
+        local_irq_save(flags);
+        /* insert unlocked */
+        merge_into_ready_queue(tasks);
+        local_irq_restore(flags);
+        check_for_preemptions();
+}
+
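+/* job_completion - prepare the next job of the completed task and either
+ * keep the task linked (if the next job has already been released) or
+ * move it to the release queue. Returns 1 if the task stays linked.
+ */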
+/* caller holds ghqedf_cpu_lock */
+static noinline int job_completion(cpu_entry_t* entry, int forced)
+{
+        struct task_struct *t = entry->scheduled;
+
+        sched_trace_task_completion(t, forced);
+
+        TRACE_TASK(t, "job_completion().\n");
+
+        /* set flags */
+        set_rt_flags(t, RT_F_SLEEP);
+        /* prepare for next period */
+        prepare_for_next_period(t);
+
+        if (is_released(t, litmus_clock())) {
+                sched_trace_task_release(t);
+                /* the next job is already released: we changed the
+                 * priority, so see if we need to preempt */
+                set_rt_flags(t, RT_F_RUNNING);
+                update_cpu_position(entry);
+                return 1;
+        } else {
+                /* it has got to wait */
+                unlink(t);
+                add_release(&ghqedf, t);
+                return 0;
+        }
+}
+
+/* ghqedf_tick - this function is called for every local timer
+ * interrupt.
+ *
+ * It checks whether the current task has exhausted its budget and,
+ * if so, requests a reschedule.
+ */
+static void ghqedf_tick(struct task_struct* t)
+{
+        if (is_realtime(t) && budget_exhausted(t))
+                set_tsk_need_resched(t);
+}
+
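+/* ghqedf_schedule - pick the next task for this CPU: determine the state
+ * of prev (blocking, out of budget, sleeping, preempted), complete its
+ * job if needed, pull in a pending task if the CPU became unlinked, and
+ * switch to entry->linked if it differs from entry->scheduled.
+ */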
+static struct task_struct* ghqedf_schedule(struct task_struct * prev)
+{
+        cpu_entry_t* entry = &__get_cpu_var(ghqedf_cpu_entries);
+        int out_of_time, sleep, preempt, exists, blocks;
+        struct task_struct* next = NULL;
+
+        /* Bail out early if we are the release master.
+         * The release master never schedules any real-time tasks.
+         */
+        if (ghqedf.release_master == entry->cpu)
+                return NULL;
+
+//      TRACE_TASK(prev, "invoked ghqedf_schedule.\n");
+
+        /* sanity checking */
+        BUG_ON(entry->scheduled && entry->scheduled != prev);
+        BUG_ON(entry->scheduled && !is_realtime(prev));
+        BUG_ON(is_realtime(prev) && !entry->scheduled);
+
+        /* (0) Determine state */
+        exists      = entry->scheduled != NULL;
+        blocks      = exists && !is_running(entry->scheduled);
+        out_of_time = exists && budget_exhausted(entry->scheduled);
+        sleep       = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP;
+
+        spin_lock(&ghqedf_cpu_lock);
+
+        preempt = entry->scheduled != entry->linked;
+
+        if (exists)
+                TRACE_TASK(prev,
+                           "blocks:%d out_of_time:%d sleep:%d preempt:%d "
+                           "state:%d sig:%d\n",
+                           blocks, out_of_time, sleep, preempt,
+                           prev->state, signal_pending(prev));
+        if (preempt && entry->linked)
+                TRACE_TASK(prev, "will be preempted by %s/%d\n",
+                           entry->linked->comm, entry->linked->pid);
+
+        /* If a task blocks we have no choice but to reschedule.
+         */
+        if (blocks)
+                unlink(entry->scheduled);
+
+        /* Any task that is preemptable and either exhausts its execution
+         * budget or wants to sleep completes. We may have to reschedule after
+         * this. Don't do a job completion if we block (can't have timers
+         * running for blocked jobs). Preemptions go first for the same reason.
+         */
+        if ((out_of_time || sleep) && !blocks && !preempt) {
+                if (job_completion(entry, !sleep)) {
+                        /* Task might stay with us.
+                         * Drop locks and check for preemptions.
+                         */
+                        spin_unlock(&ghqedf_cpu_lock);
+                        /* anything to update? */
+                        check_for_preemptions();
+                        spin_lock(&ghqedf_cpu_lock);
+                        /* If something higher-priority got linked in the
+                         * meantime, then we need to add the task to the
+                         * ready queue (it wasn't added by
+                         * check_for_preemptions() because picked == 1).
+                         */
+                        if (entry->linked != prev)
+                                add_to_ready_queue(prev);
+                }
+        }
+
+        /* Link a pending task if we became unlinked.
+         * NOTE: Do not hold locks while performing ready queue updates
+         * since we want concurrent access to the queue.
+         */
+        if (!entry->linked) {
+                if (exists) {
+                        /* We are committed to descheduling; erase marker
+                         * before we drop the lock.
+                         */
+                        tsk_rt(prev)->scheduled_on = NO_CPU;
+                }
+                spin_unlock(&ghqedf_cpu_lock);
+                check_for_preemptions(); /* update links */
+                spin_lock(&ghqedf_cpu_lock);
+        }
+
+        /* The final scheduling decision. Do we need to switch for some reason?
+         * If linked is different from scheduled, then select linked as next.
+         */
+        if (entry->linked != entry->scheduled) {
+                /* Schedule a linked job? */
+                if (entry->linked) {
+                        entry->linked->rt_param.scheduled_on = entry->cpu;
+                        entry->picked = 1;
+                        next = entry->linked;
+                }
+                if (entry->scheduled)
+                        entry->scheduled->rt_param.scheduled_on = NO_CPU;
+        } else
+                /* Only override Linux scheduler if we have a real-time task
+                 * scheduled that needs to continue.
+                 */
+                if (exists)
+                        next = prev;
+
+        spin_unlock(&ghqedf_cpu_lock);
+        if (exists && preempt && !blocks)
+                /* stick the preempted task back into the ready queue */
+                ghqedf_job_arrival(prev);
+
+        if (next)
+                TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
+        else if (exists && !next)
+                TRACE("becomes idle at %llu.\n", litmus_clock());
+
+        return next;
+}
+
+
+/* _finish_switch - we just finished the switch away from prev
+ */
+static void ghqedf_finish_switch(struct task_struct *prev)
+{
+        cpu_entry_t* entry = &__get_cpu_var(ghqedf_cpu_entries);
+
+        entry->scheduled = is_realtime(current) ? current : NULL;
+        TRACE_TASK(prev, "switched away from\n");
+}
+
+
+/* Prepare a task for running in RT mode
+ */
+static void ghqedf_task_new(struct task_struct * t, int on_rq, int running)
+{
+        unsigned long flags;
+        cpu_entry_t* entry;
+
+        TRACE("ghqedf: task new %d\n", t->pid);
+
+        spin_lock_irqsave(&ghqedf_cpu_lock, flags);
+
+        /* setup job params */
+        release_at(t, litmus_clock());
+
+        if (running) {
+                entry = &per_cpu(ghqedf_cpu_entries, task_cpu(t));
+                BUG_ON(entry->scheduled);
+                if (entry->cpu != ghqedf.release_master) {
+                        entry->scheduled = t;
+                        t->rt_param.scheduled_on = task_cpu(t);
+                } else
+                        tsk_rt(t)->scheduled_on = NO_CPU;
+        } else {
+                tsk_rt(t)->scheduled_on = NO_CPU;
+                ghqedf_job_arrival(t);
+        }
+        tsk_rt(t)->linked_on = NO_CPU;
+
+        spin_unlock_irqrestore(&ghqedf_cpu_lock, flags);
+}
+
+static void ghqedf_task_wake_up(struct task_struct *task)
+{
+        unsigned long flags;
+        lt_t now;
+
+        TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
+
+        spin_lock_irqsave(&ghqedf_cpu_lock, flags);
+        now = litmus_clock();
+        if (is_tardy(task, now)) {
+                /* new sporadic release */
+                release_at(task, now);
+                sched_trace_task_release(task);
+        }
+        spin_unlock_irqrestore(&ghqedf_cpu_lock, flags);
+        ghqedf_job_arrival(task);
+}
+
+static void ghqedf_task_block(struct task_struct *t)
+{
+        TRACE_TASK(t, "block at %llu\n", litmus_clock());
+}
+
+static void ghqedf_task_exit(struct task_struct * t)
+{
+        unsigned long flags;
+
+        /* unlink if necessary */
+        spin_lock_irqsave(&ghqedf_cpu_lock, flags);
+        /* remove from CPU state, if necessary */
+        unlink(t);
+        if (tsk_rt(t)->scheduled_on != NO_CPU) {
+                ghqedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
+                tsk_rt(t)->scheduled_on = NO_CPU;
+        } else {
+                /* FIXME: If t is currently queued, then we need to
+                 *        dequeue it now; otherwise it will probably
+                 *        cause a crash once it is dequeued.
+                 */
+                TRACE_TASK(t, "called ghqedf_task_exit(), "
+                           "but is not scheduled!\n");
+        }
+        spin_unlock_irqrestore(&ghqedf_cpu_lock, flags);
+
+        TRACE_TASK(t, "RIP\n");
+}
+
+static long ghqedf_admit_task(struct task_struct* tsk)
+{
+        /* any real-time task is accepted */
+        return 0;
+}
+
+
+static long ghqedf_activate_plugin(void)
+{
+        int cpu;
+        cpu_entry_t *entry;
+
+        heap_init(&ghqedf_cpu_heap);
+        subqueues_init();
+        ghqedf.release_master = atomic_read(&release_master_cpu);
+
+        for_each_online_cpu(cpu) {
+                entry = &per_cpu(ghqedf_cpu_entries, cpu);
+                heap_node_init(&entry->hn, entry);
+                entry->linked    = NULL;
+                entry->scheduled = NULL;
+                entry->picked    = 0;
+                if (cpu != ghqedf.release_master) {
+                        TRACE("GHQ-EDF: Initializing CPU #%d.\n", cpu);
+                        update_cpu_position(entry);
+                } else {
+                        TRACE("GHQ-EDF: CPU %d is release master.\n", cpu);
+                }
+        }
+        return 0;
+}
+
+
+/* Plugin object */
+static struct sched_plugin ghqedf_plugin __cacheline_aligned_in_smp = {
+        .plugin_name            = "GHQ-EDF",
+        .finish_switch          = ghqedf_finish_switch,
+        .tick                   = ghqedf_tick,
+        .task_new               = ghqedf_task_new,
+        .complete_job           = complete_job,
+        .task_exit              = ghqedf_task_exit,
+        .schedule               = ghqedf_schedule,
+        .task_wake_up           = ghqedf_task_wake_up,
+        .task_block             = ghqedf_task_block,
+        .admit_task             = ghqedf_admit_task,
+        .activate_plugin        = ghqedf_activate_plugin,
+};
+
+
+static int __init init_ghqedf(void)
+{
+        int cpu;
+        cpu_entry_t *entry;
+
+        /* initialize CPU state */
+        for (cpu = 0; cpu < NR_CPUS; cpu++) {
+                entry = &per_cpu(ghqedf_cpu_entries, cpu);
+                ghqedf_cpus[cpu] = entry;
+                entry->cpu = cpu;
+                entry->hn  = &ghqedf_heap_node[cpu];
+                heap_node_init(&entry->hn, entry);
+        }
+        edf_domain_init(&ghqedf, NULL, ghqedf_release_jobs);
+        return register_sched_plugin(&ghqedf_plugin);
+}
+
+
+module_init(init_ghqedf);