author     John Calandrino <jmc@cs.unc.edu>        2008-07-14 13:23:42 -0400
committer  Bjoern B. Brandenburg <bbb@cs.unc.edu>  2008-07-14 13:23:42 -0400
commit     7310547d72e6d0f7096a8809e4fd0c104543ad4e (patch)
tree       0be0b12f96445c89082952335a287090bebac135
parent     9b070bc6b676c62d4988da8699f0a731e733b318 (diff)
PFAIR: Add support for sporadic task releases.
-rwxr-xr-x  litmus/sched_pfair.c  89
1 file changed, 54 insertions, 35 deletions
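Under the periodic task model assumed by the old code, consecutive job
releases of a task are separated by exactly one period. The sporadic model
relaxes this: a release may be delayed arbitrarily, e.g. because the task
blocked. This patch implements the sporadic case by dropping a task that
completes its last subtask while absent, and re-releasing it relative to its
wake-up time. A minimal sketch of the two release rules, using the names
from the code below (illustrative only, not a hunk of the patch):

        /* periodic: the next release follows the previous one
         * by exactly one period */
        p->release += p->period;

        /* sporadic: a task that was dropped while blocked is
         * re-released relative to the current quantum on wake-up
         * (see pfair_task_wake_up() below) */
        prepare_release(t, pfair_time + 1);
        pfair_add_release(t);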
diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c
index 6f95688508..beb303c352 100755
--- a/litmus/sched_pfair.c
+++ b/litmus/sched_pfair.c
@@ -48,6 +48,7 @@ struct pfair_param {
         int            last_cpu;   /* where scheduled last */

         unsigned int   present;    /* Can the task be scheduled? */
+        unsigned int   sporadic_release; /* On wakeup, new sporadic release? */

         struct subtask subtasks[0]; /* allocate together with pfair_param */
 };
@@ -269,6 +270,32 @@ static void check_preempt(struct task_struct* t)
         }
 }

+/* caller must hold pfair_lock */
+static void drop_all_references(struct task_struct *t)
+{
+        int cpu;
+        struct pfair_state* s;
+        struct heap* q;
+        if (heap_node_in_heap(tsk_rt(t)->heap_node)) {
+                /* figure out what queue the node is in */
+                if (time_before_eq(cur_release(t), merge_time))
+                        q = &pfair.ready_queue;
+                else
+                        q = relq(cur_release(t));
+                heap_delete(pfair_ready_order, q,
+                            tsk_rt(t)->heap_node);
+        }
+        for (cpu = 0; cpu < NR_CPUS; cpu++) {
+                s = &per_cpu(pfair_state, cpu);
+                if (s->linked == t)
+                        s->linked = NULL;
+                if (s->local == t)
+                        s->local = NULL;
+                if (s->scheduled == t)
+                        s->scheduled = NULL;
+        }
+}
+
 /* returns 1 if the task needs to go the release queue */
 static int advance_subtask(quanta_t time, struct task_struct* t, int cpu)
 {
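drop_all_references() mutates the shared ready/release heaps and the per-CPU
linked/local/scheduled fields, hence the "caller must hold pfair_lock"
requirement. A hypothetical caller, modeled on pfair_release_at() further
down in this file (the lock and flag idioms are the ones already used there):

        unsigned long flags;

        spin_lock_irqsave(&pfair_lock, flags);
        /* detach t from every queue and per-CPU slot... */
        drop_all_references(t);
        /* ...and request a fresh release on the next wake-up */
        tsk_pfair(t)->sporadic_release = 1;
        spin_unlock_irqrestore(&pfair_lock, flags);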
@@ -279,10 +306,17 @@ static int advance_subtask(quanta_t time, struct task_struct* t, int cpu)
                     cpu,
                     p->cur);
         if (!p->cur) {
-                /* we start a new job */
-                get_rt_flags(t) = RT_F_RUNNING;
                 prepare_for_next_period(t);
-                p->release += p->period;
+                if (tsk_pfair(t)->present) {
+                        /* we start a new job */
+                        get_rt_flags(t) = RT_F_RUNNING;
+                        p->release += p->period;
+                } else {
+                        /* remove task from system until it wakes */
+                        drop_all_references(t);
+                        tsk_pfair(t)->sporadic_release = 1;
+                        return 0;
+                }
         }
         return time_after(cur_release(t), time);
 }
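Note the early return 0 in the absent case: advance_subtask() reports that
the task does not need to go to the release queue, because
drop_all_references() has already detached it from every queue. The task
stays out of the system entirely until pfair_task_wake_up() re-releases it.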
@@ -524,6 +558,7 @@ static void pfair_task_new(struct task_struct * t, int on_rq, int running)

         prepare_release(t, pfair_time + 1);
         tsk_pfair(t)->present = running;
+        tsk_pfair(t)->sporadic_release = 0;
         pfair_add_release(t);
         check_preempt(t);

@@ -542,13 +577,17 @@ static void pfair_task_wake_up(struct task_struct *t)
         tsk_pfair(t)->present = 1;

         /* It is a little unclear how to deal with Pfair
-         * tasks that block for a while and then wake.
-         * For now, we assume that such suspensions are included
-         * in the stated execution time of the task, and thus
-         * count as execution time for our purposes. Thus, if the
-         * task is currently linked somewhere, it may resume, otherwise
-         * it has to wait for its next quantum allocation.
+         * tasks that block for a while and then wake. For now,
+         * if a task blocks and wakes before its next job release,
+         * then it may resume if it is currently linked somewhere
+         * (as if it never blocked at all). Otherwise, we have a
+         * new sporadic job release.
          */
+        if (tsk_pfair(t)->sporadic_release) {
+                prepare_release(t, pfair_time + 1);
+                pfair_add_release(t);
+                tsk_pfair(t)->sporadic_release = 0;
+        }

         check_preempt(t);

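As a worked example of these rules: take a task with a period of 10 quanta
whose job is released at quantum 100 and whose last subtask completes at
quantum 104 while the task is blocked. advance_subtask() drops it and sets
sporadic_release. If the task then wakes during quantum 137, the branch
above gives it a fresh release at pfair_time + 1 = 138 rather than the
long-stale release at 110 that the periodic rule (release += period) would
have produced. (The specific numbers are illustrative only.)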
@@ -562,32 +601,6 @@ static void pfair_task_block(struct task_struct *t)
                    (lt_t) jiffies, t->state);
 }

-/* caller must hold pfair_lock */
-static void drop_all_references(struct task_struct *t)
-{
-        int cpu;
-        struct pfair_state* s;
-        struct heap* q;
-        if (heap_node_in_heap(tsk_rt(t)->heap_node)) {
-                /* figure out what queue the node is in */
-                if (time_before_eq(cur_release(t), merge_time))
-                        q = &pfair.ready_queue;
-                else
-                        q = relq(cur_release(t));
-                heap_delete(pfair_ready_order, q,
-                            tsk_rt(t)->heap_node);
-        }
-        for (cpu = 0; cpu < NR_CPUS; cpu++) {
-                s = &per_cpu(pfair_state, cpu);
-                if (s->linked == t)
-                        s->linked = NULL;
-                if (s->local == t)
-                        s->local = NULL;
-                if (s->scheduled == t)
-                        s->scheduled = NULL;
-        }
-}
-
 static void pfair_task_exit(struct task_struct * t)
 {
         unsigned long flags;
@@ -634,6 +647,12 @@ static void pfair_release_at(struct task_struct* task, lt_t start)
         drop_all_references(task);
         prepare_release(task, release);
         pfair_add_release(task);
+
+        /* Clear sporadic release flag, since this release subsumes any
+         * sporadic release on wake.
+         */
+        tsk_pfair(task)->sporadic_release = 0;
+
         spin_unlock_irqrestore(&pfair_lock, flags);
 }
