-rw-r--r--	litmus/sched_pfair.c	18
1 file changed, 3 insertions(+), 15 deletions(-)
diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c
index 71ce993df5ca..0a64273daa47 100644
--- a/litmus/sched_pfair.c
+++ b/litmus/sched_pfair.c
@@ -48,8 +48,6 @@ struct pfair_param {
 	quanta_t	last_quantum; /* when scheduled last */
 	int		last_cpu;     /* where scheduled last */
 
-	unsigned int	sporadic_release; /* On wakeup, new sporadic release? */
-
 	struct pfair_cluster* cluster; /* where this task is scheduled */
 
 	struct subtask subtasks[0];   /* allocate together with pfair_param */
@@ -334,7 +332,6 @@ static int advance_subtask(quanta_t time, struct task_struct* t, int cpu)
 	} else {
 		/* remove task from system until it wakes */
 		drop_all_references(t);
-		tsk_pfair(t)->sporadic_release = 1;
 		TRACE_TASK(t, "on %d advanced to subtask %lu (not present)\n",
 			   cpu, p->cur);
 		return 0;
@@ -658,7 +655,6 @@ static void pfair_task_new(struct task_struct * t, int on_rq, int running)
 	t->rt_param.scheduled_on = NO_CPU;
 
 	prepare_release(t, cluster->pfair_time + 1);
-	tsk_pfair(t)->sporadic_release = 0;
 	pfair_add_release(cluster, t);
 	check_preempt(t);
 
@@ -678,21 +674,18 @@ static void pfair_task_wake_up(struct task_struct *t)
 
 	raw_spin_lock_irqsave(cluster_lock(cluster), flags);
 
-	/* It is a little unclear how to deal with Pfair
-	 * tasks that block for a while and then wake. For now,
-	 * if a task blocks and wakes before its next job release,
+	/* If a task blocks and wakes before its next job release,
 	 * then it may resume if it is currently linked somewhere
 	 * (as if it never blocked at all). Otherwise, we have a
 	 * new sporadic job release.
 	 */
-	if (tsk_pfair(t)->sporadic_release) {
-		now = litmus_clock();
+	now = litmus_clock();
+	if (lt_before(get_deadline(t), now)) {
 		release_at(t, now);
 		prepare_release(t, time2quanta(now, CEIL));
 		sched_trace_task_release(t);
 		/* FIXME: race with pfair_time advancing */
 		pfair_add_release(cluster, t);
-		tsk_pfair(t)->sporadic_release = 0;
 	}
 
 	check_preempt(t);
@@ -761,11 +754,6 @@ static void pfair_release_at(struct task_struct* task, lt_t start)
 	prepare_release(task, release);
 	pfair_add_release(cluster, task);
 
-	/* Clear sporadic release flag, since this release subsumes any
-	 * sporadic release on wake.
-	 */
-	tsk_pfair(task)->sporadic_release = 0;
-
 	raw_spin_unlock_irqrestore(cluster_lock(cluster), flags);
 }
 
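The net effect of the patch: the per-task sporadic_release flag is removed, and pfair_task_wake_up() instead decides at wake-up time whether a new sporadic release is needed by comparing the task's absolute deadline against the current time. Below is a minimal sketch of that decision, reusing only the helpers already visible in the hunks above; the wrapper name pfair_sporadic_wakeup_check is hypothetical and exists here only for illustration, not in the patched file.

/* Sketch, not part of the patch: the deadline check that replaces the
 * sporadic_release flag in pfair_task_wake_up().
 */
static void pfair_sporadic_wakeup_check(struct task_struct *t,
					struct pfair_cluster *cluster)
{
	lt_t now = litmus_clock();

	/* Deadline already in the past => the previous job is over, so this
	 * wake-up starts a new sporadic job. Otherwise the task resumes as
	 * if it had never blocked. */
	if (lt_before(get_deadline(t), now)) {
		release_at(t, now);	/* set new job parameters from 'now' */
		prepare_release(t, time2quanta(now, CEIL));
		sched_trace_task_release(t);
		/* FIXME carried over from the patch: race with pfair_time advancing */
		pfair_add_release(cluster, t);
	}
}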