Diffstat (limited to 'litmus/sched_pfair.c')
 litmus/sched_pfair.c | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c
index c95bde87b5d7..16f1065bbdca 100644
--- a/litmus/sched_pfair.c
+++ b/litmus/sched_pfair.c
@@ -98,6 +98,8 @@ struct pfair_cluster {
 	raw_spinlock_t release_lock;
 };
 
+#define RT_F_REQUEUE 0x2
+
 static inline struct pfair_cluster* cpu_cluster(struct pfair_state* state)
 {
 	return container_of(state->topology.cluster, struct pfair_cluster, topology);
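Note: RT_F_REQUEUE is a new, plugin-local value for the tsk_rt(t)->flags field. It marks a task that was dropped from all run and release queues while blocked, so the wake-up path knows an explicit requeue is required. The value 0x2 presumably only needs to be distinct from the existing flag values (in particular RT_F_RUNNING, which the wake-up path restores below). A minimal sketch of the flag protocol follows; mark_dropped() and needs_requeue() are hypothetical helper names, since the patch open-codes both steps:

	/* Hypothetical helpers illustrating the flag protocol; the patch
	 * open-codes these in advance_subtask() and pfair_task_wake_up(). */
	static inline void mark_dropped(struct task_struct *t)
	{
		/* producer side: set when the task is fully dequeued */
		tsk_rt(t)->flags = RT_F_REQUEUE;
	}

	static inline int needs_requeue(struct task_struct *t)
	{
		/* consumer side: tested once at wake-up, then reset */
		return tsk_rt(t)->flags == RT_F_REQUEUE;
	}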
@@ -288,6 +290,8 @@ static void drop_all_references(struct task_struct *t)
 		if (s->scheduled == t)
 			s->scheduled = NULL;
 	}
+	/* make sure we don't have a stale linked_on field */
+	tsk_rt(t)->linked_on = NO_CPU;
 }
 
 static void pfair_prepare_next_period(struct task_struct* t)
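Note: drop_all_references() already cleared the per-CPU linked and scheduled pointers; this hunk additionally resets the task's own linked_on field so it cannot go stale while the task is out of the system. A stale linked_on is exactly what the old wake-up test (if (tsk_rt(t)->linked_on == NO_CPU)) could trip over: a dropped task still recorded as linked would never be re-added to the ready queue. A hypothetical debug check, not part of the patch, for the invariant this establishes:

	/* Hypothetical sanity check (not in the patch): after a full
	 * drop, the task must not appear linked anywhere. */
	static void assert_fully_dropped(struct task_struct *t)
	{
		BUG_ON(tsk_rt(t)->linked_on != NO_CPU);
	}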
@@ -312,6 +316,7 @@ static int advance_subtask(quanta_t time, struct task_struct* t, int cpu)
 	} else {
 		/* remove task from system until it wakes */
 		drop_all_references(t);
+		tsk_rt(t)->flags = RT_F_REQUEUE;
 		TRACE_TASK(t, "on %d advanced to subtask %lu (not present)\n",
 			   cpu, p->cur);
 		return 0;
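Note: this is the producer side of the new flag. When a quantum boundary finds the task no longer present, it is fully dequeued via drop_all_references() and tagged RT_F_REQUEUE, pairing with the consumer test in pfair_task_wake_up() below.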
@@ -688,6 +693,7 @@ static void pfair_task_wake_up(struct task_struct *t)
 {
 	unsigned long flags;
 	lt_t now;
+	int requeue = 0;
 	struct pfair_cluster* cluster;
 
 	cluster = tsk_pfair(t)->cluster;
@@ -702,16 +708,21 @@ static void pfair_task_wake_up(struct task_struct *t)
 	 * (as if it never blocked at all). Otherwise, we have a
 	 * new sporadic job release.
 	 */
+	requeue = tsk_rt(t)->flags == RT_F_REQUEUE;
 	now = litmus_clock();
 	if (lt_before(get_deadline(t), now)) {
+		TRACE_TASK(t, "sporadic release!\n");
 		release_at(t, now);
 		prepare_release(t, time2quanta(now, CEIL));
 		sched_trace_task_release(t);
 	}
 
 	/* only add to ready queue if the task isn't still linked somewhere */
-	if (tsk_rt(t)->linked_on == NO_CPU)
+	if (requeue) {
+		TRACE_TASK(t, "requeueing required\n");
+		tsk_rt(t)->flags = RT_F_RUNNING;
 		__add_ready(&cluster->pfair, t);
+	}
 
 	check_preempt(t);
 
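Note: the requeue decision is now based on the explicit RT_F_REQUEUE flag rather than on linked_on == NO_CPU, and the flag is sampled before the sporadic-release branch, presumably because release_at() resets the flags field and testing afterwards would lose the information. The comment above the branch ("only add to ready queue if the task isn't still linked somewhere") is left over from the old test. A condensed view of the resulting wake-up logic; this is a sketch assuming the LITMUS^RT declarations from litmus.h/rt_param.h, with locking and tracing elided:

	/* Sketch of the post-patch wake-up path (locking/tracing elided;
	 * pfair_wake_up_sketch is an illustrative name, not the patch). */
	static void pfair_wake_up_sketch(struct task_struct *t,
					 struct pfair_cluster *cluster)
	{
		/* sample the flag first: the release branch below may
		 * overwrite tsk_rt(t)->flags */
		int requeue = tsk_rt(t)->flags == RT_F_REQUEUE;
		lt_t now = litmus_clock();

		if (lt_before(get_deadline(t), now)) {
			/* deadline already passed => new sporadic release */
			release_at(t, now);
			prepare_release(t, time2quanta(now, CEIL));
		}

		if (requeue) {
			/* task was fully dropped in advance_subtask();
			 * restore the normal flag and re-add it */
			tsk_rt(t)->flags = RT_F_RUNNING;
			__add_ready(&cluster->pfair, t);
		}

		check_preempt(t);
	}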