Diffstat (limited to 'litmus/sched_cedf.c')
-rw-r--r--   litmus/sched_cedf.c   54
1 file changed, 22 insertions(+), 32 deletions(-)
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 1feb2fbe42bc..dd64211a1402 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -240,26 +240,6 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 	sched = &per_cpu(cedf_cpu_entries, on_cpu);
 
 	BUG_ON(sched->linked == linked);
-#if 0
-	/* this should only happen if not linked already */
-	if (sched->linked == linked) {
-		printk(KERN_EMERG "%s/%d hit sched bug. is_aux = %d; has_aux = %d\n",
-			linked->comm, linked->pid, tsk_rt(linked)->is_aux_task, tsk_rt(linked)->has_aux_tasks);
-		printk(KERN_EMERG "eCPU: %d, eLinked: %s/%d, sCPU: %d, sched: %s/%d\n",
-			entry->cpu,
-			(entry->linked) ?
-				entry->linked->comm : "nil",
-			(entry->linked) ?
-				entry->linked->pid : -1,
-			sched->cpu,
-			(sched->linked) ?
-				sched->linked->comm : "nil",
-			(sched->linked) ?
-				sched->linked->pid : -1);
-		TRACE_TASK(linked, "LINK BUG!\n");
-		unlink(linked);
-	}
-#endif
 
 	/* If we are already scheduled on the CPU to which we
 	 * wanted to link, we don't need to do the swap --
@@ -295,11 +275,9 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
  */
 static noinline void unlink(struct task_struct* t)
 {
-	cpu_entry_t *entry;
-
 	if (t->rt_param.linked_on != NO_CPU) {
 		/* unlink */
-		entry = &per_cpu(cedf_cpu_entries, t->rt_param.linked_on);
+		cpu_entry_t *entry = &per_cpu(cedf_cpu_entries, t->rt_param.linked_on);
 		t->rt_param.linked_on = NO_CPU;
 		link_task_to_cpu(NULL, entry);
 	} else if (is_queued(t)) {
@@ -338,7 +316,7 @@ static noinline void requeue(struct task_struct* task)
 
 	if (is_released(task, litmus_clock()))
 #ifdef CONFIG_REALTIME_AUX_TASKS
-	if (unlikely(tsk_rt(task)->is_aux_task && !is_running(task))) {
+	if (unlikely(tsk_rt(task)->is_aux_task && task->state != TASK_RUNNING)) {
 		/* aux_task probably transitioned to real-time while it was blocked */
 		TRACE_CUR("aux task %s/%d is not ready!\n", task->comm, task->pid);
 		unlink(task); /* really needed? */
@@ -874,6 +852,15 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
 		TRACE_TASK(prev, "will be preempted by %s/%d\n",
 			   entry->linked->comm, entry->linked->pid);
 
+	if (tsk_rt(prev)->is_aux_task &&
+	    (prev->state == TASK_INTERRUPTIBLE) &&
+	    !blocks) {
+		TRACE_TASK(prev, "Deferring descheduling of aux task %s/%d.\n",
+			   prev->comm, prev->pid);
+		next = prev; /* allow prev to continue. */
+		goto out_set_state;
+	}
+
 	/* Send the signal that the budget has been exhausted */
 	if (signal_budget)
 		send_sigbudget(entry->scheduled);
@@ -934,12 +921,17 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
 			entry->scheduled->rt_param.scheduled_on = NO_CPU;
 			TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n");
 		}
-	} else
+	}
+	else {
 		/* Only override Linux scheduler if we have a real-time task
 		 * scheduled that needs to continue.
 		 */
-		if (exists)
+		if (exists) {
 			next = prev;
+		}
+	}
+
+out_set_state:
 
 	sched_state_task_picked();
 	raw_spin_unlock(&cluster->cluster_lock);
@@ -988,6 +980,8 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running)
 	/* setup job params */
 	release_at(t, litmus_clock());
 
+	t->rt_param.linked_on = NO_CPU;
+
 	if (running) {
 		entry = &per_cpu(cedf_cpu_entries, task_cpu(t));
 		BUG_ON(entry->scheduled);
@@ -1007,15 +1001,11 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running)
 	} else {
 		t->rt_param.scheduled_on = NO_CPU;
 	}
-	t->rt_param.linked_on = NO_CPU;
 
 	if (is_running(t)) {
 		cedf_job_arrival(t);
 	}
-	else {
-		TRACE("Deferred job arrival because %d is blocked.\n", t->pid);
-		unlink(t); /* needed?? */
-	}
+
 	raw_spin_unlock_irqrestore(&(cluster->cluster_lock), flags);
 }
 
@@ -1188,7 +1178,7 @@ static int __increase_priority_inheritance(struct task_struct* t,
 
 	/* If it is scheduled, then we need to reorder the CPU heap. */
 	if (linked_on != NO_CPU) {
-		TRACE_TASK(t, "%s: linked on %d\n",
+		TRACE_TASK(t, "%s: linked on %d\n",
 			   __FUNCTION__, linked_on);
 		/* Holder is scheduled; need to re-order CPUs.
 		 * We can't use heap_decrease() here since