diff options
author | Bjoern B. Brandenburg <bbb@cs.unc.edu> | 2009-05-04 21:42:24 -0400 |
---|---|---|
committer | Bjoern B. Brandenburg <bbb@cs.unc.edu> | 2009-05-04 21:42:24 -0400 |
commit | d8fecef68f7998d4240a687444aefce2568476ca (patch) | |
tree | 34119f373709067ef65e522a9a052c0676ec63ac | |
parent | 2237cc63c52f8b80633d5ef04aa3b74957a68bab (diff) |
G-EDF: always mark scheduled tasks as being so (bugfix)
If schedule() was invoked twice in a retry, then the scheduled
task might not have been "picked" properly because it was
still scheduled.
-rw-r--r-- | litmus/sched_gedf.c | 11 |
1 file changed, 9 insertions, 2 deletions
diff --git a/litmus/sched_gedf.c b/litmus/sched_gedf.c index 3e544bb159..5e139ae36b 100644 --- a/litmus/sched_gedf.c +++ b/litmus/sched_gedf.c | |||
@@ -134,7 +134,7 @@ static noinline void link_task_to_cpu(struct task_struct* linked, | |||
134 | } | 134 | } |
135 | entry->linked = linked; | 135 | entry->linked = linked; |
136 | entry->picked = entry == sched; /* set to one if we linked to the | 136 | entry->picked = entry == sched; /* set to one if we linked to the |
137 | * the CPU that the task is | 137 | * the CPU that the task is |
138 | * executing on | 138 | * executing on |
139 | */ | 139 | */ |
140 | if (linked) | 140 | if (linked) |
@@ -425,7 +425,6 @@ static struct task_struct* gedf_schedule(struct task_struct * prev) | |||
425 | /* Schedule a linked job? */ | 425 | /* Schedule a linked job? */ |
426 | if (entry->linked) { | 426 | if (entry->linked) { |
427 | entry->linked->rt_param.scheduled_on = entry->cpu; | 427 | entry->linked->rt_param.scheduled_on = entry->cpu; |
428 | entry->picked = 1; | ||
429 | next = entry->linked; | 428 | next = entry->linked; |
430 | } | 429 | } |
431 | if (entry->scheduled) | 430 | if (entry->scheduled) |
@@ -437,6 +436,14 @@ static struct task_struct* gedf_schedule(struct task_struct * prev) | |||
437 | if (exists) | 436 | if (exists) |
438 | next = prev; | 437 | next = prev; |
439 | 438 | ||
439 | /* Mark entry->linked as being ours. Do this unconditionally since | ||
440 | * entry->linked might have become reassigned to us while we dropped | ||
441 | * the lock even though we never descheduled it. In this case, | ||
442 | * entry->picked became reset. | ||
443 | */ | ||
444 | entry->picked = 1; | ||
445 | if (next) | ||
446 | tsk_rt(next)->scheduled_on = entry->cpu; | ||
440 | spin_unlock(&gedf_cpu_lock); | 447 | spin_unlock(&gedf_cpu_lock); |
441 | if (exists && preempt && !blocks) | 448 | if (exists && preempt && !blocks) |
442 | /* stick preempted task back into the ready queue */ | 449 | /* stick preempted task back into the ready queue */ |