diff options
author | Jonathan Herman <hermanjl@cs.unc.edu> | 2011-04-11 00:15:44 -0400 |
---|---|---|
committer | Jonathan Herman <hermanjl@cs.unc.edu> | 2011-04-11 00:15:44 -0400 |
commit | db309167b52ea3060567ebd6395781e3a54a4fd7 (patch) | |
tree | 79734fc29b8efa898fcfc487f2bbea75b2b6a83f | |
parent | 1be669427c6e2dddcae5e3e6caabd4de55ca5e87 (diff) |
Fix blocking of a task while it is being run by a slack server
-rw-r--r-- | litmus/sched_edf_hsb.c | 49 |
1 file changed, 35 insertions, 14 deletions
diff --git a/litmus/sched_edf_hsb.c b/litmus/sched_edf_hsb.c index d4869773875d..6d370340bcf9 100644 --- a/litmus/sched_edf_hsb.c +++ b/litmus/sched_edf_hsb.c | |||
@@ -812,18 +812,23 @@ static noinline void unlink(struct task_struct* t) | |||
812 | /* Unlink */ | 812 | /* Unlink */ |
813 | entry = task_linked_entry(t); | 813 | entry = task_linked_entry(t); |
814 | link_task_to_cpu(entry, NULL, NULL, 0); | 814 | link_task_to_cpu(entry, NULL, NULL, 0); |
815 | } else if (is_queued(t) && is_released(t, litmus_clock())) { | 815 | } else if (is_queued(t)) { |
816 | /* This is an interesting situation: t is scheduled, | 816 | |
817 | * but has already been unlinked. It was re-added to | 817 | if (is_released(t, litmus_clock())) { |
818 | * a ready queue of some sort but now needs to | 818 | /* This is an interesting situation: t is scheduled, |
819 | * be removed. This usually happens when a job has | 819 | * but has already been unlinked. It was re-added to |
820 | * been preempted but completes before it is | 820 | * a ready queue of some sort but now needs to |
821 | * descheduled. | 821 | * be removed. This usually happens when a job has |
822 | */ | 822 | * been preempted but completes before it is |
823 | TRACE_TASK_SUB(t, "removing from domain"); | 823 | * descheduled. |
824 | entry = task_sched_entry(t); | 824 | */ |
825 | remove(get_rt_domain(entry, t), t); | 825 | TRACE_TASK_SUB(t, "removing from domain"); |
826 | BUG_ON(is_queued(t)); | 826 | entry = task_sched_entry(t); |
827 | remove(get_rt_domain(entry, t), t); | ||
828 | BUG_ON(is_queued(t)); | ||
829 | }/* else { */ | ||
830 | |||
831 | /* } */ | ||
827 | } | 832 | } |
828 | } | 833 | } |
829 | 834 | ||
@@ -957,6 +962,15 @@ static struct task_struct* next_eligible_srt(void) | |||
957 | struct task_struct *next_srt = __peek_ready(&srt_domain); | 962 | struct task_struct *next_srt = __peek_ready(&srt_domain); |
958 | server_t *srt_server; | 963 | server_t *srt_server; |
959 | 964 | ||
965 | /* A blocking task might pollute the SRT domain if the | ||
966 | * task blocked while it was being run by a slack server. | ||
967 | * Remove and ignore this task. | ||
968 | */ | ||
969 | while (next_srt && !is_running(next_srt)) { | ||
970 | remove(&srt_domain, next_srt); | ||
971 | next_srt = __peek_ready(&srt_domain); | ||
972 | } | ||
973 | |||
960 | /* Catch up srt server. This happens when the job is tardy due | 974 | /* Catch up srt server. This happens when the job is tardy due |
961 | * to overutilization of the system. | 975 | * to overutilization of the system. |
962 | */ | 976 | */ |
@@ -2125,11 +2139,18 @@ static void edf_hsb_task_wake_up(struct task_struct *task) | |||
2125 | unsigned long flags; | 2139 | unsigned long flags; |
2126 | cpu_entry_t *entry = task_sched_entry(task); | 2140 | cpu_entry_t *entry = task_sched_entry(task); |
2127 | 2141 | ||
2128 | BUG_ON(is_queued(task)); | ||
2129 | 2142 | ||
2130 | TRACE_TASK(task, "wake_up at %llu on %d, %d\n", TIME(litmus_clock()), | 2143 | TRACE_TASK(task, "wake_up at %llu on %d, %d\n", TIME(litmus_clock()), |
2131 | task_cpu(task), task->rt_param.task_params.cpu); | 2144 | task_cpu(task), task->rt_param.task_params.cpu); |
2132 | 2145 | ||
2146 | set_rt_flags(task, RT_F_RUNNING); | ||
2147 | |||
2148 | /* The job blocked while it was being run by a slack server */ | ||
2149 | if (is_queued(task)) { | ||
2150 | check_slack_candidate(task); | ||
2151 | return; | ||
2152 | } | ||
2153 | |||
2133 | //BUG_ON(entry->scheduled == task); | 2154 | //BUG_ON(entry->scheduled == task); |
2134 | 2155 | ||
2135 | raw_spin_lock_irqsave(global_lock, flags); | 2156 | raw_spin_lock_irqsave(global_lock, flags); |
@@ -2143,7 +2164,7 @@ static void edf_hsb_task_wake_up(struct task_struct *task) | |||
2143 | * switched from the sporadic to the periodic task model. See | 2164 | * switched from the sporadic to the periodic task model. See |
2144 | * sched_gsn_edf.c for the sporadic version. | 2165 | * sched_gsn_edf.c for the sporadic version. |
2145 | */ | 2166 | */ |
2146 | set_rt_flags(task, RT_F_RUNNING); | 2167 | |
2147 | job_arrival(task, entry); | 2168 | job_arrival(task, entry); |
2148 | 2169 | ||
2149 | raw_spin_unlock_irqrestore(global_lock, flags); | 2170 | raw_spin_unlock_irqrestore(global_lock, flags); |