author    Bjoern B. Brandenburg <bbb@cs.unc.edu>  2010-09-21 20:54:41 -0400
committer Bjoern B. Brandenburg <bbb@cs.unc.edu>  2010-09-21 20:54:41 -0400
commit    16702bd7d7d8e797a4d87834ed861885742cb6c6 (patch)
tree      a8880673abccfff83948f99f7a902adbf8b60862
parent    cf64f69b82e9d641aebdbbd98f468218e41d6105 (diff)
EDF-WM: rip out non-preemptivity checks
-rw-r--r--  litmus/sched_edf_wm.c  43
1 file changed, 9 insertions(+), 34 deletions(-)
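
For context: before this patch, EDF-WM honored LITMUS^RT's non-preemptive-section
protocol in its tick handler. A budget-exhausted task inside a non-preemptive
section (is_np()) was not rescheduled immediately; request_exit_np() instead
asked it to yield once it left the section. The sketch below restates the
before/after tick logic using only identifiers that appear in the diff; it is
an illustrative summary, not buildable kernel code, and the _before/_after
function names are invented for the comparison.

/* Illustrative sketch only -- summarizes the diff, not buildable as-is. */
static void wm_tick_before(struct task_struct *t)
{
	if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) {
		if (!is_np(t)) {
			/* preemptable: force a reschedule right away */
			set_tsk_need_resched(t);
		} else if (is_user_np(t)) {
			/* inside a non-preemptive section: ask the task
			 * to leave it, delaying the preemption */
			request_exit_np(t);
		}
	}
}

static void wm_tick_after(struct task_struct *t)
{
	if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t))
		/* np-checks removed: exhaustion always forces a reschedule */
		set_tsk_need_resched(t);
}
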
diff --git a/litmus/sched_edf_wm.c b/litmus/sched_edf_wm.c
index ed8cc24e9fe6..324ca8f28ab7 100644
--- a/litmus/sched_edf_wm.c
+++ b/litmus/sched_edf_wm.c
@@ -128,17 +128,9 @@ static void wm_tick(struct task_struct *t)
 	BUG_ON(is_realtime(t) && t != dom->scheduled);
 
 	if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) {
-		if (!is_np(t)) {
-			set_tsk_need_resched(t);
-			TRACE("wm_scheduler_tick: "
-			      "%d is preemptable "
-			      " => FORCE_RESCHED\n", t->pid);
-		} else if (is_user_np(t)) {
-			TRACE("wm_scheduler_tick: "
-			      "%d is non-preemptable, "
-			      "preemption delayed.\n", t->pid);
-			request_exit_np(t);
-		}
+		set_tsk_need_resched(t);
+		TRACE_DOM(dom, "budget of %d exhausted in tick\n",
+			  t->pid);
 	}
 }
 
@@ -149,7 +141,7 @@ static struct task_struct* wm_schedule(struct task_struct * prev)
 	struct task_struct* next;
 
 	int out_of_time, sleep, preempt,
-	    np, exists, blocks, resched;
+	    exists, blocks, resched;
 
 	raw_spin_lock(&dom->slock);
 
@@ -166,7 +158,6 @@ static struct task_struct* wm_schedule(struct task_struct * prev)
 	out_of_time = exists &&
 		      budget_enforced(dom->scheduled) &&
 		      budget_exhausted(dom->scheduled);
-	np = exists && is_np(dom->scheduled);
 	sleep = exists && get_rt_flags(dom->scheduled) == RT_F_SLEEP;
 	preempt = edf_preemption_needed(edf, prev);
 
@@ -181,17 +172,11 @@ static struct task_struct* wm_schedule(struct task_struct * prev)
 	if (blocks)
 		resched = 1;
 
-	/* Request a sys_exit_np() call if we would like to preempt but cannot.
-	 * Multiple calls to request_exit_np() don't hurt.
-	 */
-	if (np && (out_of_time || preempt || sleep))
-		request_exit_np(dom->scheduled);
-
 	/* Any task that is preemptable and either exhausts its execution
 	 * budget or wants to sleep completes. We may have to reschedule after
 	 * this.
 	 */
-	if (!np && (out_of_time || sleep) && !blocks) {
+	if ((out_of_time || sleep) && !blocks) {
 		job_completion(dom->scheduled, !sleep);
 		resched = 1;
 	}
@@ -201,17 +186,13 @@ static struct task_struct* wm_schedule(struct task_struct * prev)
 	 * resched.
 	 */
 	next = NULL;
-	if ((!np || blocks) && (resched || !exists)) {
-		/* Take care of a previously scheduled
-		 * job by taking it out of the Linux runqueue.
-		 */
+	if (resched || !exists) {
 		if (dom->scheduled && !blocks)
 			requeue(dom->scheduled, edf);
 		next = __take_ready(edf);
 	} else
 		/* Only override Linux scheduler if we have a real-time task
-		 * scheduled that needs to continue.
-		 */
+		 * scheduled that needs to continue. */
 		if (exists)
 			next = prev;
 
@@ -269,15 +250,9 @@ static void wm_task_wake_up(struct task_struct *task)
 	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
 	raw_spin_lock_irqsave(&dom->slock, flags);
 	BUG_ON(is_queued(task));
-	/* We need to take suspensions because of semaphores into
-	 * account! If a job resumes after being suspended due to acquiring
-	 * a semaphore, it should never be treated as a new job release.
-	 *
-	 * FIXME: This should be done in some more predictable and userspace-controlled way.
-	 */
+
 	now = litmus_clock();
-	if (is_tardy(task, now) &&
-	    get_rt_flags(task) != RT_F_EXIT_SEM) {
+	if (is_tardy(task, now)) {
 		/* new sporadic release */
 		release_at(task, now);
 		sched_trace_task_release(task);
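
The wake-up hunk also drops the RT_F_EXIT_SEM special case: previously, a job
waking up past its deadline received a fresh sporadic release unless it was
resuming from a semaphore suspension, which must not be mistaken for a new job
release. A sketch of the before/after logic, using the identifiers from the
diff (illustrative only; the function names are invented, and lt_t is assumed
to be LITMUS^RT's timestamp type as returned by litmus_clock()):

/* Illustrative sketch only; lt_t assumed to be the LITMUS^RT time type. */
static void wake_up_release_before(struct task_struct *task, lt_t now)
{
	/* suppress a new release when resuming from a semaphore */
	if (is_tardy(task, now) && get_rt_flags(task) != RT_F_EXIT_SEM) {
		release_at(task, now);          /* new sporadic release */
		sched_trace_task_release(task);
	}
}

static void wake_up_release_after(struct task_struct *task, lt_t now)
{
	/* any tardy wake-up now counts as a new sporadic release */
	if (is_tardy(task, now)) {
		release_at(task, now);
		sched_trace_task_release(task);
	}
}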