 litmus/sched_psn_edf.c | 52 +++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 41 insertions(+), 11 deletions(-)
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index 3a93124e24f6..4829653b8f76 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -26,6 +26,13 @@ typedef struct {
 	int cpu;
 	struct task_struct* scheduled; /* only RT tasks */
 /*
+ * A block event (mainly during a synchronous release) may race
+ * with other releases or with the triggering of the schedule()
+ * function. We need to trace a previous blocking event in the
+ * first execution of schedule().
+ */
+	int block;
+/*
  * scheduling lock slock
  * protects the domain and serializes scheduling decisions
  */
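The handshake this field enables can be sketched as a self-contained userspace model (the model_* names are hypothetical, and a pthread mutex stands in for the per-CPU slock): the blocking side records the event under the lock even as it clears scheduled, and the next scheduling pass consumes the flag.

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for psnedf_domain_t, reduced to the fields
 * the handshake needs. */
struct pedf_model {
	pthread_mutex_t slock;  /* stands in for the per-CPU slock */
	void *scheduled;        /* stands in for pedf->scheduled */
	int block;              /* sticky "a task just blocked" flag */
};

/* Blocking side: runs in the task that is going to sleep. */
static void model_task_block(struct pedf_model *pedf)
{
	pthread_mutex_lock(&pedf->slock);
	pedf->block = 1;        /* remember the block event... */
	pedf->scheduled = NULL; /* ...even though scheduled is cleared */
	pthread_mutex_unlock(&pedf->slock);
}

/* Scheduling side: the first pass after the block must still see the
 * event, although scheduled is already NULL. In the kernel this flag
 * is OR-ed with the exists && !is_running() test. */
static int model_schedule(struct pedf_model *pedf)
{
	int resched;

	pthread_mutex_lock(&pedf->slock);
	resched = pedf->block;  /* scheduled == NULL alone would hide it */
	pedf->block = 0;        /* consume the one-shot flag */
	pthread_mutex_unlock(&pedf->slock);
	return resched;
}

int main(void)
{
	struct pedf_model pedf = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };

	model_task_block(&pedf);
	printf("resched = %d\n", model_schedule(&pedf)); /* prints 1 */
	return 0;
}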
@@ -51,6 +58,7 @@ static void psnedf_domain_init(psnedf_domain_t* pedf,
 	edf_domain_init(&pedf->domain, check, release);
 	pedf->cpu       = cpu;
 	pedf->scheduled = NULL;
+	pedf->block     = 0;
 }
 
 static void requeue(struct task_struct* t, rt_domain_t *edf)
@@ -140,22 +148,36 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
 
 	/* (0) Determine state */
 	exists      = pedf->scheduled != NULL;
-	blocks      = exists && !is_running(pedf->scheduled);
+	/* A task may block if 1) the task exists but is no longer running,
+	 * or 2) the task blocked and this schedule() is racing with it while
+	 * it is going to sleep. In that case pedf->scheduled != prev (and we
+	 * have already reset pedf->scheduled to NULL), but we nonetheless
+	 * want to pick a real-time task as next (if such a task exists in
+	 * the ready queue).
+	 */
+	blocks      = pedf->block || (exists && !is_running(pedf->scheduled));
 	out_of_time = exists && budget_exhausted(pedf->scheduled);
 	np          = exists && is_np(pedf->scheduled);
 	sleep       = exists && get_rt_flags(pedf->scheduled) == RT_F_SLEEP;
 	preempt     = edf_preemption_needed(edf, prev);
 
-	/* If we need to preempt do so.
-	 * The following checks set resched to 1 in case of special
-	 * circumstances.
-	 */
-	resched = preempt;
 
 	/* If a task blocks we have no choice but to reschedule.
 	 */
-	if (blocks)
+	if (blocks) {
+
 		resched = 1;
+		/* reset the block flag, we are about to reschedule */
+		pedf->block = 0;
+	} else {
+
+		/* If we need to preempt do so.
+		 * The following checks set resched to 1 in case of special
+		 * circumstances.
+		 */
+		resched = preempt;
+	}
+
 
 	/* Request a sys_exit_np() call if we would like to preempt but cannot.
 	 * Multiple calls to request_exit_np() don't hurt.
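The rewritten decision can be checked in isolation. A minimal sketch, with booleans standing in for the kernel state tests and a hypothetical want_resched() helper that mirrors the blocks/preempt split above:

#include <assert.h>

/* Hypothetical helper mirroring the decision above: a recorded block
 * (flag) or an existing-but-not-running task forces a reschedule;
 * otherwise the EDF preemption check decides. */
static int want_resched(int block_flag, int exists, int running, int preempt)
{
	int blocks = block_flag || (exists && !running);

	return blocks ? 1 : preempt;
}

int main(void)
{
	/* case 1): the scheduled task exists but is no longer running */
	assert(want_resched(0, 1, 0, 0) == 1);
	/* case 2): scheduled was already reset to NULL by the racing
	 * task_block(); only the sticky flag reports the event */
	assert(want_resched(1, 0, 0, 0) == 1);
	/* no block: the plain preemption check decides */
	assert(want_resched(0, 1, 1, 1) == 1);
	assert(want_resched(0, 1, 1, 0) == 0);
	return 0;
}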
@@ -266,6 +288,7 @@ static void psnedf_task_wake_up(struct task_struct *task)
 static void psnedf_task_block(struct task_struct *t)
 {
 	psnedf_domain_t *pedf = task_pedf(t);
+	unsigned long flags;
 
 	/* only running tasks can block, thus t is in no queue */
 	TRACE_TASK(t, "block at %llu, state=%d\n", litmus_clock(), t->state);
@@ -273,12 +296,18 @@ static void psnedf_task_block(struct task_struct *t)
 	BUG_ON(!is_realtime(t));
 	BUG_ON(is_queued(t));
 
-	/* if this task is dead, then we need to reset pedf->schedule now
-	 * as we might get rescheduled before task_exit executes
+	/* If this task is no longer runnable, then we need to reset
+	 * pedf->scheduled and set the block flag, as another schedule() may
+	 * race with us while we are going to sleep.
 	 */
-	if(unlikely(t->state == TASK_DEAD)) {
-		TRACE_TASK(t, "Dead, setting scheduled = NULL\n");
+	if (likely(t->state != TASK_RUNNING)) {
+
+		TRACE_TASK(t, "psnedf_task_block, setting block flag\n");
+
+		spin_lock_irqsave(&pedf->slock, flags);
+		pedf->block = 1;
 		pedf->scheduled = NULL;
+		spin_unlock_irqrestore(&pedf->slock, flags);
 	}
 }
 
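Note that the guard widens from t->state == TASK_DEAD to any state other than TASK_RUNNING, so an ordinary sleep now records the event just as an exiting task does. A small sketch of that predicate (the enum values are illustrative stand-ins, not the kernel's task states):

#include <assert.h>

/* Illustrative stand-ins for the kernel task states. */
enum model_state { MODEL_RUNNING, MODEL_INTERRUPTIBLE, MODEL_DEAD };

/* Previously only a dead task reset pedf->scheduled; now any
 * non-running task records the block event as well. */
static int records_block(enum model_state s)
{
	return s != MODEL_RUNNING;
}

int main(void)
{
	assert(records_block(MODEL_DEAD));          /* exiting task */
	assert(records_block(MODEL_INTERRUPTIBLE)); /* ordinary sleep */
	assert(!records_block(MODEL_RUNNING));      /* still runnable */
	return 0;
}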
@@ -347,6 +376,7 @@ static long psnedf_pi_block(struct pi_semaphore *sem,
 
 		spin_unlock(&pedf->slock);
 	}
+	TRACE_TASK(sem->holder, "psnedf_pi_block\n");
 
 	return 0;
 }