author    Manohar Vanga <mvanga@mpi-sws.org>    2012-10-03 18:30:51 -0400
committer Bjoern Brandenburg <bbb@mpi-sws.org>  2012-11-27 13:50:17 -0500
commit    b7012aa7edba4b88906fc39b9005ff4dae69be59 (patch)
tree      5176a5e920adcb4158d67778b2cdecb1e3cdc4c0
parent    e99428ce6786f76b64cfea5da814a7a5cd939de3 (diff)
litmus: get rid of RT_F_SLEEP and RT_F_RUNNING
This patch removes the flags RT_F_SLEEP and RT_F_RUNNING, as their names
are misleading, and replaces them with a 'completed' field in struct
rt_param.

Signed-off-by: Manohar Vanga <mvanga@mpi-sws.org>
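For reference, a minimal before/after sketch of the idiom this patch
changes (illustrative only, using a task pointer t as in the hunks below;
is_completed() is the new helper added to litmus.h):

        /* old: completion tracked via an overloaded flag word */
        set_rt_flags(t, RT_F_SLEEP);            /* mark job complete */
        sleep = get_rt_flags(t) == RT_F_SLEEP;  /* test for completion */

        /* new: dedicated one-bit field in struct rt_param */
        tsk_rt(t)->completed = 1;               /* mark job complete */
        sleep = is_completed(t);                /* test for completion */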
-rw-r--r--  include/litmus/litmus.h   |  5 +++++
-rw-r--r--  include/litmus/rt_param.h |  5 +++--
-rw-r--r--  litmus/jobs.c             |  4 ++--
-rw-r--r--  litmus/sched_cedf.c       | 12 ++++++------
-rw-r--r--  litmus/sched_gsn_edf.c    | 14 +++++++-------
-rw-r--r--  litmus/sched_pfair.c      |  8 ++++----
-rw-r--r--  litmus/sched_pfp.c        |  8 ++++----
-rw-r--r--  litmus/sched_psn_edf.c    |  8 ++++----
8 files changed, 35 insertions(+), 29 deletions(-)
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 1cb3eaf25740..12770e08990b 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -232,6 +232,11 @@ static inline int is_present(struct task_struct* t)
         return t && tsk_rt(t)->present;
 }
 
+static inline int is_completed(struct task_struct* t)
+{
+        return t && tsk_rt(t)->completed;
+}
+
 
 /* make the unit explicit */
 typedef unsigned long quanta_t;
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 8d9aa07cf324..5539b26588e6 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -160,6 +160,9 @@ struct rt_param {
         /* is the task present? (true if it can be scheduled) */
         unsigned int            present:1;
 
+        /* has the task completed? */
+        unsigned int            completed:1;
+
 #ifdef CONFIG_LITMUS_LOCKING
         /* Is the task being priority-boosted by a locking protocol? */
         unsigned int            priority_boosted:1;
@@ -246,8 +249,6 @@ struct rt_param {
 };
 
 /* Possible RT flags */
-#define RT_F_RUNNING            0x00000000
-#define RT_F_SLEEP              0x00000001
 #define RT_F_EXIT_SEM           0x00000008
 
 #endif
diff --git a/litmus/jobs.c b/litmus/jobs.c
index fb093c03d53d..13a4ed4c9e93 100644
--- a/litmus/jobs.c
+++ b/litmus/jobs.c
@@ -38,7 +38,7 @@ void release_at(struct task_struct *t, lt_t start)
 {
         BUG_ON(!t);
         setup_release(t, start);
-        set_rt_flags(t, RT_F_RUNNING);
+        tsk_rt(t)->completed = 0;
 }
 
 
@@ -48,7 +48,7 @@ void release_at(struct task_struct *t, lt_t start)
 long complete_job(void)
 {
         /* Mark that we do not excute anymore */
-        set_rt_flags(current, RT_F_SLEEP);
+        tsk_rt(current)->completed = 1;
         /* call schedule, this will return when a new job arrives
          * it also takes care of preparing for the next release
          */
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index b0c16e34d2c5..62d28d2bf1c8 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -171,7 +171,7 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 
         /* Link new task to CPU. */
         if (linked) {
-                set_rt_flags(linked, RT_F_RUNNING);
+                tsk_rt(linked)->completed = 0;
                 /* handle task is already scheduled somewhere! */
                 on_cpu = linked->rt_param.scheduled_on;
                 if (on_cpu != NO_CPU) {
@@ -350,7 +350,7 @@ static noinline void job_completion(struct task_struct *t, int forced)
         TRACE_TASK(t, "job_completion().\n");
 
         /* set flags */
-        set_rt_flags(t, RT_F_SLEEP);
+        tsk_rt(t)->completed = 1;
         /* prepare for next period */
         prepare_for_next_period(t);
         if (is_released(t, litmus_clock()))
@@ -404,7 +404,7 @@ static void cedf_tick(struct task_struct* t)
  *
  * - !is_running(scheduled)        // the job blocks
  * - scheduled->timeslice == 0     // the job completed (forcefully)
- * - get_rt_flag() == RT_F_SLEEP   // the job completed (by syscall)
+ * - is_completed()                // the job completed (by syscall)
  * - linked != scheduled           // we need to reschedule (for any reason)
  * - is_np(scheduled)              // rescheduling must be delayed,
  *   sys_exit_np must be requested
@@ -443,7 +443,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
                        budget_enforced(entry->scheduled) &&
                        budget_exhausted(entry->scheduled);
         np = exists && is_np(entry->scheduled);
-        sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP;
+        sleep = exists && is_completed(entry->scheduled);
         preempt = entry->scheduled != entry->linked;
 
 #ifdef WANT_ALL_SCHED_EVENTS
@@ -600,7 +600,7 @@ static void cedf_task_wake_up(struct task_struct *task)
          * a semaphore, it should never be treated as a new job release.
          */
         if (get_rt_flags(task) == RT_F_EXIT_SEM) {
-                set_rt_flags(task, RT_F_RUNNING);
+                tsk_rt(task)->completed = 0;
         } else {
                 now = litmus_clock();
                 if (is_tardy(task, now)) {
@@ -612,7 +612,7 @@ static void cedf_task_wake_up(struct task_struct *task)
                 if (task->rt.time_slice) {
                         /* came back in time before deadline
                          */
-                        set_rt_flags(task, RT_F_RUNNING);
+                        tsk_rt(task)->completed = 0;
                 }
         }
 }
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index c3344b9d288f..990e4e1f86a9 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -44,7 +44,7 @@
  * (thereby removing its association with this
  * CPU). However, it will not requeue the
  * previously linked task (if any). It will set
- * T's state to RT_F_RUNNING and check whether
+ * T's state to not completed and check whether
  * it is already running somewhere else. If T
  * is scheduled somewhere else it will link
  * it to that CPU instead (and pull the linked
@@ -173,7 +173,7 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 
         /* Link new task to CPU. */
         if (linked) {
-                set_rt_flags(linked, RT_F_RUNNING);
+                tsk_rt(linked)->completed = 0;
                 /* handle task is already scheduled somewhere! */
                 on_cpu = linked->rt_param.scheduled_on;
                 if (on_cpu != NO_CPU) {
@@ -341,7 +341,7 @@ static noinline void job_completion(struct task_struct *t, int forced)
         TRACE_TASK(t, "job_completion().\n");
 
         /* set flags */
-        set_rt_flags(t, RT_F_SLEEP);
+        tsk_rt(t)->completed = 1;
         /* prepare for next period */
         prepare_for_next_period(t);
         if (is_released(t, litmus_clock()))
@@ -394,7 +394,7 @@ static void gsnedf_tick(struct task_struct* t)
  *
  * - !is_running(scheduled)        // the job blocks
  * - scheduled->timeslice == 0     // the job completed (forcefully)
- * - get_rt_flag() == RT_F_SLEEP   // the job completed (by syscall)
+ * - is_completed()                // the job completed (by syscall)
  * - linked != scheduled           // we need to reschedule (for any reason)
  * - is_np(scheduled)              // rescheduling must be delayed,
  *   sys_exit_np must be requested
@@ -430,7 +430,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
         out_of_time = exists && budget_enforced(entry->scheduled)
                       && budget_exhausted(entry->scheduled);
         np = exists && is_np(entry->scheduled);
-        sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP;
+        sleep = exists && is_completed(entry->scheduled);
         preempt = entry->scheduled != entry->linked;
 
 #ifdef WANT_ALL_SCHED_EVENTS
@@ -582,7 +582,7 @@ static void gsnedf_task_wake_up(struct task_struct *task)
          * a semaphore, it should never be treated as a new job release.
          */
         if (get_rt_flags(task) == RT_F_EXIT_SEM) {
-                set_rt_flags(task, RT_F_RUNNING);
+                tsk_rt(task)->completed = 0;
         } else {
                 now = litmus_clock();
                 if (is_tardy(task, now)) {
@@ -594,7 +594,7 @@ static void gsnedf_task_wake_up(struct task_struct *task)
                 if (task->rt.time_slice) {
                         /* came back in time before deadline
                          */
-                        set_rt_flags(task, RT_F_RUNNING);
+                        tsk_rt(task)->completed = 0;
                 }
         }
 }
diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c
index 72c06a492ef9..58f7fc9e8363 100644
--- a/litmus/sched_pfair.c
+++ b/litmus/sched_pfair.c
@@ -299,7 +299,7 @@ static void pfair_prepare_next_period(struct task_struct* t)
         struct pfair_param* p = tsk_pfair(t);
 
         prepare_for_next_period(t);
-        get_rt_flags(t) = RT_F_RUNNING;
+        tsk_rt(t)->completed = 0;
         p->release += p->period;
 }
 
@@ -598,7 +598,7 @@ static int safe_to_schedule(struct task_struct* t, int cpu)
598 "scheduled already on %d.\n", cpu, where); 598 "scheduled already on %d.\n", cpu, where);
599 return 0; 599 return 0;
600 } else 600 } else
601 return tsk_rt(t)->present && get_rt_flags(t) == RT_F_RUNNING; 601 return tsk_rt(t)->present && !is_completed(t);
602} 602}
603 603
604static struct task_struct* pfair_schedule(struct task_struct * prev) 604static struct task_struct* pfair_schedule(struct task_struct * prev)
@@ -621,7 +621,7 @@ static struct task_struct* pfair_schedule(struct task_struct * prev)
         raw_spin_lock(cpu_lock(state));
 
         blocks      = is_realtime(prev) && !is_running(prev);
-        completion  = is_realtime(prev) && get_rt_flags(prev) == RT_F_SLEEP;
+        completion  = is_realtime(prev) && is_completed(prev);
         out_of_time = is_realtime(prev) && time_after(cur_release(prev),
                                                       state->local_tick);
 
@@ -720,7 +720,7 @@ static void pfair_task_wake_up(struct task_struct *t)
         /* only add to ready queue if the task isn't still linked somewhere */
         if (requeue) {
                 TRACE_TASK(t, "requeueing required\n");
-                tsk_rt(t)->flags = RT_F_RUNNING;
+                tsk_rt(t)->completed = 0;
                 __add_ready(&cluster->pfair, t);
         }
 
diff --git a/litmus/sched_pfp.c b/litmus/sched_pfp.c
index c1ae4bbd2529..91e52391a173 100644
--- a/litmus/sched_pfp.c
+++ b/litmus/sched_pfp.c
@@ -114,7 +114,7 @@ static void requeue(struct task_struct* t, pfp_domain_t *pfp)
 {
         BUG_ON(!is_running(t));
 
-        set_rt_flags(t, RT_F_RUNNING);
+        tsk_rt(t)->completed = 0;
         if (is_released(t, litmus_clock()))
                 fp_prio_add(&pfp->ready_queue, t, priority_index(t));
         else
@@ -126,7 +126,7 @@ static void job_completion(struct task_struct* t, int forced)
         sched_trace_task_completion(t,forced);
         TRACE_TASK(t, "job_completion().\n");
 
-        set_rt_flags(t, RT_F_SLEEP);
+        tsk_rt(t)->completed = 1;
         prepare_for_next_period(t);
         if (is_released(t, litmus_clock()))
                 sched_trace_task_release(t);
@@ -180,7 +180,7 @@ static struct task_struct* pfp_schedule(struct task_struct * prev)
                        budget_enforced(pfp->scheduled) &&
                        budget_exhausted(pfp->scheduled);
         np = exists && is_np(pfp->scheduled);
-        sleep = exists && get_rt_flags(pfp->scheduled) == RT_F_SLEEP;
+        sleep = exists && is_completed(pfp->scheduled);
         migrate = exists && get_partition(pfp->scheduled) != pfp->cpu;
         preempt = migrate || fp_preemption_needed(&pfp->ready_queue, prev);
 
@@ -253,7 +253,7 @@ static struct task_struct* pfp_schedule(struct task_struct * prev)
 
         if (next) {
                 TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
-                set_rt_flags(next, RT_F_RUNNING);
+                tsk_rt(next)->completed = 0;
         } else {
                 TRACE("becoming idle at %llu\n", litmus_clock());
         }
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index 8933e15605ae..0e1675d2e572 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -60,7 +60,7 @@ static void requeue(struct task_struct* t, rt_domain_t *edf)
         if (t->state != TASK_RUNNING)
                 TRACE_TASK(t, "requeue: !TASK_RUNNING\n");
 
-        set_rt_flags(t, RT_F_RUNNING);
+        tsk_rt(t)->completed = 0;
         if (is_released(t, litmus_clock()))
                 __add_ready(edf, t);
         else
@@ -160,7 +160,7 @@ static void job_completion(struct task_struct* t, int forced)
         sched_trace_task_completion(t,forced);
         TRACE_TASK(t, "job_completion().\n");
 
-        set_rt_flags(t, RT_F_SLEEP);
+        tsk_rt(t)->completed = 1;
         prepare_for_next_period(t);
 }
 
@@ -214,7 +214,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
                        budget_enforced(pedf->scheduled) &&
                        budget_exhausted(pedf->scheduled);
         np = exists && is_np(pedf->scheduled);
-        sleep = exists && get_rt_flags(pedf->scheduled) == RT_F_SLEEP;
+        sleep = exists && is_completed(pedf->scheduled);
         preempt = edf_preemption_needed(edf, prev);
 
         /* If we need to preempt do so.
@@ -266,7 +266,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
 
         if (next) {
                 TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
-                set_rt_flags(next, RT_F_RUNNING);
+                tsk_rt(next)->completed = 0;
         } else {
                 TRACE("becoming idle at %llu\n", litmus_clock());
         }