about summary refs log tree commit diff stats
path: root/litmus
diff options
context:
space:
mode:
authorGlenn Elliott <gelliott@cs.unc.edu>2013-03-06 13:19:53 -0500
committerBjoern Brandenburg <bbb@mpi-sws.org>2013-03-12 10:29:37 -0400
commitf4ffe0719dfc150ee182f308d31a226b034f206b (patch)
tree0ffbaebcb08675ae4dda42711a8122c82254ff31 /litmus
parent181b6bb0f5f122741262edc7ac0eca86d3f6dd73 (diff)
Differentiate between PERIODIC and SPORADIC tasks.
Tasks can now be PERIODIC or SPORADIC. PERIODIC tasks do not have their job number incremented when they wake up and are tardy. PERIODIC jobs must end with a call to sys_complete_job() to set up their next release. (Not currently supported by pfair.) SPORADIC tasks _do_ have their job number incremented when they wake up and are tardy. SPORADIC is the default task behavior, carrying forward Litmus's current behavior.
Diffstat (limited to 'litmus')
-rw-r--r--litmus/sched_cedf.c6
-rw-r--r--litmus/sched_gsn_edf.c6
-rw-r--r--litmus/sched_pfair.c2
-rw-r--r--litmus/sched_pfp.c2
-rw-r--r--litmus/sched_psn_edf.c4
5 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index ba3ed4525421..6e1327bbf504 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -254,7 +254,7 @@ static noinline void requeue(struct task_struct* task)
254 /* sanity check before insertion */ 254 /* sanity check before insertion */
255 BUG_ON(is_queued(task)); 255 BUG_ON(is_queued(task));
256 256
257 if (wants_early_release(task) || is_released(task, litmus_clock())) 257 if (is_early_releasing(task) || is_released(task, litmus_clock()))
258 __add_ready(&cluster->domain, task); 258 __add_ready(&cluster->domain, task);
259 else { 259 else {
260 /* it has got to wait */ 260 /* it has got to wait */
@@ -353,7 +353,7 @@ static noinline void job_completion(struct task_struct *t, int forced)
353 tsk_rt(t)->completed = 1; 353 tsk_rt(t)->completed = 1;
354 /* prepare for next period */ 354 /* prepare for next period */
355 prepare_for_next_period(t); 355 prepare_for_next_period(t);
356 if (wants_early_release(t) || is_released(t, litmus_clock())) 356 if (is_early_releasing(t) || is_released(t, litmus_clock()))
357 sched_trace_task_release(t); 357 sched_trace_task_release(t);
358 /* unlink */ 358 /* unlink */
359 unlink(t); 359 unlink(t);
@@ -596,7 +596,7 @@ static void cedf_task_wake_up(struct task_struct *task)
596 596
597 raw_spin_lock_irqsave(&cluster->cluster_lock, flags); 597 raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
598 now = litmus_clock(); 598 now = litmus_clock();
599 if (is_tardy(task, now)) { 599 if (is_sporadic(task) && is_tardy(task, now)) {
600 /* new sporadic release */ 600 /* new sporadic release */
601 release_at(task, now); 601 release_at(task, now);
602 sched_trace_task_release(task); 602 sched_trace_task_release(task);
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index ac5c4d836018..5956978ccdbe 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -251,7 +251,7 @@ static noinline void requeue(struct task_struct* task)
251 /* sanity check before insertion */ 251 /* sanity check before insertion */
252 BUG_ON(is_queued(task)); 252 BUG_ON(is_queued(task));
253 253
254 if (wants_early_release(task) || is_released(task, litmus_clock())) 254 if (is_early_releasing(task) || is_released(task, litmus_clock()))
255 __add_ready(&gsnedf, task); 255 __add_ready(&gsnedf, task);
256 else { 256 else {
257 /* it has got to wait */ 257 /* it has got to wait */
@@ -344,7 +344,7 @@ static noinline void job_completion(struct task_struct *t, int forced)
344 tsk_rt(t)->completed = 1; 344 tsk_rt(t)->completed = 1;
345 /* prepare for next period */ 345 /* prepare for next period */
346 prepare_for_next_period(t); 346 prepare_for_next_period(t);
347 if (wants_early_release(t) || is_released(t, litmus_clock())) 347 if (is_early_releasing(t) || is_released(t, litmus_clock()))
348 sched_trace_task_release(t); 348 sched_trace_task_release(t);
349 /* unlink */ 349 /* unlink */
350 unlink(t); 350 unlink(t);
@@ -578,7 +578,7 @@ static void gsnedf_task_wake_up(struct task_struct *task)
578 578
579 raw_spin_lock_irqsave(&gsnedf_lock, flags); 579 raw_spin_lock_irqsave(&gsnedf_lock, flags);
580 now = litmus_clock(); 580 now = litmus_clock();
581 if (is_tardy(task, now)) { 581 if (is_sporadic(task) && is_tardy(task, now)) {
582 /* new sporadic release */ 582 /* new sporadic release */
583 release_at(task, now); 583 release_at(task, now);
584 sched_trace_task_release(task); 584 sched_trace_task_release(task);
diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c
index 6a89b003306c..d5fb3a832adc 100644
--- a/litmus/sched_pfair.c
+++ b/litmus/sched_pfair.c
@@ -710,7 +710,7 @@ static void pfair_task_wake_up(struct task_struct *t)
710 */ 710 */
711 requeue = tsk_rt(t)->flags == RT_F_REQUEUE; 711 requeue = tsk_rt(t)->flags == RT_F_REQUEUE;
712 now = litmus_clock(); 712 now = litmus_clock();
713 if (lt_before(get_deadline(t), now)) { 713 if (is_tardy(t, now)) {
714 TRACE_TASK(t, "sporadic release!\n"); 714 TRACE_TASK(t, "sporadic release!\n");
715 release_at(t, now); 715 release_at(t, now);
716 prepare_release(t, time2quanta(now, CEIL)); 716 prepare_release(t, time2quanta(now, CEIL));
diff --git a/litmus/sched_pfp.c b/litmus/sched_pfp.c
index fc9a509f185d..aade09044917 100644
--- a/litmus/sched_pfp.c
+++ b/litmus/sched_pfp.c
@@ -343,7 +343,7 @@ static void pfp_task_wake_up(struct task_struct *task)
343 BUG_ON(is_queued(task)); 343 BUG_ON(is_queued(task));
344#endif 344#endif
345 now = litmus_clock(); 345 now = litmus_clock();
346 if (is_tardy(task, now) 346 if (is_sporadic(task) && is_tardy(task, now)
347#ifdef CONFIG_LITMUS_LOCKING 347#ifdef CONFIG_LITMUS_LOCKING
348 /* We need to take suspensions because of semaphores into 348 /* We need to take suspensions because of semaphores into
349 * account! If a job resumes after being suspended due to acquiring 349 * account! If a job resumes after being suspended due to acquiring
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index 316333460dd9..65c85a3a4c64 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -61,7 +61,7 @@ static void requeue(struct task_struct* t, rt_domain_t *edf)
61 TRACE_TASK(t, "requeue: !TASK_RUNNING\n"); 61 TRACE_TASK(t, "requeue: !TASK_RUNNING\n");
62 62
63 tsk_rt(t)->completed = 0; 63 tsk_rt(t)->completed = 0;
64 if (wants_early_release(t) || is_released(t, litmus_clock())) 64 if (is_early_releasing(t) || is_released(t, litmus_clock()))
65 __add_ready(edf, t); 65 __add_ready(edf, t);
66 else 66 else
67 add_release(edf, t); /* it has got to wait */ 67 add_release(edf, t); /* it has got to wait */
@@ -320,7 +320,7 @@ static void psnedf_task_wake_up(struct task_struct *task)
320 raw_spin_lock_irqsave(&pedf->slock, flags); 320 raw_spin_lock_irqsave(&pedf->slock, flags);
321 BUG_ON(is_queued(task)); 321 BUG_ON(is_queued(task));
322 now = litmus_clock(); 322 now = litmus_clock();
323 if (is_tardy(task, now) 323 if (is_sporadic(task) && is_tardy(task, now)
324#ifdef CONFIG_LITMUS_LOCKING 324#ifdef CONFIG_LITMUS_LOCKING
325 /* We need to take suspensions because of semaphores into 325 /* We need to take suspensions because of semaphores into
326 * account! If a job resumes after being suspended due to acquiring 326 * account! If a job resumes after being suspended due to acquiring