author    Jonathan Herman <hermanjl@cs.unc.edu>  2011-10-14 19:22:30 -0400
committer Jonathan Herman <hermanjl@cs.unc.edu>  2011-10-14 19:22:30 -0400
commit    2cd5b8b9f496e92884b71eb58ea1ed40947cd2f3 (patch)
tree      8eabb44e3b4cfe78eaf20573e8c95cde2c1138e0 /litmus
parent    9f7381b674d6e20fefde16633864d9e0aa44fa80 (diff)
paranoid android
Diffstat (limited to 'litmus')
-rw-r--r--  litmus/budget.c            45
-rw-r--r--  litmus/jobs.c               2
-rw-r--r--  litmus/rt_domain.c         35
-rw-r--r--  litmus/sched_mc.c          64
-rw-r--r--  litmus/sched_task_trace.c   4
5 files changed, 110 insertions, 40 deletions
diff --git a/litmus/budget.c b/litmus/budget.c
index 75f4b5156947..7d94f7e61b27 100644
--- a/litmus/budget.c
+++ b/litmus/budget.c
@@ -5,6 +5,7 @@
 #include <litmus/litmus.h>
 #include <litmus/preempt.h>
 #include <litmus/budget.h>
+#include <litmus/sched_trace.h>
 
 struct enforcement_timer {
 	/* The enforcement timer is used to accurately police
@@ -63,7 +64,7 @@ static void arm_enforcement_timer(struct enforcement_timer* et,
 
 	/* Calling this when there is no budget left for the task
 	 * makes no sense, unless the task is non-preemptive. */
-	BUG_ON(budget_exhausted(t) && (!is_np(t)));
+	/* BUG_ON(budget_exhausted(t) && (!is_np(t))); */
 
 	/* __hrtimer_start_range_ns() cancels the timer
 	 * anyway, so we don't have to check whether it is still armed */
@@ -109,20 +110,38 @@ static int __init init_budget_enforcement(void)
 	return 0;
 }
 
+void task_release(struct task_struct *t)
+{
+	t->rt_param.job_params.real_release = t->rt_param.job_params.deadline;
+	t->rt_param.job_params.real_deadline += get_rt_period(t);
+	t->rt_param.job_params.job_no++;
+	TRACE_TASK(t, "Releasing task, rr=%llu rd=%llu\n",
+		   t->rt_param.job_params.real_release,
+		   t->rt_param.job_params.real_deadline);
+	sched_trace_task_release(t);
+}
+
+void server_release(struct task_struct *t)
+{
+	lt_t now = litmus_clock();
+	t->rt_param.job_params.exec_time = 0;
+	t->rt_param.job_params.release = t->rt_param.job_params.deadline;
+	t->rt_param.job_params.deadline += get_rt_period(t);
+	TRACE_TASK(t, "Releasing server, r=%llu d=%llu\n",
+		   t->rt_param.job_params.release,
+		   t->rt_param.job_params.deadline);
+	/* don't confuse linux */
+	t->rt.time_slice = 1;
+}
+
 void prepare_for_next_server(struct task_struct *t, int forced)
 {
-	if (!job_behind(t)) {
-		t->rt_param.job_params.release = t->rt_param.job_params.deadline;
-		t->rt_param.job_params.deadline += get_rt_period(t);
-		t->rt_param.job_params.exec_time = 0;
-		/* update job sequence number */
-		t->rt_param.job_params.job_no++;
-	} else if (forced) {
-		t->rt_param.job_params.release = t->rt_param.job_params.deadline;
-		t->rt_param.job_params.exec_time = 0;
-	} else /* behind */{
-		t->rt_param.job_params.deadline += get_rt_period(t);
-		t->rt_param.job_params.job_no++;
+	if (forced || job_behind(t)) {
+		server_release(t);
+	}
+
+	if (!forced) {
+		task_release(t);
 	}
 }
 
diff --git a/litmus/jobs.c b/litmus/jobs.c
index 669a3df5fcc1..10a42db1165e 100644
--- a/litmus/jobs.c
+++ b/litmus/jobs.c
@@ -12,7 +12,9 @@ void prepare_for_next_period(struct task_struct *t)
 	/* prepare next release */
 
 	t->rt_param.job_params.release = t->rt_param.job_params.deadline;
+	t->rt_param.job_params.real_release = t->rt_param.job_params.release;
 	t->rt_param.job_params.deadline += get_rt_period(t);
+	t->rt_param.job_params.real_deadline = t->rt_param.job_params.deadline;
 	t->rt_param.job_params.exec_time = 0;
 	/* update job sequence number */
 	t->rt_param.job_params.job_no++;
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
index 09123eeadf2c..5264d2546a7f 100644
--- a/litmus/rt_domain.c
+++ b/litmus/rt_domain.c
@@ -254,7 +254,7 @@ static void setup_release(rt_domain_t *_rt)
 	list_for_each_safe(pos, safe, &list) {
 		/* pick task of work list */
 		t = list_entry(pos, struct task_struct, rt_param.list);
-		sched_trace_task_release(t);
+		/* sched_trace_task_release(t); */
 		list_del_init(pos);
 
 		/* put into release heap while holding release_lock */
@@ -410,14 +410,21 @@ static void pd_requeue(domain_t *dom, struct task_struct *task)
 	BUG_ON(is_queued(task));
 	BUG_ON(get_task_domain(task) != dom);
 
+	if (is_queued(task)) {
+		VTRACE_TASK(task, "Queued, skipping\n");
+		return;
+	}
+
 	if (is_released(task, litmus_clock())) {
 		__add_ready(domain, task);
-		VTRACE("rt: adding %s/%d (%llu, %llu) rel=%llu to ready queue at %llu\n",
+		/* tsk_rt(task)->go = 1; */
+		VTRACE("going, rt: adding %s/%d (%llu, %llu) rel=%llu to ready queue at %llu\n",
 		       task->comm, task->pid, get_exec_cost(task), get_rt_period(task),
 		       get_release(task), litmus_clock());
 	} else {
 		/* task has to wait for next release */
-		VTRACE_TASK(task, "add release(), rel=%llu\n", get_release(task));
+		VTRACE_TASK(task, "not going, add release(), rel=%llu\n", get_release(task));
+		/* tsk_rt(task)->go = 0; */
 		add_release(domain, task);
 	}
 
@@ -429,7 +436,12 @@ static void pd_requeue(domain_t *dom, struct task_struct *task)
  */
 static struct task_struct* pd_take_ready(domain_t *dom)
 {
-	return __take_ready((rt_domain_t*)dom->data);
+	struct task_struct *t = __take_ready((rt_domain_t*)dom->data);
+	if (t) {
+		/* TRACE_TASK(t, "going\n"); */
+		/* tsk_rt(t)->go = 1; */
+	}
+	return t;
 }
 
 /* pd_peek_ready - returns the head of the rt_domain ready queue
@@ -438,14 +450,23 @@ static struct task_struct* pd_take_ready(domain_t *dom)
  */
 static struct task_struct* pd_peek_ready(domain_t *dom)
 {
-	return __next_ready((rt_domain_t*)dom->data);
+	struct task_struct *t = __peek_ready((rt_domain_t*)dom->data);
+	/* if (t) { */
+	/* 	TRACE_TASK(t, "going\n"); */
+	/* 	tsk_rt(t)->go = 1; */
+	/* } */
+	return t;
 }
 
 static void pd_remove(domain_t *dom, struct task_struct *task)
 {
-	if (is_queued(task)) {
+	if (is_queued(task))
 		remove((rt_domain_t*)dom->data, task);
-	}
+	/* if (is_queued(task) && tsk_rt(task)->go) { */
+	/* 	remove((rt_domain_t*)dom->data, task); */
+	/* } else if (is_queued(task)) { */
+	/* 	VTRACE_TASK(task, "Did not remove cause of nogo\n"); */
+	/* } */
 }
 
 /* pd_domain_init - create a generic domain wrapper for an rt_domain
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index baded0d58cb9..da8a82119612 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -477,6 +477,22 @@ static void update_crit_levels(struct cpu_entry *entry)
 	}
 }
 
+static inline int cache_next(struct domain *dom)
+{
+	struct task_struct *t;
+	t = dom->peek_ready(dom);
+	return 1;
+	/* if (t && tsk_mc_crit(t) != CRIT_LEVEL_A && budget_exhausted(t)) { */
+	/* 	TRACE_TASK(t, "Cached and moved to release\n"); */
+	/* 	prepare_for_next_server(t, 1); */
+	/* 	dom->take_ready(dom); */
+	/* 	dom->requeue(dom, t); */
+	/* 	return 0; */
+	/* } else { */
+	/* 	return 1; */
+	/* } */
+}
+
 /**
  * check_for_preempt() - Causes a preemption if higher-priority tasks are ready.
  * Caller must hold domain lock.
@@ -495,8 +511,10 @@ static void check_for_preempt(struct domain *dom)
 		entry = crit_cpu(ce);
 		recheck = 1;
 
-		/* Cache ready task */
-		dom->peek_ready(dom);
+		/* Dodge exhausted tasks */
+		if (!cache_next(dom)) {
+			continue;
+		}
 
 		raw_spin_lock(&entry->lock);
 		if (!can_use(ce))
@@ -516,6 +534,7 @@ static void check_for_preempt(struct domain *dom)
 		ce = domain_data(dom)->crit_entry;
 		entry = crit_cpu(ce);
 		raw_spin_lock(&entry->lock);
+		while (!cache_next(dom));
 		if (can_use(ce) && dom->preempt_needed(dom, ce->linked)) {
 			preempt(dom, ce);
 			update_crit_levels(entry);
@@ -570,7 +589,6 @@ static void remove_from_all(struct task_struct* task)
 	/* Ensure the task isn't returned by its domain */
 	dom->remove(dom, task);
 
-	BUG_ON(is_queued(task));
 	raw_spin_unlock(dom->lock);
 }
 
@@ -582,8 +600,15 @@ static void remove_from_all(struct task_struct* task)
 static void job_completion(struct task_struct *task, int forced)
 {
 	lt_t now;
-	TRACE_MC_TASK(task, "Completed\n");
-	sched_trace_task_completion(task, forced);
+	int ghost = is_ghost(task);
+	int behind = tsk_mc_crit(task) != CRIT_LEVEL_A && job_behind(task);
+	TRACE_MC_TASK(task, "Completed, ghost %d, forced %d, behind %d\n",
+		      ghost, forced, behind);
+
+	/* if (!is_ghost(task)) { */
+	/* and no more forced!!! */
+	sched_trace_task_completion(task, forced);
+	/* } */
 	BUG_ON(!task);
 
 	/* Logically stop the task execution */
@@ -592,23 +617,25 @@ static void job_completion(struct task_struct *task, int forced)
 
 	now = litmus_clock();
 
-	/* If it's not a ghost job, do ghost job conversion */
-	if (!is_ghost(task) && !job_behind(task)) {
-		TRACE_MC_TASK(task, "is not a ghost task\n");
-		tsk_mc_data(task)->mc_job.ghost_budget = budget_remaining(task);
+
+	if (!forced && !ghost) {
+		task_release(task);
+	}
+
+	if (!forced && !ghost) {
+		tsk_mc_data(task)->mc_job.ghost_budget =
+			budget_remaining(task);
 		tsk_mc_data(task)->mc_job.is_ghost = 1;
 	}
 
-	/* If the task is a ghost job with no budget, it either exhausted
-	 * its ghost budget or there was no ghost budget after the job
-	 * conversion. Revert back to a normal task and complete the period.
-	 */
-	if (tsk_mc_data(task)->mc_job.ghost_budget == 0) {
-		TRACE_MC_TASK(task, "has zero ghost budget\n");
+	if (forced || behind || tsk_mc_data(task)->mc_job.ghost_budget == 0) {
+		TRACE_MC_TASK(task, "making not a ghost\n");
 		tsk_mc_data(task)->mc_job.is_ghost = 0;
-		prepare_for_next_server(task, forced);
-		if (is_released(task, litmus_clock()))
-			sched_trace_task_release(task);
+		tsk_mc_data(task)->mc_job.ghost_budget = 0;
+	}
+
+	if (forced || (!behind && !is_ghost(task))) {
+		server_release(task);
 	}
 
 	/* Requeue non-blocking tasks */
@@ -967,6 +994,7 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
 	raw_spin_lock(dom->lock);
 
 	/* Peek at task here to avoid lock use */
+	while (!cache_next(dom));
 	dtask = dom->peek_ready(dom);
 
 	raw_spin_lock(&entry->lock);
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c
index 768a2a0f9d2f..f923280b3146 100644
--- a/litmus/sched_task_trace.c
+++ b/litmus/sched_task_trace.c
@@ -146,8 +146,8 @@ feather_callback void do_sched_trace_task_release(unsigned long id, unsigned lon
 	struct task_struct *t = (struct task_struct*) _task;
 	struct st_event_record* rec = get_record(ST_RELEASE, t);
 	if (rec) {
-		rec->data.release.release = get_release(t);
-		rec->data.release.deadline = get_deadline(t);
+		rec->data.release.release = tsk_rt(t)->job_params.real_release;
+		rec->data.release.deadline = tsk_rt(t)->job_params.real_deadline;
 		put_record(rec);
 	}
 }