author     Jonathan Herman <hermanjl@cs.unc.edu>	2011-12-27 22:51:58 -0500
committer  Jonathan Herman <hermanjl@cs.unc.edu>	2011-12-27 22:51:58 -0500
commit     6366f1c7fdfb79101fbbd05f912197edc239ffe9 (patch)
tree       c369a6f795bf803b00de1226d6571b3c82cc2acd
parent     f118a1633185c72d9430d8d074735858d450b34a (diff)
parent     6c6fb3cefaa114d9ba709d3a6c264f6a69af18a2 (diff)

Merge branch 'wip-mc' of ssh://cvs.cs.unc.edu/cvs/proj/litmus/repo/litmus2010 into wip-mc

Conflicts:
	litmus/sched_mc.c
-rw-r--r--  include/litmus/budget.h | 30
-rw-r--r--  litmus/budget.c         | 19
-rw-r--r--  litmus/rt_domain.c      | 20
-rw-r--r--  litmus/sched_mc.c       | 91
4 files changed, 59 insertions(+), 101 deletions(-)
diff --git a/include/litmus/budget.h b/include/litmus/budget.h
index 062df818de45..ff18d89e8630 100644
--- a/include/litmus/budget.h
+++ b/include/litmus/budget.h
@@ -1,15 +1,35 @@
 #ifndef _LITMUS_BUDGET_H_
 #define _LITMUS_BUDGET_H_
 
-/* Update the per-processor enforcement timer (arm/reproram/cancel) for
- * the next task. */
+/**
+ * update_enforcement_timer() - Update the per-processor enforcement timer
+ * for the next scheduled task.
+ *
+ * If @t is not NULL and has a precisely enforced budget, the timer will be
+ * armed to trigger a reschedule when the budget is exhausted. Otherwise,
+ * the timer will be cancelled.
+ */
 void update_enforcement_timer(struct task_struct* t);
 
-void prepare_for_next_server(struct task_struct* t,int forced);
-
-#define job_behind(t)\
+/* True if a task's server has progressed farther than the task
+ * itself. This happens when budget enforcement has caused a task to be
+ * booted off until the next period.
+ */
+#define behind_server(t)\
 	(lt_before((t)->rt_param.job_params.real_release, get_release(t)))
 
+/**
+ * server_release() - Prepare the task's server parameters for the next
+ * period. The server for @t is what is actually executed from the
+ * scheduler's perspective.
+ */
 void server_release(struct task_struct *t);
+
+/**
+ * task_release() - Prepare the actual task parameters for the next period.
+ * The actual task parameters for @t, real_deadline and real_release, are
+ * the deadline and release from the task's perspective. We only record
+ * these so that we can write them to Feather-Trace.
+ */
 void task_release(struct task_struct *t);
 #endif
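For context on the server/task split that the new header comments describe, here is a minimal userspace model of the behind_server() predicate. All types, fields, and values are illustrative stand-ins for the LITMUS^RT definitions (the real ones live in include/litmus/rt_param.h), not the kernel code itself:

/* Minimal userspace model of the server/task split documented above.
 * Types, fields, and values are stand-ins; illustration only. */
#include <stdio.h>

typedef unsigned long long lt_t;

struct job_params {
	lt_t release, deadline;           /* server: the scheduler's view */
	lt_t real_release, real_deadline; /* task: the task's own view */
};

/* Models behind_server(t): true when budget enforcement has pushed
 * the server's release past the task's real release. */
static int behind_server(const struct job_params *p)
{
	return p->real_release < p->release; /* lt_before() modeled as '<' */
}

int main(void)
{
	/* Server forced ahead one period by budget enforcement. */
	struct job_params p = { .release = 30, .deadline = 40,
				.real_release = 20, .real_deadline = 30 };
	printf("task behind its server: %d\n", behind_server(&p));
	return 0;
}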
diff --git a/litmus/budget.c b/litmus/budget.c
index b2239ff1c45c..93945e0911a7 100644
--- a/litmus/budget.c
+++ b/litmus/budget.c
@@ -64,7 +64,7 @@ static void arm_enforcement_timer(struct enforcement_timer* et,
 
 	/* Calling this when there is no budget left for the task
 	 * makes no sense, unless the task is non-preemptive. */
-	/* BUG_ON(budget_exhausted(t) && (!is_np(t))); */
+	BUG_ON(budget_exhausted(t) && (!is_np(t)));
 
 	/* __hrtimer_start_range_ns() cancels the timer
 	 * anyway, so we don't have to check whether it is still armed */
@@ -115,9 +115,6 @@ void task_release(struct task_struct *t)
 	t->rt_param.job_params.real_release = t->rt_param.job_params.real_deadline;
 	t->rt_param.job_params.real_deadline += get_rt_period(t);
 	t->rt_param.job_params.job_no++;
-	TRACE_TASK(t, "Releasing task, rr=%llu rd=%llu\n",
-		   t->rt_param.job_params.real_release,
-		   t->rt_param.job_params.real_deadline);
 	sched_trace_task_release(t);
 }
 
@@ -127,22 +124,8 @@ void server_release(struct task_struct *t)
 	t->rt_param.job_params.exec_time = 0;
 	t->rt_param.job_params.release = t->rt_param.job_params.deadline;
 	t->rt_param.job_params.deadline += get_rt_period(t);
-	TRACE_TASK(t, "Releasing server, r=%llu d=%llu\n",
-		   t->rt_param.job_params.release,
-		   t->rt_param.job_params.deadline);
 	/* don't confuse linux */
 	t->rt.time_slice = 1;
 }
 
-void prepare_for_next_server(struct task_struct *t, int forced)
-{
-	if (forced || job_behind(t)) {
-		server_release(t);
-	}
-
-	if (!forced) {
-		task_release(t);
-	}
-}
-
 module_init(init_budget_enforcement);
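The deleted prepare_for_next_server() helper bundled the two release operations; after this patch the policy lives with the caller in litmus/sched_mc.c. As a rough userspace sketch (stand-in types and values, not the kernel implementation) of how the two helpers advance the two parameter windows:

/* Userspace model of server_release()/task_release() above: a forced
 * completion (budget exhausted) advances only the server window, a
 * voluntary completion advances the task's real window as well. */
#include <stdio.h>

typedef unsigned long long lt_t;

struct job { lt_t release, deadline, real_release, real_deadline, period; };

static void server_release(struct job *j)
{
	j->release = j->deadline;
	j->deadline += j->period;
}

static void task_release(struct job *j)
{
	j->real_release = j->real_deadline;
	j->real_deadline += j->period;
}

int main(void)
{
	struct job j = { 0, 10, 0, 10, 10 };
	server_release(&j);	/* forced: server moves on alone */
	task_release(&j);	/* voluntary: task catches up too */
	printf("server [%llu,%llu) task [%llu,%llu)\n",
	       j.release, j.deadline, j.real_release, j.real_deadline);
	return 0;
}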
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
index b615092ce9c9..3b3b49ed48ea 100644
--- a/litmus/rt_domain.c
+++ b/litmus/rt_domain.c
@@ -261,7 +261,6 @@ static void setup_release(rt_domain_t *_rt)
 	list_for_each_safe(pos, safe, &list) {
 		/* pick task off work list */
 		t = list_entry(pos, struct task_struct, rt_param.list);
-		/* sched_trace_task_release(t); */
 		list_del_init(pos);
 
 		/* put into release heap while holding release_lock */
@@ -412,24 +411,19 @@ static void pd_requeue(domain_t *dom, struct task_struct *task)
 {
 	rt_domain_t *domain = (rt_domain_t*)dom->data;
 
-	BUG_ON(!task || !is_realtime(task));
 	TRACE_TASK(task, "Requeueing\n");
+	BUG_ON(!task || !is_realtime(task));
 	BUG_ON(is_queued(task));
 	BUG_ON(get_task_domain(task) != dom);
 
-	if (is_queued(task)) {
-		VTRACE_TASK(task, "Queued, skipping\n");
-		return;
-	}
-
 	if (is_released(task, litmus_clock())) {
 		__add_ready(domain, task);
-		VTRACE("going, rt: adding %s/%d (%llu, %llu) rel=%llu to ready queue at %llu\n",
+		VTRACE("rt: adding %s/%d (%llu, %llu) rel=%llu to ready queue at %llu\n",
 		       task->comm, task->pid, get_exec_cost(task), get_rt_period(task),
 		       get_release(task), litmus_clock());
 	} else {
 		/* task has to wait for next release */
-		VTRACE_TASK(task, "not going, add release(), rel=%llu\n", get_release(task));
+		VTRACE_TASK(task, "add release(), rel=%llu\n", get_release(task));
 		add_release(domain, task);
 	}
 
@@ -441,9 +435,8 @@ static void pd_requeue(domain_t *dom, struct task_struct *task)
  */
 static struct task_struct* pd_take_ready(domain_t *dom)
 {
-	struct task_struct *t = __take_ready((rt_domain_t*)dom->data);
-	return t;
+	return __take_ready((rt_domain_t*)dom->data);
 }
 
 /* pd_peek_ready - returns the head of the rt_domain ready queue
  *
@@ -451,8 +444,7 @@ static struct task_struct* pd_take_ready(domain_t *dom)
  */
 static struct task_struct* pd_peek_ready(domain_t *dom)
 {
-	struct task_struct *t = __peek_ready((rt_domain_t*)dom->data);
-	return t;
+	return __next_ready((rt_domain_t*)dom->data);
 }
 
 static void pd_remove(domain_t *dom, struct task_struct *task)
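After this cleanup, pd_requeue() makes a single decision: a task whose release time has already passed goes on the ready queue, and anything else waits on the release queue. A compact model of that branch, with a stand-in clock in place of litmus_clock() (illustration only):

/* Sketch of pd_requeue()'s branch after this patch: released tasks
 * become ready; the rest wait on the release queue. */
#include <stdio.h>

typedef unsigned long long lt_t;

enum queue { READY, RELEASE };

static enum queue pd_requeue_choice(lt_t release, lt_t now)
{
	return release <= now ? READY : RELEASE;
}

int main(void)
{
	lt_t now = 100;	/* stand-in for litmus_clock() */
	printf("rel=90  -> %s\n",
	       pd_requeue_choice(90, now) == READY ? "ready" : "release");
	printf("rel=150 -> %s\n",
	       pd_requeue_choice(150, now) == READY ? "ready" : "release");
	return 0;
}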
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 5eaaa4734fa3..5700d21250d2 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -104,8 +104,6 @@ DEFINE_PER_CPU(struct cpu_entry, cpus);
 static int interrupt_cpu;
 #endif
 
-#define FTRACE_CPU 6
-
 #define domain_data(dom) (container_of(dom, struct domain_data, domain))
 #define is_global(dom) (domain_data(dom)->heap)
 #define is_global_task(t) (is_global(get_task_domain(t)))
@@ -194,11 +192,9 @@ static void fix_crit_position(struct crit_entry *ce)
 	if (is_global(ce->domain)) {
 		if (CS_ACTIVATE == ce->state) {
 			ce->state = CS_ACTIVE;
-			TRACE_CRIT_ENTRY(ce, "CS_ACTIVE\n");
 			update_crit_position(ce);
 		} else if (CS_REMOVE == ce->state) {
 			ce->state = CS_REMOVED;
-			TRACE_CRIT_ENTRY(ce, "CS_REMOVED\n");
 			update_crit_position(ce);
 		}
 	}
@@ -433,7 +429,6 @@ static void link_task_to_cpu(struct cpu_entry *entry, struct task_struct *task)
 	for (; i < entry_level(entry) + 1; i++) {
 		ce = &entry->crit_entries[i];
 		if (!can_use(ce)) {
-			TRACE_CRIT_ENTRY(ce, "CS_ACTIVATE\n");
 			ce->state = CS_ACTIVATE;
 		}
 	}
@@ -505,7 +500,6 @@ static void update_crit_levels(struct cpu_entry *entry)
 		 */
 		readmit[i] = (!global_preempted) ? ce->linked : NULL;
 
-		TRACE_CRIT_ENTRY(ce, "CS_REMOVE\n");
 		ce->state = CS_REMOVE;
 		if (ce->linked)
 			link_task_to_crit(ce, NULL);
@@ -522,22 +516,6 @@ static void update_crit_levels(struct cpu_entry *entry)
 	}
 }
 
-static inline int cache_next(struct domain *dom)
-{
-	struct task_struct *t;
-	t = dom->peek_ready(dom);
-	return 1;
-	/* if (t && tsk_mc_crit(t) != CRIT_LEVEL_A && budget_exhausted(t)) { */
-	/*	TRACE_TASK(t, "Cached and moved to release\n"); */
-	/*	prepare_for_next_server(t, 1); */
-	/*	dom->take_ready(dom); */
-	/*	dom->requeue(dom, t); */
-	/*	return 0; */
-	/* } else { */
-	/*	return 1; */
-	/* } */
-}
-
 /**
  * check_for_preempt() - Causes a preemption if higher-priority tasks are ready.
  * Caller must hold domain lock.
@@ -556,10 +534,8 @@ static void check_for_preempt(struct domain *dom)
 			entry = crit_cpu(ce);
 			recheck = 1;
 
-			/* Dodge exhausted tasks */
-			if (!cache_next(dom)) {
-				continue;
-			}
+			/* Cache next task */
+			dom->peek_ready(dom);
 
 			raw_spin_lock(&entry->lock);
 			if (!can_use(ce))
@@ -578,8 +554,11 @@ static void check_for_preempt(struct domain *dom)
 	} else /* Partitioned */ {
 		ce = domain_data(dom)->crit_entry;
 		entry = crit_cpu(ce);
+
+		/* Cache next task */
+		dom->peek_ready(dom);
+
 		raw_spin_lock(&entry->lock);
-		while (!cache_next(dom));
 		if (can_use(ce) && dom->preempt_needed(dom, ce->linked)) {
 			preempt(dom, ce);
 			update_crit_levels(entry);
@@ -595,7 +574,7 @@ static void check_for_preempt(struct domain *dom)
  */
 static void remove_from_all(struct task_struct* task)
 {
-	int update = 0, old;
+	int update = 0;
 	struct cpu_entry *entry;
 	struct crit_entry *ce;
 	struct domain *dom = get_task_domain(task);
@@ -644,43 +623,38 @@ static void remove_from_all(struct task_struct* task)
  */
 static void job_completion(struct task_struct *task, int forced)
 {
-	lt_t now;
-	int ghost = is_ghost(task);
-	int behind = tsk_mc_crit(task) != CRIT_LEVEL_A && job_behind(task);
-	TRACE_MC_TASK(task, "Completed, ghost %d, forced %d, behind %d\n",
-		      ghost, forced, behind);
-
-	/* if (!is_ghost(task)) { */
-	/* and no more forced!!! */
-	if (!ghost && !forced)
-		sched_trace_task_completion(task, forced);
-	/* } */
-	BUG_ON(!task);
+	int behind;
+	TRACE_MC_TASK(task, "Completed\n");
 
 	/* Logically stop the task execution */
 	set_rt_flags(task, RT_F_SLEEP);
 	remove_from_all(task);
 
-	now = litmus_clock();
-
+	/* Level-A tasks cannot ever get behind */
+	behind = tsk_mc_crit(task) != CRIT_LEVEL_A && behind_server(task);
 
-	if (!forced && !ghost) {
+	if (!forced && !is_ghost(task)) {
+		/* Task voluntarily ceased execution. Move on to next period */
 		task_release(task);
-	}
+		sched_trace_task_completion(task, forced);
 
-	if (!forced && !ghost) {
-		tsk_mc_data(task)->mc_job.ghost_budget =
-			budget_remaining(task);
+		/* Convert to ghost job */
+		tsk_mc_data(task)->mc_job.ghost_budget = budget_remaining(task);
 		tsk_mc_data(task)->mc_job.is_ghost = 1;
 	}
 
-	if (forced || behind || tsk_mc_data(task)->mc_job.ghost_budget == 0) {
-		TRACE_MC_TASK(task, "making not a ghost\n");
+	/* If the task has no ghost budget, convert back from ghost.
+	 * If the task is behind, undo ghost conversion so that it
+	 * can catch up.
+	 */
+	if (behind || tsk_mc_data(task)->mc_job.ghost_budget == 0) {
+		TRACE_MC_TASK(task, "Not a ghost task\n");
 		tsk_mc_data(task)->mc_job.is_ghost = 0;
 		tsk_mc_data(task)->mc_job.ghost_budget = 0;
 	}
 
-	if (forced || (!behind && !is_ghost(task))) {
+	/* If server has run out of budget, wait until next release */
+	if (budget_exhausted(task)) {
 		server_release(task);
 	}
 
@@ -855,13 +829,8 @@ static void mc_task_new(struct task_struct *t, int on_rq, int running)
 	tsk_mc_data(t)->mc_job.is_ghost = 0;
 	if (running) {
 		BUG_ON(entry->scheduled);
-		if (entry->cpu != FTRACE_CPU) {
-			entry->scheduled = t;
-			tsk_rt(t)->scheduled_on = entry->cpu;
-		} else {
-			t->rt_param.scheduled_on = NO_CPU;
-			preempt_if_preemptable(NULL, entry->cpu);
-		}
+		entry->scheduled = t;
+		tsk_rt(t)->scheduled_on = entry->cpu;
 	} else {
 		t->rt_param.scheduled_on = NO_CPU;
 	}
@@ -971,9 +940,6 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
 	int i, out_of_time, sleep, preempt, exists, blocks, global, lower;
 	struct task_struct *dtask = NULL, *ready_task = NULL, *next = NULL;
 
-	if (entry->cpu == FTRACE_CPU)
-		return NULL;
-
 	local_irq_save(flags);
 
 	/* Litmus gave up because it couldn't access the stack of the CPU
@@ -1143,7 +1109,6 @@ static long mc_activate_plugin(void)
 	struct domain_data *dom_data;
 	struct domain *dom;
 	struct domain_data *our_domains[NR_CPUS];
-	struct event_group *event_group;
 	int cpu, n = 0;
 	long ret;
 
@@ -1239,9 +1204,7 @@ static void init_global_domain(struct domain_data *dom_data, enum crit_level lev
 		ce = &entry->crit_entries[level];
 		init_crit_entry(ce, level, dom_data, node);
 		bheap_node_init(&ce->node, ce);
-
-		if (cpu != FTRACE_CPU)
-			bheap_insert(cpu_lower_prio, heap, node);
+		bheap_insert(cpu_lower_prio, heap, node);
 	}
 }
 
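The reworked job_completion() above is the core of this merge: a voluntary completion turns the job into a ghost that burns off its leftover budget, a job that is behind or out of ghost budget drops ghost status, and an exhausted server waits for its next release. A userspace walk-through of that state logic (stand-in types and helpers; the kernel version operates on struct task_struct):

/* Userspace model of the job_completion() ghost-state flow above.
 * All fields and parameters are stand-ins; illustration only. */
#include <stdio.h>

struct mc_job {
	int is_ghost;
	unsigned long long ghost_budget;
};

static void job_completion(struct mc_job *job, int forced, int behind,
			   unsigned long long remaining,
			   int budget_exhausted)
{
	if (!forced && !job->is_ghost) {
		/* Voluntary completion: become a ghost job that holds
		 * on to the unconsumed budget. */
		job->ghost_budget = remaining;
		job->is_ghost = 1;
	}
	if (behind || job->ghost_budget == 0) {
		/* Nothing left to burn, or the task must catch up:
		 * drop ghost status. */
		job->is_ghost = 0;
		job->ghost_budget = 0;
	}
	if (budget_exhausted) {
		/* The kernel calls server_release() here: the server
		 * waits for its next period before running again. */
		puts("server_release(): wait for next period");
	}
}

int main(void)
{
	struct mc_job job = { 0, 0 };
	job_completion(&job, /*forced=*/0, /*behind=*/0,
		       /*remaining=*/3, /*budget_exhausted=*/0);
	printf("ghost=%d budget=%llu\n", job.is_ghost, job.ghost_budget);
	return 0;
}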