Diffstat (limited to 'litmus/sched_mc_ce.c')
-rw-r--r-- | litmus/sched_mc_ce.c | 38
1 file changed, 21 insertions, 17 deletions
diff --git a/litmus/sched_mc_ce.c b/litmus/sched_mc_ce.c
index 702b46da93d5..e4d66bad2138 100644
--- a/litmus/sched_mc_ce.c
+++ b/litmus/sched_mc_ce.c
@@ -130,7 +130,7 @@ static inline lt_t get_cycle_offset(const lt_t when, const lt_t cycle_time)
  *
  * Do not call prepare_for_next_period on Level-A tasks!
  */
-static void mc_ce_job_completion(struct domain *dom, struct task_struct *ts)
+void mc_ce_job_completion(struct domain *dom, struct task_struct *ts)
 {
 	const int cpu = task_cpu(ts);
 	const int idx = tsk_mc_data(ts)->mc_task.lvl_a_id;
@@ -141,6 +141,7 @@ static void mc_ce_job_completion(struct domain *dom, struct task_struct *ts)
 
 	/* sched_trace_task_completion(ts, 0); */
 	/* post-increment is important here */
+	sched_trace_server_completion(-ts->pid, get_rt_job(ts));
 	just_finished = (tsk_rt(ts)->job_params.job_no)++;
 
 	/* Job completes in expected window: everything is normal.
@@ -157,7 +158,6 @@ static void mc_ce_job_completion(struct domain *dom, struct task_struct *ts)
 		printk(KERN_CRIT "job %u completed in expected job %u which "
 				"seems too early\n", just_finished,
 				pid_entry->expected_job);
-		BUG();
 	}
 }
 
@@ -451,7 +451,7 @@ lt_t mc_ce_timer_callback_common(struct domain *dom)
 	struct ce_pid_table *pid_table;
 	struct ce_pid_entry *pid_entry;
 	struct ce_dom_data *ce_data;
-	int idx, budget_overrun;
+	int idx, budget_overrun, expected;
 
 	ce_data = dom->data;
 	pid_table = get_pid_table(ce_data->cpu);
@@ -481,26 +481,31 @@ lt_t mc_ce_timer_callback_common(struct domain *dom)
 		 * If jobs are not overrunning their budgets, then this
 		 * should not happen.
 		 */
-		pid_entry->expected_job++;
-		budget_overrun = pid_entry->expected_job !=
+		expected = ++pid_entry->expected_job;
+		TRACE_MC_TASK(should_schedule, "Expected now: %d\n", expected);
+		budget_overrun = expected !=
 			tsk_rt(should_schedule)->job_params.job_no;
-		if (budget_overrun)
+		if (budget_overrun) {
 			TRACE_MC_TASK(should_schedule,
 					"timer expected job number: %u "
 					"but current job: %u\n",
-					pid_entry->expected_job,
+					expected,
 					tsk_rt(should_schedule)->job_params.job_no);
+		}
 	}
 
 	if (ce_data->should_schedule) {
-		tsk_rt(should_schedule)->job_params.deadline =
+		get_deadline(should_schedule) =
 			cycle_start_abs + pid_entry->acc_time;
-		tsk_rt(should_schedule)->job_params.release =
-			tsk_rt(should_schedule)->job_params.deadline -
+		get_release(should_schedule) = tsk_rt(should_schedule)->job_params.deadline -
 			pid_entry->budget;
 		tsk_rt(should_schedule)->job_params.exec_time = 0;
-		/* sched_trace_task_release(should_schedule); */
+
+		TRACE_MC_TASK(should_schedule, "Released!\n");
 		set_rt_flags(ce_data->should_schedule, RT_F_RUNNING);
+		sched_trace_task_release(should_schedule);
+		sched_trace_server_release(-should_schedule->pid, get_rt_job(should_schedule),
+				tsk_rt(should_schedule)->job_params);
 	}
 	return next_timer_abs;
 }
@@ -603,7 +608,7 @@ static void arm_all_timers(void)
 		if (0 == pid_table->num_pid_entries)
 			continue;
 		for (idx = 0; idx < pid_table->num_pid_entries; idx++) {
-			pid_table->entries[idx].expected_job = 0;
+			pid_table->entries[idx].expected_job = 1;
 		}
 #ifdef CONFIG_PLUGIN_MC_RELEASE_MASTER
 		cpu_for_timer = interrupt_cpu;
@@ -629,7 +634,7 @@ static void arm_all_timers(void)
  */
 void mc_ce_release_at_common(struct task_struct *ts, lt_t start)
 {
-	TRACE_TASK(ts, "release at\n");
+	TRACE("release CE at %llu\n", start);
 	if (atomic_inc_and_test(&start_time_set)) {
 		/* in this case, we won the race */
 		cancel_all_timers();
@@ -664,8 +669,7 @@ long mc_ce_activate_plugin_common(void)
 
 	atomic_set(&start_time_set, -1);
 	atomic64_set(&start_time, litmus_clock());
-	/* may not want to arm timers on activation, just after release */
-	arm_all_timers();
+
 	ret = 0;
 out:
 	return ret;
@@ -707,7 +711,7 @@ static void clear_pid_entries(void)
 		}
 		pid_table->entries[entry].budget = 0;
 		pid_table->entries[entry].acc_time = 0;
-		pid_table->entries[entry].expected_job = 0;
+		pid_table->entries[entry].expected_job = 1;
 	}
 }
 
@@ -752,7 +756,7 @@ static int __init init_sched_mc_ce(void)
 		raw_spin_lock_init(ce_lock);
 		dom_data = &per_cpu(_mc_ce_doms, cpu);
 		dom = &dom_data->domain;
-		ce_domain_init(dom, ce_lock, NULL, NULL, NULL, NULL, NULL,
+		ce_domain_init(dom, ce_lock, NULL, NULL, NULL, NULL,
				&per_cpu(_mc_ce_dom_data, cpu), cpu,
				mc_ce_timer_callback);
 	}
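
For context on the expected_job bookkeeping these hunks adjust (the counter now starts at 1, is pre-incremented on every timer cycle, and is compared against the task's job_no to detect a budget overrun), the following is a minimal user-space sketch of that counter pattern only. It assumes simplified stand-in types: fake_pid_entry, fake_task, timer_tick and the main driver are invented for this illustration and are not the LITMUS^RT definitions.

#include <stdio.h>

/* Simplified stand-ins for the structures touched by the patch above. */
struct fake_task {
	unsigned int job_no;		/* advanced by the task on job completion */
};

struct fake_pid_entry {
	unsigned int expected_job;	/* advanced by the cycle timer */
};

/* Mirrors the pattern in mc_ce_timer_callback_common() after the change:
 * pre-increment the expectation, then compare it with the task's job count. */
static int timer_tick(struct fake_pid_entry *e, const struct fake_task *t)
{
	unsigned int expected = ++e->expected_job;
	int budget_overrun = expected != t->job_no;

	if (budget_overrun)
		printf("overrun: timer expected job %u but current job is %u\n",
				expected, t->job_no);
	return budget_overrun;
}

int main(void)
{
	/* expected_job starts at 1, as arm_all_timers() and clear_pid_entries()
	 * now initialize it; job_no starting at 1 is an assumption of this sketch. */
	struct fake_pid_entry e = { .expected_job = 1 };
	struct fake_task t = { .job_no = 1 };

	t.job_no++;		/* job completes before the next cycle */
	timer_tick(&e, &t);	/* expected becomes 2, matches job_no: no overrun */

	/* the task falls behind: no completion before the next cycle */
	timer_tick(&e, &t);	/* expected becomes 3, job_no still 2: overrun traced */
	return 0;
}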