diff options
author | Joshua Bakita <jbakita@cs.unc.edu> | 2020-03-07 17:05:30 -0500 |
---|---|---|
committer | Joshua Bakita <jbakita@cs.unc.edu> | 2020-03-07 17:05:30 -0500 |
commit | 1624739afa2d2e74b77e91b4a9524b69d05d04c0 (patch) | |
tree | 31693a63cd63b8f6e26700b1778263b95b912ff2 | |
parent | a0c5b601560f849056c7da653c15853671695fff (diff) |
Fix several issues in container_boundary and add documentation
1. Don't double-add to cpu heap
2. Correctly update linked_on to NO_CPU before linking container
3. Unlink container before requeuing
4. Don't requeue a container that's already queued
-rw-r--r-- | litmus/sched_edfsc.c | 26 |
1 file changed, 21 insertions(+), 5 deletions(-)
diff --git a/litmus/sched_edfsc.c b/litmus/sched_edfsc.c
index 812feabb6417..17a7d1f5bca0 100644
--- a/litmus/sched_edfsc.c
+++ b/litmus/sched_edfsc.c
@@ -1013,28 +1013,44 @@ static enum hrtimer_restart container_boundary(struct hrtimer *timer)
1013 | prepare_for_next_period(t); | 1013 | prepare_for_next_period(t); |
1014 | if (is_early_releasing(t) || is_released(t, now)) | 1014 | if (is_early_releasing(t) || is_released(t, now)) |
1015 | sched_trace_task_release(t); | 1015 | sched_trace_task_release(t); |
1016 | /* If this container is fully provisioned, remove it from gsched_domain, | ||
1017 | * edfsc_cpu_heap, and disable the idle enforcement timer. If not, restore. | ||
1018 | */ | ||
1016 | if (get_rt_utilization(t) == to_fp(1)) { | 1019 | if (get_rt_utilization(t) == to_fp(1)) { |
1017 | tsk_rt(t)->task_params.exec_cost = from_fp(get_rt_utilization(t) * get_rt_period(t)); | ||
1018 | cpu_entry_t* entry = &per_cpu(edfsc_cpu_entries, tsk_rt(t)->edfsc_params.id); | 1020 | cpu_entry_t* entry = &per_cpu(edfsc_cpu_entries, tsk_rt(t)->edfsc_params.id); |
1021 | // Update execution cost to reflect a utilization of 1 | ||
1022 | tsk_rt(t)->task_params.exec_cost = get_rt_period(t); | ||
1023 | // Make this cpu unavailable to the global scheduler | ||
1019 | if (bheap_node_in_heap(entry->hn)) | 1024 | if (bheap_node_in_heap(entry->hn)) |
1020 | remove_cpu_from_global(entry); | 1025 | remove_cpu_from_global(entry); |
1021 | entry->linked = t; | 1026 | // Fully provisioned containers always run, so just set this here |
1022 | tsk_rt(t)->linked_on = entry->cpu; | 1027 | if (entry->linked != t) |
1028 | link_task_to_cpu(t, entry); | ||
1029 | // Note that we no longer need the global scheduler to schedule us | ||
1023 | if (is_queued(t)) | 1030 | if (is_queued(t)) |
1024 | remove(&gsched_domain, t); | 1031 | remove(&gsched_domain, t); |
1032 | // Fully provisioned containers always run, so idle enforcement is superfluous | ||
1025 | cancel_idle_enforcement_timer(t); | 1033 | cancel_idle_enforcement_timer(t); |
1026 | tsk_rt(t)->edfsc_params.domain->scheduled_last_exec_time = litmus_clock(); | 1034 | tsk_rt(t)->edfsc_params.domain->scheduled_last_exec_time = litmus_clock(); |
1035 | // Run schedule again to make sure that we're run | ||
1027 | preempt(entry); | 1036 | preempt(entry); |
1028 | } | 1037 | } |
1029 | else { | 1038 | else { |
1030 | cpu_entry_t* entry = &per_cpu(edfsc_cpu_entries, tsk_rt(t)->edfsc_params.id); | 1039 | cpu_entry_t* entry = &per_cpu(edfsc_cpu_entries, tsk_rt(t)->edfsc_params.id); |
1031 | add_cpu_to_global(entry); | 1040 | // Make our cpu available again |
1041 | if (!bheap_node_in_heap(entry->hn)) | ||
1042 | add_cpu_to_global(entry); | ||
1032 | if (is_current_running()) { //since we don't support blocking, this should always be true | 1043 | if (is_current_running()) { //since we don't support blocking, this should always be true |
1033 | if (tsk_rt(t)->edfsc_params.domain->scheduled) { | 1044 | if (tsk_rt(t)->edfsc_params.domain->scheduled) { |
1034 | requeue(tsk_rt(t)->edfsc_params.domain->scheduled); | 1045 | requeue(tsk_rt(t)->edfsc_params.domain->scheduled); |
1035 | tsk_rt(t)->edfsc_params.domain->scheduled = NULL; | 1046 | tsk_rt(t)->edfsc_params.domain->scheduled = NULL; |
1036 | } | 1047 | } |
1037 | requeue(t); | 1048 | // Let g_preempt_check() decide what to run, don't impose |
1049 | unlink(t); | ||
1050 | // Request to be scheduled globally again | ||
1051 | if (!is_queued(t)) | ||
1052 | requeue(t); | ||
1053 | // Re-run our EDF scheduling to adjust for the added core | ||
1038 | g_preempt_check(); | 1054 | g_preempt_check(); |
1039 | } | 1055 | } |
1040 | } | 1056 | } |