diff options
author | Joshua Bakita <jbakita@cs.unc.edu> | 2020-03-06 15:24:52 -0500 |
---|---|---|
committer | Joshua Bakita <jbakita@cs.unc.edu> | 2020-03-06 15:24:52 -0500 |
commit | 289013db51e6e7c802c092dba687963750407165 (patch) | |
tree | 6ca757d1d2381468c698ffc8045ea86cef5ae003 | |
parent | 1cf95dfd3e5be213d598abdf927c317a1b621b99 (diff) |
Fixed container domain->scheduled tracking
The `scheduled` field of a container domain is now set to NULL when the container is preempted.
-rw-r--r-- | litmus/sched_edfsc.c | 35 |
1 file changed, 24 insertions, 11 deletions
diff --git a/litmus/sched_edfsc.c b/litmus/sched_edfsc.c index 754891b8c12f..10ebf7c098f4 100644 --- a/litmus/sched_edfsc.c +++ b/litmus/sched_edfsc.c | |||
@@ -66,7 +66,6 @@ u64 sys_util, future_sys_util; | |||
66 | 66 | ||
67 | #define is_container(task) ((task) && tsk_rt(task)->edfsc_params.domain != NULL && tsk_rt(task)->domain == &gsched_domain) | 67 | #define is_container(task) ((task) && tsk_rt(task)->edfsc_params.domain != NULL && tsk_rt(task)->domain == &gsched_domain) |
68 | #define is_fixed(task) ((task) && tsk_rt(task)->edfsc_params.container_task != NULL) | 68 | #define is_fixed(task) ((task) && tsk_rt(task)->edfsc_params.container_task != NULL) |
69 | //migrating task can temporarily have a container_task when it is being background scheduled | ||
70 | #define is_migrating(task) ((task) && tsk_rt(task)->edfsc_params.domain == NULL && tsk_rt(task)->domain == &gsched_domain) | 69 | #define is_migrating(task) ((task) && tsk_rt(task)->edfsc_params.domain == NULL && tsk_rt(task)->domain == &gsched_domain) |
71 | 70 | ||
72 | #define FP_SHIFT 20 | 71 | #define FP_SHIFT 20 |
@@ -527,13 +526,15 @@ static void c_job_completion(struct task_struct* t, int forced) | |||
527 | static void g_finish_switch(struct task_struct *prev) | 526 | static void g_finish_switch(struct task_struct *prev) |
528 | { | 527 | { |
529 | cpu_entry_t* entry = this_cpu_ptr(&edfsc_cpu_entries); | 528 | cpu_entry_t* entry = this_cpu_ptr(&edfsc_cpu_entries); |
529 | struct task_struct* container = &container_tasks[entry->cpu]; | ||
530 | BUG_ON(is_realtime(current) && tsk_rt(current)->domain == NULL); | 530 | BUG_ON(is_realtime(current) && tsk_rt(current)->domain == NULL); |
531 | //BUG_ON(tsk_rt(container)->scheduled_on != smp_processor_id()); | ||
531 | 532 | ||
532 | entry->scheduled = is_realtime(current) ? current : NULL; | 533 | entry->scheduled = is_realtime(current) ? current : NULL; |
534 | // If we're scheduling a task in a container, set entry->scheduled to the container | ||
533 | if (entry->scheduled) { | 535 | if (entry->scheduled) { |
534 | // when a task is being scheduled under a container, container_task exists | 536 | if (tsk_rt(container)->edfsc_params.domain->scheduled == entry->scheduled) |
535 | struct task_struct* t = tsk_rt(current)->edfsc_params.container_task; | 537 | entry->scheduled = container; |
536 | entry->scheduled = (t) ? t : current; | ||
537 | } | 538 | } |
538 | // occurs when current is non-rt, and linked is a container | 539 | // occurs when current is non-rt, and linked is a container |
539 | // this happens when an empty container "task" is supposed to be current | 540 | // this happens when an empty container "task" is supposed to be current |
@@ -541,6 +542,11 @@ static void g_finish_switch(struct task_struct *prev) | |||
541 | else if (is_container(entry->linked)) { | 542 | else if (is_container(entry->linked)) { |
542 | entry->scheduled = entry->linked; | 543 | entry->scheduled = entry->linked; |
543 | } | 544 | } |
545 | |||
546 | if (!is_container(entry->scheduled) && tsk_rt(container)->edfsc_params.domain->scheduled) { | ||
547 | requeue(tsk_rt(container)->edfsc_params.domain->scheduled); | ||
548 | tsk_rt(container)->edfsc_params.domain->scheduled = NULL; | ||
549 | } | ||
544 | #ifdef WANT_ALL_SCHED_EVENTS | 550 | #ifdef WANT_ALL_SCHED_EVENTS |
545 | TRACE_TASK(prev, "switched away from\n"); | 551 | TRACE_TASK(prev, "switched away from\n"); |
546 | #endif | 552 | #endif |
@@ -573,8 +579,7 @@ static void edfsc_cschedule(cont_domain_t* cedf, struct task_struct * prev) | |||
573 | * cedf->schedule may be null and prev _is_ realtime | 579 | * cedf->schedule may be null and prev _is_ realtime |
574 | */ | 580 | */ |
575 | BUG_ON(cedf->scheduled && cedf->scheduled != prev && is_realtime(prev)); | 581 | BUG_ON(cedf->scheduled && cedf->scheduled != prev && is_realtime(prev)); |
576 | // XXX: Is this okay with background scheduling??? | 582 | BUG_ON(cedf->scheduled && !is_realtime(cedf->scheduled)); |
577 | BUG_ON(cedf->scheduled && is_migrating(cedf->scheduled)); | ||
578 | 583 | ||
579 | /* (0) Determine state */ | 584 | /* (0) Determine state */ |
580 | exists = cedf->scheduled != NULL; | 585 | exists = cedf->scheduled != NULL; |
@@ -627,12 +632,13 @@ static void edfsc_cschedule(cont_domain_t* cedf, struct task_struct * prev) | |||
627 | * the appropriate queue. | 632 | * the appropriate queue. |
628 | */ | 633 | */ |
629 | next = __take_ready(edf); | 634 | next = __take_ready(edf); |
630 | } else | 635 | } else if (exists) { |
636 | BUG_ON(!is_realtime(prev)); | ||
631 | /* Only override Linux scheduler if we have a real-time task | 637 | /* Only override Linux scheduler if we have a real-time task |
632 | * scheduled that needs to continue. | 638 | * scheduled that needs to continue. |
633 | */ | 639 | */ |
634 | if (exists) | 640 | next = prev; |
635 | next = prev; | 641 | } |
636 | 642 | ||
637 | if (next) { | 643 | if (next) { |
638 | TRACE_TASK(next, "scheduled at %llu\n", litmus_clock()); | 644 | TRACE_TASK(next, "scheduled at %llu\n", litmus_clock()); |
@@ -772,6 +778,7 @@ static struct task_struct *edfsc_gschedule(struct task_struct *prev) | |||
772 | 778 | ||
773 | BUG_ON(entry->linked && budget_exhausted(entry->linked)); | 779 | BUG_ON(entry->linked && budget_exhausted(entry->linked)); |
774 | BUG_ON(!bheap_node_in_heap(entry->hn) && tsk_rt(entry->linked)->edfsc_params.id != entry->cpu); | 780 | BUG_ON(!bheap_node_in_heap(entry->hn) && tsk_rt(entry->linked)->edfsc_params.id != entry->cpu); |
781 | BUG_ON(is_container(entry->linked) && tsk_rt(entry->linked)->edfsc_params.id != entry->cpu); | ||
775 | 782 | ||
776 | /* The final scheduling decision. Do we need to switch for some reason? | 783 | /* The final scheduling decision. Do we need to switch for some reason? |
777 | * If linked is different from scheduled, then select linked as next. | 784 | * If linked is different from scheduled, then select linked as next. |
@@ -815,9 +822,9 @@ static struct task_struct *edfsc_gschedule(struct task_struct *prev) | |||
815 | if (is_container(next)) { | 822 | if (is_container(next)) { |
816 | manage_idle_enforcement_timer(next); | 823 | manage_idle_enforcement_timer(next); |
817 | return tsk_rt(next)->edfsc_params.domain->scheduled; | 824 | return tsk_rt(next)->edfsc_params.domain->scheduled; |
818 | } | 825 | } else { |
819 | else | ||
820 | return next; | 826 | return next; |
827 | } | ||
821 | } | 828 | } |
822 | 829 | ||
823 | /* | 830 | /* |
@@ -843,6 +850,7 @@ static enum hrtimer_restart container_boundary(struct hrtimer *timer) | |||
843 | 850 | ||
844 | now = litmus_clock(); | 851 | now = litmus_clock(); |
845 | 852 | ||
853 | // Update budget tracking for containers | ||
846 | for (i = 0; i < num_cpus; i++) { | 854 | for (i = 0; i < num_cpus; i++) { |
847 | t = container_list[i]->container; | 855 | t = container_list[i]->container; |
848 | if (container_list[i]->timer_armed) | 856 | if (container_list[i]->timer_armed) |
@@ -884,6 +892,8 @@ static enum hrtimer_restart container_boundary(struct hrtimer *timer) | |||
884 | sys_util += get_rt_utilization(t); | 892 | sys_util += get_rt_utilization(t); |
885 | future_sys_util += get_rt_utilization(t); | 893 | future_sys_util += get_rt_utilization(t); |
886 | need_reweight = 1; | 894 | need_reweight = 1; |
895 | // Setup the release time for the first job to be now | ||
896 | release_at(t, litmus_clock()); | ||
887 | } | 897 | } |
888 | list_del(it); | 898 | list_del(it); |
889 | /* Unblock the task waiting on our admission decision. They will detect | 899 | /* Unblock the task waiting on our admission decision. They will detect |
@@ -1060,6 +1070,9 @@ static void edfsc_task_new(struct task_struct* t, int on_rq, int is_scheduled) | |||
1060 | 1070 | ||
1061 | // Run the scheduler to make sure that this should be running | 1071 | // Run the scheduler to make sure that this should be running |
1062 | preempt(entry); | 1072 | preempt(entry); |
1073 | } else { | ||
1074 | // If we're not scheduled, we should be queued | ||
1075 | requeue(t); | ||
1063 | } | 1076 | } |
1064 | 1077 | ||
1065 | // XXX: This seems unnecessary and possibly dangerous if `is_scheduled` | 1078 | // XXX: This seems unnecessary and possibly dangerous if `is_scheduled` |