author    Joshua Bakita <jbakita@cs.unc.edu>    2020-05-17 16:28:20 -0400
committer Joshua Bakita <jbakita@cs.unc.edu>    2020-05-17 16:28:20 -0400
commit    1394cfe730e1e5030decc9990b37011a4957a7c0 (patch)
tree      135ac6edcecdbbe723a6f18eb23d6cf5d606d8fc
parent    888ab3700f7d9e0b59795c6d8b0461b3ce0cdc81 (diff)
Implement background scheduling
This turned out to be incredibly complicated. High-level changes are:

1. Container completion in g_job_completion() has been completely reworked.
   We now check for and appropriately handle all possible states of a
   background scheduled task (see the first sketch below).
2. edfsc_cschedule() has had its fixed task scheduling logic overhauled to be
   easier to follow. This fixes the logic for when a fixed task preempts a
   background scheduled task, but may also fix other bugs.
3. When a task blocks while being background scheduled, remove that task, NOT
   the container scheduling that task.
4. Update `entry->scheduled` at the end of edfsc_gschedule(). We will not run
   g_finish_switch() if we continue scheduling the same task, but we still
   need `entry->scheduled` to be updated in the case when we stop background
   scheduling and switch to normal scheduling. The original code that sets
   `entry->scheduled` in g_finish_switch() may no longer be needed; further
   investigation of its removal is encouraged.
5. If a task exits while being background scheduled, remove it from the
   container rather than `entry->scheduled` (see the second sketch below).
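
Illustrative sketch for change 1: the following is a minimal, standalone C
model of the decision tree that the reworked g_job_completion() applies to a
container's child task. It is NOT the kernel code - the struct is a stand-in
for the real rt_param/edfsc_params state, the flag names mirror the helpers
used in the patch (is_migrating(), is_completed(), budget_enforced() &&
budget_exhausted(), is_current_running()), and the returned strings just name
the action the patch takes. A NULL child ("scheduling nothing") is assumed to
be handled before this point.

/* Standalone model of the child-task handling in g_job_completion(). */
#include <stdbool.h>
#include <stdio.h>

struct child_state {
	bool migrating;     /* is_migrating(child): background scheduled here */
	bool completed;     /* is_completed(child) */
	bool out_of_budget; /* budget_enforced(child) && budget_exhausted(child) */
	bool running;       /* is_current_running() */
};

static const char *on_container_completion(const struct child_state *c)
{
	if (!c->migrating) /* fixed task: leave it for edfsc_cschedule() */
		return "freeze in container";
	if (c->completed || c->out_of_budget) /* forced iff out of budget */
		return "g_job_completion(child, forced)";
	if (!c->running) /* blocked while background scheduled */
		return "unlink(child)";
	return "requeue(child)"; /* child may keep running globally */
}

int main(void)
{
	struct child_state blocked = { .migrating = true, .running = false };
	struct child_state done = { .migrating = true, .completed = true };
	printf("blocked child  -> %s\n", on_container_completion(&blocked));
	printf("finished child -> %s\n", on_container_completion(&done));
	return 0;
}

In every migrating case the patch also clears the container's scheduled
pointer afterwards, so a migrating task is never left "frozen" in a container.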
-rw-r--r--  litmus/sched_edfsc.c | 95
1 file changed, 64 insertions(+), 31 deletions(-)
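
Illustrative sketch for change 5: a standalone model, with stand-in types
rather than the real LITMUS^RT structures, of the edfsc_task_exit() fix in
the last hunk below. If the CPU's entry->scheduled is a container, the
exiting task occupies the container's domain->scheduled slot and must be
cleared there instead of in entry->scheduled.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct task {
	bool is_container;
	struct task *domain_scheduled; /* container's currently scheduled child */
};

struct cpu_entry {
	struct task *scheduled; /* what this CPU last picked */
};

static void clear_exiting_task(struct cpu_entry *entry, struct task *t)
{
	if (entry->scheduled && entry->scheduled->is_container) {
		/* t exited while being background scheduled under a container */
		assert(entry->scheduled->domain_scheduled == t);
		entry->scheduled->domain_scheduled = NULL;
	} else {
		assert(entry->scheduled == t);
		entry->scheduled = NULL;
	}
}

int main(void)
{
	struct task t = { .is_container = false };
	struct task container = { .is_container = true, .domain_scheduled = &t };
	struct cpu_entry cpu = { .scheduled = &container };

	clear_exiting_task(&cpu, &t); /* background-scheduled exit path */
	assert(container.domain_scheduled == NULL);
	return 0;
}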
diff --git a/litmus/sched_edfsc.c b/litmus/sched_edfsc.c
index a91951480f33..8ae94f2dc1df 100644
--- a/litmus/sched_edfsc.c
+++ b/litmus/sched_edfsc.c
@@ -436,10 +436,6 @@ static void c_release(struct task_struct *t) {
 	if (!bheap_node_in_heap(entry->hn))
 		add_cpu_to_global(entry);
 	// Note that container's aren't real tasks and thus can't block
-	if (tsk_rt(t)->edfsc_params.domain->scheduled) {
-		requeue(tsk_rt(t)->edfsc_params.domain->scheduled);
-		tsk_rt(t)->edfsc_params.domain->scheduled = NULL;
-	}
 	// Let g_preempt_check() decide what to run, don't impose
 	unlink(t);
 	// Request to be scheduled globally again
@@ -482,12 +478,44 @@ static noinline void g_job_completion(struct task_struct* t, int forced)
 		requeue(t);
 		g_preempt_check();
 	}
-	// When a container job finishes late
-	} else if (is_container(t) && tsk_rt(t)->edfsc_params.can_release) {
-		tsk_rt(t)->edfsc_params.can_release = 0;
-		c_release(t);
-		if (get_rt_utilization(t) == to_fp(1))
-			manage_idle_enforcement_timer(t);
+	/* A container may be in several different states when it finishes. It may:
+	 * - Be scheduling a migrating task that is finished, blocked, or out of budget
+	 * - Be scheduling a fixed task
+	 * - Be scheduling nothing
+	 * If there's a migrating task being scheduled, we can't unconditionally
+	 * requeue it. Often, we may actually have to call g_job_completion() on
+	 * that migrating task. If we finish while running a fixed task, we just
+	 * "freeze" it in the container - edfsc_cschedule() will take care of
+	 * processing its state when the container is rescheduled.
+	 *
+	 * If the container is tardy, we process its scheduled task as in the non-
+	 * tardy case, then just immediately call c_release() on the container.
+	 */
+	} else if (is_container(t)) {
+		struct task_struct** child = &tsk_rt(t)->edfsc_params.domain->scheduled;
+		// No need to handle fixed tasks, cschedule will do that when it runs next
+		if (*child && is_migrating(*child)) {
+			BUG_ON(is_queued(*child));
+			// If migrating and done
+			if (is_completed(*child) || (budget_enforced(*child) && budget_exhausted(*child))) {
+				g_job_completion(*child, budget_enforced(*child) && budget_exhausted(*child));
+			// If migrating and blocked
+			} else if (!is_current_running()) {
+				unlink(*child);
+			// Otherwise it can keep running globally
+			} else {
+				requeue(*child);
+			}
+			// Regardless, we never "freeze" a migrating task in a container
+			*child = NULL;
+		}
+		// When a container job finishes late, release it immediately
+		if (tsk_rt(t)->edfsc_params.can_release) {
+			tsk_rt(t)->edfsc_params.can_release = 0;
+			c_release(t);
+			if (get_rt_utilization(t) == to_fp(1))
+				manage_idle_enforcement_timer(t);
+		}
 	}
 }
 
@@ -585,11 +613,6 @@ static void edfsc_cschedule(cont_domain_t* cedf, struct task_struct * prev)
 	 */
 	resched = preempt;
 
-	/* If a task blocks we have no choice but to reschedule.
-	 */
-	if (blocks)
-		resched = 1;
-
 	/* Request a sys_exit_np() call if we would like to preempt but cannot.
 	 * Multiple calls to request_exit_np() don't hurt.
 	 */
@@ -608,24 +631,24 @@ static void edfsc_cschedule(cont_domain_t* cedf, struct task_struct * prev)
 		resched = 1;
 	}
 
+	// Deschedule any background jobs if a fixed task is ready
+	if (is_migrating(cedf->scheduled) && preempt) {
+		if (!sleep && !out_of_time && !blocks && !is_queued(cedf->scheduled))
+			requeue(cedf->scheduled);
+		resched = 1;
+	}
+
 	/* The final scheduling decision. Do we need to switch for some reason?
 	 * Switch if we are in RT mode and have no task or if we need to
 	 * resched.
 	 */
 	next = NULL;
-	if ((!np || blocks) && (resched || !exists)) {
-		/* When preempting a task that does not block, then
-		 * re-insert it into either the ready queue or the
-		 * release queue (if it completed). requeue() picks
-		 * the appropriate queue.
-		 */
+	if (blocks || !exists || (!np && resched)) {
 		next = __take_ready(edf);
 	} else if (exists) {
-		BUG_ON(!is_realtime(prev));
-		/* Only override Linux scheduler if we have a real-time task
-		 * scheduled that needs to continue.
-		 */
-		next = prev;
+		// This is safe when background scheduling, as we can only get here if
+		// there were no other fixed tasks ready to run.
+		next = cedf->scheduled;
 	}
 
 	if (next) {
@@ -706,10 +729,10 @@ static struct task_struct *edfsc_gschedule(struct task_struct *prev)
 
 
 	/* If a task blocks we have no choice but to reschedule.
-	 * Note: containers never block
+	 * Note: containers never block, so if blocks is true and we're background
+	 * scheduling, we want to unlink `prev` NOT `entry->scheduled`.
 	 */
-	if (blocks)
-		unlink(entry->scheduled);
+	unlink(prev);
 
 	/* Request a sys_exit_np() call if we would like to preempt but cannot.
 	 * We need to make sure to update the link structure anyway in case
@@ -788,6 +811,10 @@ static struct task_struct *edfsc_gschedule(struct task_struct *prev)
 
 	// Tell LITMUS^RT that we choose a task and are done scheduling after return
 	sched_state_task_picked();
+	// When we transition from doing background scheduling to doing normal
+	// scheduling, we may schedule the same task. Unfortunately, when this
+	// happens, g_finish_switch() will /not/ be called. Fix the state manually.
+	entry->scheduled = next;
 
 	// if no fixed tasks to be scheduled by the container, then container->scheduled
 	// should be the previous non-rt task if any
@@ -1091,8 +1118,14 @@ static void edfsc_task_exit(struct task_struct* t)
 	BUG_ON(t != current);
 	list_del(&t->edfsc_qnode);
 	entry = &per_cpu(edfsc_cpu_entries, task_cpu(t));
-	BUG_ON(entry->scheduled != t);
-	entry->scheduled = NULL;
+	// Handle the case where we exit while being background scheduled
+	if (is_container(entry->scheduled)) {
+		BUG_ON(entry->scheduled->rt_param.edfsc_params.domain->scheduled != t);
+		entry->scheduled->rt_param.edfsc_params.domain->scheduled = NULL;
+	} else {
+		BUG_ON(entry->scheduled != t);
+		entry->scheduled = NULL;
+	}
 }
 
 /* To preserve EDF-sc scheduling invariants, we can only release a task's