author     Zelin Tong <ztong@ludwig.cs.unc.edu>   2020-06-01 13:26:51 -0400
committer  Zelin Tong <ztong@ludwig.cs.unc.edu>   2020-06-01 13:26:51 -0400
commit     e4c5fa6df346a78dfb683d601fd5ad34e6de3375
tree       90a7b4dcf891ec84c2f7ca4d32bb94d63cc12a2c
parent     1394cfe730e1e5030decc9990b37011a4957a7c0
Final Fixes

Fixed jobs sometimes disappearing due to a mysterious path in gschedule
where a job is not requeued. This is fixed by requeueing first,
regardless, and letting unlink() dequeue as necessary.

Made container_boundary only attempt stabilization when tasks have been
added to or removed from the system.
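
The core of the requeue fix, distilled from the gschedule hunks below (a
minimal sketch using the scheduler's existing helpers requeue(), unlink(),
and is_queued(); not the verbatim patch):

    /* Previously a preempted job could take a path where it ended up
     * neither linked to a CPU nor queued in a ready heap, so it silently
     * vanished. Now the job is requeued up front, unconditionally;
     * unlink() already dequeues a queued job, so a redundant requeue is
     * harmless. */
    if (exists && preempted && !is_queued(entry->scheduled))
            requeue(entry->scheduled);  /* requeue regardless */
    if (blocks)
            unlink(prev);               /* unlink dequeues as necessary */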
-rw-r--r--  litmus/litmus_proc.c |   9
-rw-r--r--  litmus/sched_edfsc.c | 129
2 files changed, 99 insertions(+), 39 deletions(-)
diff --git a/litmus/litmus_proc.c b/litmus/litmus_proc.c
index de5e3f37fe88..ebea92da0a3d 100644
--- a/litmus/litmus_proc.c
+++ b/litmus/litmus_proc.c
@@ -31,13 +31,18 @@ static struct proc_dir_entry *litmus_dir = NULL,
 /* in litmus/sync.c */
 int count_tasks_waiting_for_release(void);
 
+/* in litmus/sched_edfsc.c */
+int count_migrating_tasks(void);
+
 static int litmus_stats_proc_show(struct seq_file *m, void *v)
 {
        seq_printf(m,
                   "real-time tasks = %d\n"
-                  "ready for release = %d\n",
+                  "ready for release = %d\n"
+                  "migrating tasks = %d\n",
                   atomic_read(&rt_task_count),
-                  count_tasks_waiting_for_release());
+                  count_tasks_waiting_for_release(),
+                  count_migrating_tasks());
        return 0;
 }
 
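Assuming this seq_file backs /proc/litmus/stats (as in stock LITMUS^RT),
reading it after this change should produce one extra line, along these
lines (values illustrative):

    $ cat /proc/litmus/stats
    real-time tasks = 3
    ready for release = 1
    migrating tasks = 2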
diff --git a/litmus/sched_edfsc.c b/litmus/sched_edfsc.c
index 8ae94f2dc1df..fae6feeac76f 100644
--- a/litmus/sched_edfsc.c
+++ b/litmus/sched_edfsc.c
@@ -68,6 +68,9 @@ static rt_domain_t gsched_domain;
 u64 m_util;
 u64 sys_util;
 
+// only true when container_boundary needs to perform stabilization
+int sys_changed;
+
 #define is_container(task) ((task) && tsk_rt(task)->edfsc_params.domain != NULL && tsk_rt(task)->domain == &gsched_domain)
 #define is_fixed(task) ((task) && tsk_rt(task)->edfsc_params.container_task != NULL)
 #define is_migrating(task) ((task) && tsk_rt(task)->edfsc_params.domain == NULL && tsk_rt(task)->domain == &gsched_domain)
@@ -83,6 +86,21 @@ void release_heap_free(struct release_heap* rh);
 struct bheap_node* bheap_node_alloc(int gfp_flags);
 void bheap_node_free(struct bheap_node* hn);
 
+int count_migrating_tasks(void)
+{
+       int task_count = 0;
+       struct list_head *pos;
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&g_lock, flags);
+
+       list_for_each(pos, &migrating_tasks) {
+               task_count++;
+       }
+
+       raw_spin_unlock_irqrestore(&g_lock, flags);
+       return task_count;
+}
 
 /* Do a backwards comparison based on f_util so that heavier containers
  * will come first
@@ -370,6 +388,7 @@ static void g_remove_task(struct task_struct *t)
        BUG_ON(is_container(t));
        m_util -= get_rt_utilization(t);
        sys_util -= get_rt_utilization(t);
+       sys_changed = 1;
 }
 
 static void c_remove_task(struct task_struct *t)
@@ -378,6 +397,7 @@ static void c_remove_task(struct task_struct *t)
        tsk_rt(container_task)->edfsc_params.domain->f_util -=
                get_rt_utilization(t);
        sys_util -= get_rt_utilization(t);
+       sys_changed = 1;
 }
 
 /**
@@ -400,6 +420,7 @@ static void migrate_task(struct task_struct *t)
        tsk_rt(t)->edfsc_params.container_task = tsk_rt(t)->edfsc_params.move_to->container;
        requeue(t);
        tsk_rt(t)->edfsc_params.move_to = NULL;
+       sys_changed = 1;
 }
 
 /**
@@ -492,6 +513,7 @@ static noinline void g_job_completion(struct task_struct* t, int forced)
         * tardy case, then just immediately call c_release() on the container.
         */
        } else if (is_container(t)) {
+               /*
                struct task_struct** child = &tsk_rt(t)->edfsc_params.domain->scheduled;
                // No need to handle fixed tasks, cschedule will do that when it runs next
                if (*child && is_migrating(*child)) {
@@ -509,6 +531,7 @@ static noinline void g_job_completion(struct task_struct* t, int forced)
                // Regardless, we never "freeze" a migrating task in a container
                *child = NULL;
                }
+               */
                // When a container job finishes late, release it immediately
                if (tsk_rt(t)->edfsc_params.can_release) {
                        tsk_rt(t)->edfsc_params.can_release = 0;
@@ -543,7 +566,8 @@ static void g_finish_switch(struct task_struct *prev)
        // members of entry multiple times, we have to lock. Otherwise we
        // may make an if branch based off entry->linked, and then have it
        // change before we can set entry->scheduled.
-       raw_spin_lock_irqsave(&g_lock, flags);
+       //raw_spin_lock_irqsave(&g_lock, flags);
+       preempt_disable();
        entry->scheduled = is_realtime(current) ? current : NULL;
        // If we're scheduling a task in a container, set entry->scheduled to the container
        if (entry->scheduled) {
@@ -557,13 +581,9 @@ static void g_finish_switch(struct task_struct *prev)
                entry->scheduled = entry->linked;
        }
 
-       // This handles requeuing when a container is descheduled
-       // TODO: Move this to edfsc_gschedule()
-       if (!is_container(entry->scheduled) && tsk_rt(container)->edfsc_params.domain->scheduled) {
-               requeue(tsk_rt(container)->edfsc_params.domain->scheduled);
-               tsk_rt(container)->edfsc_params.domain->scheduled = NULL;
-       }
-       raw_spin_unlock_irqrestore(&g_lock, flags);
+       BUG_ON(is_fixed(entry->scheduled));
+       //raw_spin_unlock_irqrestore(&g_lock, flags);
+       preempt_enable();
 #ifdef WANT_ALL_SCHED_EVENTS
        TRACE_TASK(prev, "switched away from\n");
 #endif
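The locking change above replaces the global g_lock with a
preempt-disabled section. The apparent rationale (not stated in the
commit message) is that g_finish_switch() now only touches this CPU's own
entry state, so pinning the task to the CPU suffices:

    preempt_disable();          /* pin to this CPU; no g_lock needed */
    entry->scheduled = ...;     /* per-CPU scheduler state only */
    preempt_enable();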
@@ -595,7 +615,7 @@ static void edfsc_cschedule(cont_domain_t* cedf, struct task_struct * prev)
         * differently from gedf, when a task exits (dead)
         * cedf->schedule may be null and prev _is_ realtime
         */
-       BUG_ON(cedf->scheduled && cedf->scheduled != prev && is_realtime(prev));
+       //BUG_ON(cedf->scheduled && cedf->scheduled != prev && is_realtime(prev));
        BUG_ON(cedf->scheduled && !is_realtime(cedf->scheduled));
 
        /* (0) Determine state */
@@ -605,7 +625,8 @@ static void edfsc_cschedule(cont_domain_t* cedf, struct task_struct * prev)
                      && budget_exhausted(cedf->scheduled);
        np = exists && is_np(cedf->scheduled);
        sleep = exists && is_completed(cedf->scheduled);
-       preempt = (is_migrating(prev) && __peek_ready(edf)) || edf_preemption_needed(edf, prev);
+       preempt = (is_migrating(cedf->scheduled) && __peek_ready(edf)) ||
+                 (exists && edf_preemption_needed(edf, cedf->scheduled));
 
        /* If we need to preempt do so.
         * The following checks set resched to 1 in case of special
@@ -632,7 +653,7 @@ static void edfsc_cschedule(cont_domain_t* cedf, struct task_struct * prev)
        }
 
        // Deschedule any background jobs if a fixed task is ready
-       if (is_migrating(cedf->scheduled) && preempt) {
+       if (is_migrating(cedf->scheduled) || preempt) {
                if (!sleep && !out_of_time && !blocks && !is_queued(cedf->scheduled))
                        requeue(cedf->scheduled);
                resched = 1;
@@ -644,6 +665,9 @@ static void edfsc_cschedule(cont_domain_t* cedf, struct task_struct * prev)
         */
        next = NULL;
        if (blocks || !exists || (!np && resched)) {
+               /*if (exists && !out_of_time && !sleep && !is_queued(cedf->scheduled)) {
+                       requeue(cedf->scheduled);
+               }*/
                next = __take_ready(edf);
        } else if (exists) {
                // This is safe when background scheduling, as we can only get here if
@@ -682,6 +706,7 @@ static struct task_struct *edfsc_gschedule(struct task_struct *prev)
        int out_of_time, sleep, preempted, np, exists, blocks, is_cont;
        unsigned long flags;
        struct task_struct* next = NULL;
+       struct task_struct* temp = NULL;
 
        raw_spin_lock_irqsave(&g_lock, flags);
 
@@ -727,11 +752,14 @@ static struct task_struct *edfsc_gschedule(struct task_struct *prev)
                TRACE_TASK(prev, "will be preempted by %s/%d\n",
                           entry->linked->comm, entry->linked->pid);
 
+       if (exists && preempted && !is_queued(entry->scheduled))
+               requeue(entry->scheduled);
 
        /* If a task blocks we have no choice but to reschedule.
         * Note: containers never block, so if blocks is true and we're background
         * scheduling, we want to unlink `prev` NOT `entry->scheduled`.
         */
+       if (blocks)
        unlink(prev);
 
        /* Request a sys_exit_np() call if we would like to preempt but cannot.
@@ -811,9 +839,11 @@ static struct task_struct *edfsc_gschedule(struct task_struct *prev)
 
        // Tell LITMUS^RT that we choose a task and are done scheduling after return
        sched_state_task_picked();
+
        // When we transition from doing background scheduling to doing normal
        // scheduling, we may schedule the same task. Unfortunately, when this
        // happens, g_finish_switch() will /not/ be called. Fix the state manually.
+       temp = entry->scheduled;
        entry->scheduled = next;
 
        // if no fixed tasks to be scheduled by the container, then container->scheduled
@@ -824,6 +854,27 @@ static struct task_struct *edfsc_gschedule(struct task_struct *prev)
                manage_idle_enforcement_timer(next);
                next = tsk_rt(next)->edfsc_params.domain->scheduled;
        }
+       // When next is migrating but the previously scheduled realtime task is a container,
+       // we must restore the background-scheduled task to its correct queue/heap
+       else if (is_container(temp) && next != temp) {
+               struct task_struct** child = &tsk_rt(temp)->edfsc_params.domain->scheduled;
+               // No need to handle fixed tasks, cschedule will do that when it runs next
+               if (*child && is_migrating(*child)) {
+                       BUG_ON(is_queued(*child));
+                       // If migrating and done
+                       if (is_completed(*child) || (budget_enforced(*child) && budget_exhausted(*child))) {
+                               g_job_completion(*child, budget_enforced(*child) && budget_exhausted(*child));
+                       // If migrating and blocked
+                       } else if (!is_current_running()) {
+                               unlink(*child);
+                       // Otherwise it can keep running globally
+                       } else {
+                               requeue(*child);
+                       }
+                       // Regardless, we never "freeze" a migrating task in a container
+                       *child = NULL;
+               }
+       }
 
        raw_spin_unlock_irqrestore(&g_lock, flags);
 
@@ -849,7 +900,6 @@ static enum hrtimer_restart container_boundary(struct hrtimer *timer)
        struct list_head *it;
        struct list_head *temp;
        u64 u_extra;
-       int need_reweight;
        cont_domain_t *container;
        struct task_struct *t;
        lt_t now;
@@ -898,7 +948,7 @@ static enum hrtimer_restart container_boundary(struct hrtimer *timer)
                        list_add(&t->edfsc_qnode, &migrating_tasks);
                }
                sys_util += get_rt_utilization(t);
-               need_reweight = 1;
+               sys_changed = 1;
                // Setup the release time for the first job to be now
                release_at(t, litmus_clock());
        }
@@ -925,38 +975,40 @@ static enum hrtimer_restart container_boundary(struct hrtimer *timer)
        // We do this by adding a fake task that ultimately doesn't release any jobs
        // This is represented here by adding the utilization to sys_util
        // which will be subtracted when the migrating task is actually changed to fixed
-       list_for_each(it, &migrating_tasks) {
-               struct task_struct* t = task_of_list_node(it);
-               // Although technically selecting the migrating tasks to be moved into containers
-               // doesn't change m_util and the container's f_util until after the move,
-               // but since the move is guaranteed to happen before the next container_boundary
-               // where we check all the utilization stuff, it's fine to account for it now
-               if (!(tsk_rt(t)->edfsc_params.move_to) && !is_released(t, now)
+       if (sys_changed) {
+               list_for_each(it, &migrating_tasks) {
+                       struct task_struct* t = task_of_list_node(it);
+                       // Although technically selecting the migrating tasks to be moved into containers
+                       // doesn't change m_util and the container's f_util until after the move,
+                       // but since the move is guaranteed to happen before the next container_boundary
+                       // where we check all the utilization stuff, it's fine to account for it now
+                       if (!(tsk_rt(t)->edfsc_params.move_to) && !is_released(t, now)
                        && get_deadline(t) < get_deadline(&container_tasks[0]) + get_rt_period(&container_tasks[0])) {
                        tsk_rt(t)->edfsc_params.move_to = NULL;
 
                        container = NULL;
                        for (i = 0; i < num_cpus; i++) {
                                u64 leftover = to_fp(1) - container_domains[i].f_util;
-                               if (leftover >= get_rt_utilization(t) && to_fp(num_cpus) >= get_rt_utilization(t) + sys_util) {
+                               if (leftover>=get_rt_utilization(t) && to_fp(num_cpus)>=get_rt_utilization(t)+sys_util) {
                                        container = &(container_domains[i]);
                                        break;
+                               }
                                }
-                       }
 
                        if (container) {
                                list_del_init(&t->edfsc_qnode);
                                container->f_util += get_rt_utilization(t);
                                m_util -= get_rt_utilization(t);
                                sys_util += get_rt_utilization(t);
                                tsk_rt(t)->edfsc_params.move_to = container;
-                               need_reweight = 1;
+                               sys_changed = 1;
+                               }
                        }
                }
        }
 
        // If needed, reweight containers using EqualOver heuristic
-       if (need_reweight) {
+       if (sys_changed) {
                int remaining;
                // Sort containers by the utilization of their fixed tasks
                sort(container_list, num_cpus, sizeof(cont_domain_t *), &container_lower_prio, NULL);
@@ -985,6 +1037,7 @@ static enum hrtimer_restart container_boundary(struct hrtimer *timer)
                        tsk_rt(t)->task_params.utilization = container_list[i]->f_util + u_extra / remaining;
                }
        }
+       sys_changed = 0;
 
        INIT_LIST_HEAD(&pending_adds);
 
@@ -1016,6 +1069,7 @@ static enum hrtimer_restart task_deadline_callback(struct hrtimer* timer) {
        struct task_struct *t = container_of(timer, struct task_struct, edfsc_deadline_timer);
 
        BUG_ON(is_container(t));
+       printk("util: %d\n", sys_util);
        // This is true only if set to be migrating from container_boundary
        if (tsk_rt(t)->edfsc_params.move_to) {
                // Migrate here if the task is not late, otherwise migrate in job_complete
@@ -1328,6 +1382,7 @@ static int __init init_edfsc(void)
 
        sys_util = to_fp(0);
        m_util = to_fp(0);
+       sys_changed = 1;
 
        memset(container_tasks, 0, sizeof(struct task_struct) * num_online_cpus());
        memset(container_domains, 0, sizeof(cont_domain_t) * num_online_cpus());
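
Taken together, the sys_changed additions implement a dirty-flag protocol
around the periodic container boundary. A self-contained sketch of the
pattern (hypothetical helper names; the real flag and call sites are in
the diff above):

    /* Illustrative sketch only, not the kernel code. */
    static int sys_changed = 1;    /* start dirty: force the first stabilization */

    /* Hot paths that alter utilization bookkeeping (task admission,
     * removal, migration into a container) mark the system dirty. */
    static void mark_system_changed(void)      /* hypothetical helper */
    {
            sys_changed = 1;
    }

    /* Simplified periodic boundary: stabilization of migrating tasks and
     * EqualOver reweighting run only when something changed since the
     * last boundary, instead of on every period. */
    static void container_boundary_tick(void)  /* hypothetical helper */
    {
            if (sys_changed) {
                    /* ... move migrating tasks into containers ... */
                    /* ... reweight container utilizations ... */
            }
            sys_changed = 0;
    }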