Diffstat (limited to 'litmus/sched_edfsc.c')

 litmus/sched_edfsc.c | 41 +++++++++++++++++++++++++----------------
 1 file changed, 25 insertions(+), 16 deletions(-)

diff --git a/litmus/sched_edfsc.c b/litmus/sched_edfsc.c
index 501aa1197a94..927f35ebc68f 100644
--- a/litmus/sched_edfsc.c
+++ b/litmus/sched_edfsc.c
@@ -645,8 +645,10 @@ static void edfsc_cschedule(cont_domain_t* cedf, struct task_struct * prev)
 	 * this.
 	 */
 	if (!np && (out_of_time || sleep)) {
-		if (is_fixed(cedf->scheduled))
+		if (is_fixed(cedf->scheduled)) {
+			printk("fixed task complete\n");
 			c_job_completion(cedf->scheduled, !sleep);
+		}
 		else {
 			g_job_completion(cedf->scheduled, !sleep);
 		}
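Worth spelling out why this hunk has to add braces and not just the printk(): a brace-less if binds only the next statement, so inserting the printk() alone would push c_job_completion() out of the conditional and orphan the else. A minimal, compilable sketch of the pitfall, using stand-ins for the scheduler calls (demo names hypothetical):

    #include <stdio.h>

    /* Stand-in for is_fixed(cedf->scheduled) in the hunk above. */
    static int is_fixed_demo(void) { return 0; }

    int main(void)
    {
        /* Correct shape, as in the patched code: both statements are
         * braced into the if-body. Without the braces, only the first
         * printf() would bind to the if, the second would always run,
         * and the else would no longer attach to anything. */
        if (is_fixed_demo()) {
            printf("fixed task complete\n");
            printf("c_job_completion()\n");
        } else {
            printf("g_job_completion()\n");
        }
        return 0;
    }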
@@ -786,6 +788,7 @@ static struct task_struct *edfsc_gschedule(struct task_struct *prev)
 	// Determine what to run next (set entry->linked)
 	if (!entry->linked) {
 		struct task_struct* task = __take_ready(&gsched_domain);
+		//printk("next task: %d\n", task);
 		// Make sure that containers are only scheduled on cores with same id
 		if (is_container(task) && entry->cpu != tsk_rt(task)->edfsc_params.id) {
 			// Get cpu_entry for task's core assignment
@@ -798,21 +801,24 @@ static struct task_struct *edfsc_gschedule(struct task_struct *prev)
 			link_task_to_cpu(target->linked, entry);
 			// Setup the container to run next on the remote core
 			link_task_to_cpu(task, target);
+			BUG_ON(budget_exhausted(task));
 			// Alert the remote core that it now needs to reschedule
 			preempt(target);
 		} else {
 			// We'll now schedule the ready task here
 			link_task_to_cpu(task, entry);
+			BUG_ON(task && budget_exhausted(task));
 			// Give up on this scheduling cycle, as we need to re-check the
 			// conditions earlier on in edfsc_gschedule()
-			if (task)
-				preempt(entry);
+			//if (task)
+			//	preempt(entry);
 		}
+		//printk("next linked: %d\n", entry->linked);
 		// We need to restart the scheduling process to make sure that linked
 		// is not out of budget OR we have nothing to run anyway
-		goto out;
+		//XXX modify this into a while loop?
+		//goto out;
 	}
 
 	BUG_ON(entry->linked && budget_exhausted(entry->linked));
 	BUG_ON(!bheap_node_in_heap(entry->hn) && entry->linked && tsk_rt(entry->linked)->edfsc_params.id != entry->cpu);
 	BUG_ON(is_container(entry->linked) && tsk_rt(entry->linked)->edfsc_params.id != entry->cpu);
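The XXX note and the commented-out goto point at replacing the restart-via-goto with a loop. A hypothetical sketch of that restructuring, reusing the helpers from this hunk but glossing over the container-migration branch (this is not the commit's code):

    /* Keep drawing from the ready queue until the linked task has
     * budget left or nothing is runnable, instead of goto-restarting
     * edfsc_gschedule() from the top. */
    while (!entry->linked || budget_exhausted(entry->linked)) {
        struct task_struct *task = __take_ready(&gsched_domain);

        if (!task)
            break;      /* idle: leave entry->linked unset */
        if (budget_exhausted(task))
            continue;   /* skip stale jobs rather than re-entering */
        link_task_to_cpu(task, entry);
    }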
@@ -844,8 +850,8 @@ static struct task_struct *edfsc_gschedule(struct task_struct *prev)
 	if (is_container(next))
 		edfsc_cschedule(tsk_rt(next)->edfsc_params.domain, prev);
 
-out:
-	sched_state_task_picked();
+//out:
+//	sched_state_task_picked();
 
 	// if no fixed tasks to be scheduled by the container, then container->scheduled
 	// should be the previous non-rt task if any
@@ -865,7 +871,6 @@ out:
 	else if (exists && !next)
 		TRACE("becomes idle at %llu.\n", litmus_clock());
 #endif
-
 	return next;
 }
 
@@ -888,7 +893,8 @@ static enum hrtimer_restart container_boundary(struct hrtimer *timer)
 
 	raw_spin_lock_irqsave(&g_lock, flags);
 
-	printk("\ncontainer_boundary\n");
+	//printk("\ncontainer_boundary\n");
+	TRACE("container_boundary start\n", litmus_clock());
 
 	now = litmus_clock();
 
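One aside on the new trace line: TRACE() is printf-style (see the "becomes idle at %llu" call earlier in this diff), and this format string has no conversion for the litmus_clock() argument, so the timestamp is silently dropped. Presumably the intent was something like:

    /* %llu consumes the litmus_clock() value (lt_t is a u64). */
    TRACE("container_boundary start at %llu\n", litmus_clock());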
@@ -908,14 +914,15 @@ static enum hrtimer_restart container_boundary(struct hrtimer *timer)
 		u_extra = to_fp(num_cpus) - sys_util;
 		container = NULL;
 		t = task_of_list_node(it);
-		printk("sys_util: %d\n", sys_util);
-		printk("task util: %d\n", get_rt_utilization(t));
+		//printk("sys_util: %d\n", sys_util);
+		//printk("task util: %d\n", get_rt_utilization(t));
 		if (u_extra >= get_rt_utilization(t)) {
 			for (i = 0; i < num_cpus; i++) {
 				u64 leftover = to_fp(1) - container_domains[i].f_util;
-				printk("container: %d\n", container_domains[i].f_util);
+				//printk("container: %d\n", container_domains[i].f_util);
 				if (leftover >= get_rt_utilization(t)) {
 					container = &(container_domains[i]);
+					printk("container id: %d\n", i);
 					break;
 				}
 			}
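For readers outside LITMUS^RT: the admission test above is fixed-point arithmetic, where to_fp(num_cpus) - sys_util is the spare capacity of the whole system and to_fp(1) - container_domains[i].f_util the spare capacity of a single container. A minimal, self-contained sketch of the same comparison, assuming the usual shift-based encoding (the FP_SHIFT value here is hypothetical):

    #include <stdio.h>
    #include <stdint.h>

    #define FP_SHIFT 10                         /* hypothetical precision */
    #define to_fp(x) ((uint64_t)(x) << FP_SHIFT)

    int main(void)
    {
        uint64_t sys_util  = 3 * to_fp(1) / 2;      /* system load: 1.5  */
        uint64_t task_util = to_fp(1) / 4;          /* new task:    0.25 */
        uint64_t u_extra   = to_fp(2) - sys_util;   /* 2 CPUs: 0.5 spare */

        /* Same shape as the admission test in the hunk above. */
        if (u_extra >= task_util)
            printf("admit: spare=%llu need=%llu (fp units)\n",
                   (unsigned long long)u_extra,
                   (unsigned long long)task_util);
        return 0;
    }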
@@ -1026,8 +1033,8 @@ static enum hrtimer_restart container_boundary(struct hrtimer *timer)
 	for (i = 0; i < num_cpus; i++) {
 		// will first iterate through fully provisioned containers, then not fully provisioned ones
 		struct task_struct* t = container_list[i]->container;
-		printk("container id: %d\n", tsk_rt(t)->edfsc_params.id);
-		printk("container budget: %lld\n", budget_remaining(t));
+		//printk("container id: %d\n", tsk_rt(t)->edfsc_params.id);
+		//printk("container budget: %lld\n", budget_remaining(t));
 		// If the last job completed on time, release it now
 		if (budget_exhausted(t)) {
 			BUG_ON(is_queued(t));
@@ -1039,7 +1046,7 @@ static enum hrtimer_restart container_boundary(struct hrtimer *timer)
 		}
 	}
 
-	printk("/container_boundary\n\n");
+	//printk("/container_boundary\n\n");
 	raw_spin_unlock_irqrestore(&g_lock, flags);
 
 	hrtimer_add_expires_ns(timer, LITMUS_QUANTUM_LENGTH_NS);
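Also for context: container_boundary() follows the standard periodic-hrtimer pattern, i.e. push the expiry forward by one fixed period and re-arm. A condensed sketch of that pattern (the period value is hypothetical; hrtimer_add_expires_ns() is the same helper used above):

    #include <linux/hrtimer.h>

    #define PERIOD_NS 1000000ULL    /* hypothetical 1 ms period */

    static enum hrtimer_restart boundary_cb(struct hrtimer *timer)
    {
        /* ... per-boundary work, under the appropriate lock ... */

        /* Advance relative to the previous expiry, not "now", so the
         * period boundaries do not accumulate drift. */
        hrtimer_add_expires_ns(timer, PERIOD_NS);
        return HRTIMER_RESTART;
    }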
@@ -1090,6 +1097,7 @@ static void edfsc_task_new(struct task_struct* t, int on_rq, int is_scheduled)
 
 	raw_spin_lock_irqsave(&g_lock, flags);
 	// Queue this task and request a reschedule
+	printk("requeue task\n");
 	requeue(t);
 	preempt(entry);
 
@@ -1128,7 +1136,8 @@ static void edfsc_task_exit(struct task_struct* t)
 	raw_spin_lock_irqsave(&g_lock, flags);
 
 	// Remove this task from all members of its scheduling domain
-	unlink(t);
+	if (is_migrating(t))
+		unlink(t);
 	if (is_queued(t)) {
 		remove(tsk_rt(t)->domain, t);
 	} else if (is_fixed(t)) {
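The new guard tracks EDF-SC's split between task classes: only migrating tasks are linked in the global domain, so unlink() is only meaningful for them, while fixed tasks are detached from their container's domain by the branches that follow. A condensed sketch of the resulting exit-path dispatch, with bodies elided (names as used in this file; not the commit's full code):

    static void edfsc_exit_sketch(struct task_struct *t)
    {
        if (is_migrating(t))
            unlink(t);                    /* global domain only */
        if (is_queued(t))
            remove(tsk_rt(t)->domain, t); /* still on a ready queue */
        else if (is_fixed(t)) {
            /* container-local teardown, as in the hunk above */
        }
    }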