 litmus/sched_edfsc.c | 21 +++++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)

diff --git a/litmus/sched_edfsc.c b/litmus/sched_edfsc.c
index 74737eddd4af..00804ee94b84 100644
--- a/litmus/sched_edfsc.c
+++ b/litmus/sched_edfsc.c
@@ -453,6 +453,7 @@ static void migrate_task(struct task_struct *t)
 	prepare_for_next_period(t);
 	tsk_rt(t)->domain = (rt_domain_t*)tsk_rt(t)->edfsc_params.move_to;
 	tsk_rt(t)->edfsc_params.container_task = tsk_rt(t)->edfsc_params.move_to->container;
+	BUG_ON(budget_enforced(t) && budget_exhausted(t));
 	requeue(t);
 	tsk_rt(t)->edfsc_params.move_to = NULL;
 }
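This is the first of four hunks that place the same assertion in front of a requeue() call (the others are in c_release(), g_job_completion(), and c_job_completion() below). The apparent invariant: once prepare_for_next_period() has replenished the job, a task must never re-enter a ready queue with an already-exhausted enforced budget, or the scheduler would pick it only to trip budget enforcement again immediately. For reference, the two predicates being combined are roughly the following (paraphrased from include/litmus/litmus.h in the LITMUS^RT tree; verify against your version):

/* Sketch of the LITMUS^RT budget predicates used by the new BUG_ON()s.
 * Paraphrased from include/litmus/litmus.h; not part of this patch. */
static inline int budget_exhausted(struct task_struct *t)
{
	return get_exec_time(t) >= get_exec_cost(t);
}

static inline int budget_enforced(struct task_struct *t)
{
	return tsk_rt(t)->task_params.budget_policy != NO_ENFORCEMENT;
}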
@@ -472,7 +473,6 @@ static void c_release(struct task_struct *t) {
 	 */
 	if (get_rt_utilization(t) == to_fp(1)) {
 		cpu_entry_t* entry = &per_cpu(edfsc_cpu_entries, tsk_rt(t)->edfsc_params.id);
-		BUG_ON(entry->linked);
 		// Make this cpu unavailable to the global scheduler
 		if (bheap_node_in_heap(entry->hn))
 			remove_cpu_from_global(entry);
@@ -502,6 +502,7 @@ static void c_release(struct task_struct *t) {
 		unlink(t);
 		// Request to be scheduled globally again
 		if (!is_queued(t))
+			BUG_ON(budget_enforced(t) && budget_exhausted(t));
 			requeue(t);
 		// Re-run our EDF scheduling to adjust for the added core
 		g_preempt_check();
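One caveat worth flagging in this hunk: the assertion is inserted between an unbraced `if (!is_queued(t))` and what used to be its body, so as committed the BUG_ON() becomes the conditional's body and requeue(t) now runs unconditionally. If the requeue was meant to stay conditional (an assumption; the commit does not say), the braced form would be:

/* Presumed intent (sketch, not what the hunk applies): keep both the
 * assertion and the requeue under the !is_queued(t) guard. */
if (!is_queued(t)) {
	BUG_ON(budget_enforced(t) && budget_exhausted(t));
	requeue(t);
}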
@@ -541,6 +542,7 @@ static noinline void g_job_completion(struct task_struct* t, int forced)
 	/* requeue
 	 * But don't requeue a blocking task. */
 	if (is_current_running()) { //since we don't support blocking, this should always be true
+		BUG_ON(budget_enforced(t) && budget_exhausted(t));
 		requeue(t);
 		g_preempt_check();
 	}
@@ -567,6 +569,7 @@ static void c_job_completion(struct task_struct* t, int forced)
 
 	tsk_rt(t)->completed = 0;
 	prepare_for_next_period(t);
+	BUG_ON(budget_enforced(t) && budget_exhausted(t));
 	requeue(t);
 }
 
@@ -859,10 +862,15 @@ static struct task_struct *edfsc_gschedule(struct task_struct *prev)
 			TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n");
 		}
 	} else if (entry->scheduled) {
-		if (is_realtime(prev))
+		if (is_container(entry->scheduled)) {
+			next = entry->scheduled;
+			tsk_rt(next)->scheduled_on = entry->cpu;
+		}
+		else if (is_realtime(prev))
 			next = prev;
 	}
-
+	BUG_ON(next && tsk_rt(next)->scheduled_on != entry->cpu);
+	//BUG_ON(next && tsk_rt(next)->linked_on != tsk_rt(next)->scheduled_on);
 	if (is_container(next)) {
 		struct task_struct* temp = next;
 		next = edfsc_cschedule(tsk_rt(next)->edfsc_params.domain, prev);
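Read as a whole, the branch now keeps a container running on its CPU instead of falling back to prev, and stamps scheduled_on so the new assertion holds. After the patch the selection logic is (restated from the hunk with the diff markers removed):

} else if (entry->scheduled) {
	if (is_container(entry->scheduled)) {
		/* A container stays put on its CPU; record where it runs
		 * so the assertion below holds. */
		next = entry->scheduled;
		tsk_rt(next)->scheduled_on = entry->cpu;
	} else if (is_realtime(prev)) {
		next = prev;
	}
}
BUG_ON(next && tsk_rt(next)->scheduled_on != entry->cpu);

The stricter linked_on == scheduled_on variant is left commented out, presumably because linked_on can legitimately differ from scheduled_on while a migration is in flight.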
@@ -871,7 +879,7 @@ static struct task_struct *edfsc_gschedule(struct task_struct *prev)
 	}
 
 	//out:
-	// sched_state_task_picked();
+	sched_state_task_picked();
 
 	// if no fixed tasks to be scheduled by the container, then container->scheduled
 	// should be the previous non-rt task if any
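Uncommenting sched_state_task_picked() looks like a correctness fix rather than a cleanup: the LITMUS^RT preemption state machine expects every plugin's schedule() callback to call it once a decision has been made, otherwise the core may keep requesting reschedules. The usual shape of such a callback, where plugin_schedule(), pick_next(), and lock are placeholder names rather than symbols from this file:

/* Typical LITMUS^RT plugin schedule() shape (sketch only). */
static struct task_struct *plugin_schedule(struct task_struct *prev)
{
	struct task_struct *next;

	raw_spin_lock(&lock);
	next = pick_next(prev);      /* plugin-specific selection */
	sched_state_task_picked();   /* tell the core a choice was made */
	raw_spin_unlock(&lock);
	return next;
}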
@@ -883,6 +891,7 @@ static struct task_struct *edfsc_gschedule(struct task_struct *prev)
 
 	BUG_ON(is_migrating(entry->linked) && next != entry->linked);
 	BUG_ON(next && budget_enforced(next) && budget_exhausted(next));
+	BUG_ON(next && is_container(next));
 	raw_spin_unlock_irqrestore(&g_lock, flags);
 
 #ifdef WANT_ALL_SCHED_EVENTS
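The added BUG_ON(next && is_container(next)) makes the function's contract explicit: by this point any container picked above must already have been translated by edfsc_cschedule() into one of its fixed tasks (or the background task), so a container task_struct should never be handed back to the core scheduler.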
@@ -936,6 +945,7 @@ static enum hrtimer_restart container_boundary(struct hrtimer *timer)
 		u_extra = to_fp(num_cpus) - sys_util;
 		container = NULL;
 		t = task_of_list_node(it);
+		list_del_init(it);
 		//printk("sys_util: %d\n", sys_util);
 		//printk("task util: %d\n", get_rt_utilization(t));
 		if (u_extra >= get_rt_utilization(t)) {
@@ -964,7 +974,6 @@ static enum hrtimer_restart container_boundary(struct hrtimer *timer)
 			// Setup the release time for the first job to be now
 			release_at(t, litmus_clock());
 		}
-		list_del(it);
 		/* Unblock the task waiting on our admission decision. They will detect
 		 * if they have been admitted by examining if tsk_rt(t)->domain != NULL
 		 * This sets the the state to TASK_RUNNING, adds the task to the run
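Together with the final hunk, these two change how the pending-admission node is retired: removal moves from after the admission decision up to the moment the node is fetched, and plain list_del() becomes list_del_init(). Leaving the node reinitialized instead of poisoned matters if the entry may be tested or unlinked again later; a minimal demonstration follows the final hunk.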
@@ -1008,7 +1017,7 @@ static enum hrtimer_restart container_boundary(struct hrtimer *timer)
 		}
 
 		if (container) {
-			list_del(&t->edfsc_qnode);
+			list_del_init(&t->edfsc_qnode);
 			container->f_util += get_rt_utilization(t);
 			m_util -= get_rt_utilization(t);
 			sys_util += get_rt_utilization(t);
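Why list_del_init() rather than list_del(): the kernel's list_del() poisons the node's next/prev pointers (LIST_POISON1/LIST_POISON2), so a later list_empty() test or second deletion on the same node is a bug, whereas list_del_init() leaves the node as a valid empty list. A standalone userspace sketch of the difference (a minimal re-creation of the kernel API subset, not the kernel headers):

#include <assert.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int  list_empty(const struct list_head *h) { return h->next == h; }

static void __list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_del(struct list_head *e)
{
	__list_del(e);
	e->next = e->prev = NULL;	/* stand-in for LIST_POISON1/2 */
}

static void list_del_init(struct list_head *e)
{
	__list_del(e);
	INIT_LIST_HEAD(e);		/* node is now a valid empty list */
}

static void list_add(struct list_head *e, struct list_head *h)
{
	e->next = h->next;
	e->prev = h;
	h->next->prev = e;
	h->next = e;
}

int main(void)
{
	struct list_head queue, node;

	INIT_LIST_HEAD(&queue);
	list_add(&node, &queue);

	list_del_init(&node);
	assert(list_empty(&node));	/* valid: node is an empty list */
	list_del_init(&node);		/* valid: second removal is a no-op */

	list_add(&node, &queue);
	list_del(&node);
	/* list_empty(&node) or a second list_del(&node) here would chase
	 * poisoned pointers -- the hazard the patch avoids. */
	puts("ok");
	return 0;
}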