From 4eadde99c78d27e4a261be1827e3c3a6e3717b65 Mon Sep 17 00:00:00 2001 From: Zelin Tong Date: Thu, 14 May 2020 15:37:56 -0400 Subject: Fixed Task Exit issue --- litmus/sched_edfsc.c | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/litmus/sched_edfsc.c b/litmus/sched_edfsc.c index 74737eddd4af..00804ee94b84 100644 --- a/litmus/sched_edfsc.c +++ b/litmus/sched_edfsc.c @@ -453,6 +453,7 @@ static void migrate_task(struct task_struct *t) prepare_for_next_period(t); tsk_rt(t)->domain = (rt_domain_t*)tsk_rt(t)->edfsc_params.move_to; tsk_rt(t)->edfsc_params.container_task = tsk_rt(t)->edfsc_params.move_to->container; + BUG_ON(budget_enforced(t) && budget_exhausted(t)); requeue(t); tsk_rt(t)->edfsc_params.move_to = NULL; } @@ -472,7 +473,6 @@ static void c_release(struct task_struct *t) { */ if (get_rt_utilization(t) == to_fp(1)) { cpu_entry_t* entry = &per_cpu(edfsc_cpu_entries, tsk_rt(t)->edfsc_params.id); - BUG_ON(entry->linked); // Make this cpu unavailable to the global scheduler if (bheap_node_in_heap(entry->hn)) remove_cpu_from_global(entry); @@ -502,6 +502,7 @@ static void c_release(struct task_struct *t) { unlink(t); // Request to be scheduled globally again + BUG_ON(budget_enforced(t) && budget_exhausted(t)); if (!is_queued(t)) requeue(t); // Re-run our EDF scheduling to adjust for the added core g_preempt_check(); @@ -541,6 +542,7 @@ static noinline void g_job_completion(struct task_struct* t, int forced) /* requeue * But don't requeue a blocking task. 
*/ if (is_current_running()) { //since we don't support blocking, this should always be true + BUG_ON(budget_enforced(t) && budget_exhausted(t)); requeue(t); g_preempt_check(); } @@ -567,6 +569,7 @@ static void c_job_completion(struct task_struct* t, int forced) tsk_rt(t)->completed = 0; prepare_for_next_period(t); + BUG_ON(budget_enforced(t) && budget_exhausted(t)); requeue(t); } @@ -859,10 +862,15 @@ static struct task_struct *edfsc_gschedule(struct task_struct *prev) TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n"); } } else if (entry->scheduled) { - if (is_realtime(prev)) + if (is_container(entry->scheduled)) { + next = entry->scheduled; + tsk_rt(next)->scheduled_on = entry->cpu; + } + else if (is_realtime(prev)) next = prev; } - + BUG_ON(next && tsk_rt(next)->scheduled_on != entry->cpu); + //BUG_ON(next && tsk_rt(next)->linked_on != tsk_rt(next)->scheduled_on); if (is_container(next)) { struct task_struct* temp = next; next = edfsc_cschedule(tsk_rt(next)->edfsc_params.domain, prev); @@ -871,7 +879,7 @@ static struct task_struct *edfsc_gschedule(struct task_struct *prev) } //out: -// sched_state_task_picked(); + sched_state_task_picked(); // if no fixed tasks to be scheduled by the container, then container->scheduled // should be the previous non-rt task if any @@ -883,6 +891,7 @@ static struct task_struct *edfsc_gschedule(struct task_struct *prev) BUG_ON(is_migrating(entry->linked) && next != entry->linked); BUG_ON(next && budget_enforced(next) && budget_exhausted(next)); + BUG_ON(next && is_container(next)); raw_spin_unlock_irqrestore(&g_lock, flags); #ifdef WANT_ALL_SCHED_EVENTS @@ -936,6 +945,7 @@ static enum hrtimer_restart container_boundary(struct hrtimer *timer) u_extra = to_fp(num_cpus) - sys_util; container = NULL; t = task_of_list_node(it); + list_del_init(it); //printk("sys_util: %d\n", sys_util); //printk("task util: %d\n", get_rt_utilization(t)); if (u_extra >= get_rt_utilization(t)) { @@ -964,7 +974,6 @@ static enum hrtimer_restart 
container_boundary(struct hrtimer *timer) // Setup the release time for the first job to be now release_at(t, litmus_clock()); } - list_del(it); /* Unblock the task waiting on our admission decision. They will detect * if they have been admitted by examining if tsk_rt(t)->domain != NULL * This sets the the state to TASK_RUNNING, adds the task to the run @@ -1008,7 +1017,7 @@ static enum hrtimer_restart container_boundary(struct hrtimer *timer) } if (container) { - list_del(&t->edfsc_qnode); + list_del_init(&t->edfsc_qnode); container->f_util += get_rt_utilization(t); m_util -= get_rt_utilization(t); sys_util += get_rt_utilization(t); -- cgit v1.2.2