From 721f4add1fedf8ac62d0473b886e9d379867b45f Mon Sep 17 00:00:00 2001
From: Zelin Tong
Date: Tue, 28 Jul 2020 14:43:39 -0400
Subject: Latest Changes

---
 litmus/litmus_proc.c |  6 +++---
 litmus/sched_edfsc.c | 55 ++++++++++++++++++++++------------------------------
 2 files changed, 26 insertions(+), 35 deletions(-)

diff --git a/litmus/litmus_proc.c b/litmus/litmus_proc.c
index ebea92da0a3d..92ce29f6975a 100644
--- a/litmus/litmus_proc.c
+++ b/litmus/litmus_proc.c
@@ -39,10 +39,10 @@ static int litmus_stats_proc_show(struct seq_file *m, void *v)
 	seq_printf(m,
 		   "real-time tasks   = %d\n"
 		   "ready for release = %d\n"
-		   "migrating tasks   = %d\n",
+		   /*"migrating tasks   = %d\n"*/,
 		   atomic_read(&rt_task_count),
-		   count_tasks_waiting_for_release(),
-		   count_migrating_tasks());
+		   count_tasks_waiting_for_release()
+		   /*,count_migrating_tasks()*/);
 	return 0;
 }
 
diff --git a/litmus/sched_edfsc.c b/litmus/sched_edfsc.c
index ea2dce57b337..cd7f0045f029 100644
--- a/litmus/sched_edfsc.c
+++ b/litmus/sched_edfsc.c
@@ -66,6 +66,8 @@ static cont_domain_t** container_list;
 static rt_domain_t gsched_domain;
 #define g_lock (gsched_domain.ready_lock)
 
+#define CONTAINER_PERIOD 50000000
+
 u64 m_util;
 u64 sys_util;
 
@@ -138,7 +140,7 @@ static noinline void requeue(struct task_struct* task)
 	BUG_ON(is_queued(task));
 	BUG_ON(is_migrating(task) && task->rt_param.edfsc_params.container_task != NULL);
 	//BUG_ON(task && tsk_rt(task)->linked_on != NO_CPU);
-	//BUG_ON(is_completed(task) || (budget_enforced(task) && budget_exhausted(task)));
+	BUG_ON(budget_enforced(task) && budget_exhausted(task));
 	//BUG_ON(is_container(task) && ((cont_domain_t*)task->rt_param.edfsc_params.domain)->timer_armed);
 	//BUG_ON(task && is_completed(task));
 
@@ -678,20 +680,12 @@ static noinline void edfsc_cschedule(cont_domain_t* cedf, struct task_struct * p
 	if (blocks || !exists || (!np && resched)) {
 		BUG_ON(cedf->scheduled && !blocks && !out_of_time && !sleep && !is_migrating(cedf->scheduled) && !is_queued(cedf->scheduled));
 		next = __take_ready(edf);
-		// Check for direct swap (1->2, 2->1) scenarios, which can cause deadlock
-		/*if (next) {
-			other_entry = &per_cpu(edfsc_cpu_entries, next->cpu);
-			this_entry = this_cpu_ptr(&edfsc_cpu_entries);
-			if (other_entry != this_entry
-					&& other_entry->cpu == this_entry->scheduled->cpu) {
-				requeue(next);
-				next = NULL;
-			}
-		}*/
+		BUG_ON(next && budget_enforced(next) && budget_exhausted(next));
 	} else if (exists) {
 		// This is safe when background scheduling, as we can only get here if
 		// there were no other fixed tasks ready to run.
 		BUG_ON(is_queued(cedf->scheduled));
+		BUG_ON(budget_enforced(cedf->scheduled) && budget_exhausted(cedf->scheduled));
 		next = cedf->scheduled;
 	}
 
@@ -700,7 +694,7 @@ static noinline void edfsc_cschedule(cont_domain_t* cedf, struct task_struct * p
 		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
 	// Give the container a little breathing room, otherwise, the core will be pounded with work
 	// Will often trigger watchdog due to continous execution
-	} else {
+	} else if (!list_empty(&migrating_tasks)) {
 		// Find a task in gsched_domain that isn't a container to background schedule
 		bheap_init(&temp);
 		next = __take_ready(&gsched_domain);
@@ -883,8 +877,6 @@ static struct task_struct *edfsc_gschedule(struct task_struct *prev)
 			*child = NULL;
 		}
 	}
-	BUG_ON(is_migrating(entry->scheduled) && !tsk_rt(entry->scheduled)->edfsc_params.container_task
-			&& !blocks && tsk_rt(entry->scheduled)->linked_on == NO_CPU && !is_queued(entry->scheduled));
 
 	// Tell LITMUS^RT that we choose a task and are done scheduling after return
 	sched_state_task_picked();
@@ -911,7 +903,7 @@ static enum hrtimer_restart container_boundary(struct hrtimer *timer)
 	int i;
 	struct list_head *it;
 	struct list_head *temp;
-	u64 u_extra, leeway;
+	u64 u_extra;
 	cont_domain_t *container;
 	struct task_struct *t;
 	int num_cpus = num_online_cpus();
@@ -922,7 +914,6 @@ static enum hrtimer_restart container_boundary(struct hrtimer *timer)
 	raw_spin_lock_irqsave(&g_lock, flags);
 
 	t = NULL;
-	leeway = fp_div(1, 50);
 
 	// Try to add tasks from the queue
 	list_for_each_safe(it, temp, &pending_adds) {
@@ -931,10 +922,10 @@ static enum hrtimer_restart container_boundary(struct hrtimer *timer)
 		list_del_init(it);
 		//sort(container_list, num_cpus, sizeof(cont_domain_t *), &container_lower_prio, NULL); // Best fit
 		sort(container_list, num_cpus, sizeof(cont_domain_t *), &container_higher_prio, NULL); // Worst fit
-		if (to_fp(num_cpus) > get_rt_utilization(t) + sys_util + leeway) {
+		if (to_fp(num_cpus) > get_rt_utilization(t) + sys_util) {
 			for (i = 0; i < num_cpus; i++) {
-				if (to_fp(1) > get_rt_utilization(t) + container_list[i]->f_util + leeway) {
-				//if (to_fp(1) > get_rt_utilization(t) + container_domains[i].f_util + leeway) {
+				if (to_fp(1) > get_rt_utilization(t) + container_list[i]->f_util) {
+				//if (to_fp(1) > get_rt_utilization(t) + container_domains[i].f_util) {
 					//container = &(container_domains[i]);
 					container = container_list[i]; // Used for best/worst fit
 					break;
 				}
			}
@@ -990,14 +981,14 @@ static enum hrtimer_restart container_boundary(struct hrtimer *timer)
 
 		container = NULL;
 		//sort(container_list, num_cpus, sizeof(cont_domain_t *), &container_lower_prio, NULL); // Best fit
-		//sort(container_list, num_cpus, sizeof(cont_domain_t *), &container_higher_prio, NULL); // Worst fit
+		sort(container_list, num_cpus, sizeof(cont_domain_t *), &container_higher_prio, NULL); // Worst fit
 		for (i = 0; i < num_cpus; i++) {
-			u64 leftover = to_fp(1) - container_domains[i].f_util - leeway;
-			//if (to_fp(1) > get_rt_utilization(t) + container_list[i]->f_util + leeway &&
-			if (to_fp(1) > get_rt_utilization(t) + container_domains[i].f_util + leeway &&
-					to_fp(num_cpus) > get_rt_utilization(t) + sys_util + leeway) {
-				container = &(container_domains[i]);
-				//container = container_list[i]; // Used for best/worst fit
+			u64 leftover = to_fp(1) - container_domains[i].f_util;
+			if (to_fp(1) > get_rt_utilization(t) + container_list[i]->f_util &&
+			//if (to_fp(1) > get_rt_utilization(t) + container_domains[i].f_util &&
+					to_fp(num_cpus) > get_rt_utilization(t) + sys_util) {
+				//container = &(container_domains[i]);
+				container = container_list[i]; // Used for best/worst fit
 				break;
 			}
 		}
@@ -1018,7 +1009,7 @@ static enum hrtimer_restart container_boundary(struct hrtimer *timer)
 		int remaining;
 		// Sort containers by the utilization of their fixed tasks
 		sort(container_list, num_cpus, sizeof(cont_domain_t *), &container_lower_prio, NULL);
-		u_extra = to_fp(num_cpus) - sys_util - leeway;
+		u_extra = to_fp(num_cpus) - sys_util;
 		// Fully provision all the container tasks we can
 		for (i = 0; i < num_cpus && u_extra >= to_fp(1) - container_list[i]->f_util; i++) {
 			struct task_struct* t = container_list[i]->container;
@@ -1076,7 +1067,7 @@ static enum hrtimer_restart container_boundary(struct hrtimer *timer)
 
 	TS_SCHED_TIMER_END
 
-	hrtimer_add_expires_ns(timer, LITMUS_QUANTUM_LENGTH_NS);
+	hrtimer_add_expires_ns(timer, CONTAINER_PERIOD);
 
 	return HRTIMER_RESTART;
 }
@@ -1373,12 +1364,12 @@ static long edfsc_activate_plugin(void)
 		// Name the task its container ID mapped to ASCII
 		snprintf(container_tasks[i].comm, TASK_COMM_LEN, "%d", i);
 		container_tasks[i].pid = -i;
-		tsk_rt(&container_tasks[i])->task_params.exec_cost = LITMUS_QUANTUM_LENGTH_NS;
+		tsk_rt(&container_tasks[i])->task_params.exec_cost = CONTAINER_PERIOD;
 		tsk_rt(&container_tasks[i])->task_params.period =
-			LITMUS_QUANTUM_LENGTH_NS;
+			CONTAINER_PERIOD;
 		tsk_rt(&container_tasks[i])->task_params.utilization = to_fp(1);
 		tsk_rt(&container_tasks[i])->task_params.relative_deadline =
-			LITMUS_QUANTUM_LENGTH_NS;
+			CONTAINER_PERIOD;
 		tsk_rt(&container_tasks[i])->task_params.budget_policy = PRECISE_ENFORCEMENT;
 		tsk_rt(&container_tasks[i])->edfsc_params.container_task = NULL;
 		tsk_rt(&container_tasks[i])->domain = &gsched_domain;
@@ -1422,7 +1413,7 @@ static long edfsc_activate_plugin(void)
 
 	// Start the container boundary timer
 	hrtimer_start(&container_release_timer,
-			ns_to_ktime(now + LITMUS_QUANTUM_LENGTH_NS),
+			ns_to_ktime(now + CONTAINER_PERIOD),
 			HRTIMER_MODE_ABS_PINNED);
 
 	return 0;
-- 
cgit v1.2.2
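For readers unfamiliar with the admission test this patch modifies in container_boundary: the standalone C sketch below illustrates a worst-fit container assignment of the kind the patch settles on (containers tried emptiest-first, per-container and system-wide utilization checks, no leeway slack term). It is an illustration only, not the LITMUS^RT implementation: the fixed-point format (FP_SHIFT), the userspace qsort comparator, and the example utilizations are all assumptions made for this sketch.

/*
 * Worst-fit container admission sketch (userspace, illustrative only).
 * Mirrors the structure of the patched checks:
 *   to_fp(1)        > task_util + container->f_util   (fits in container)
 *   to_fp(num_cpus) > task_util + sys_util            (fits in system)
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define FP_SHIFT 24                       /* assumed fixed-point format */
static uint64_t to_fp(uint64_t x) { return x << FP_SHIFT; }

struct cont { int id; uint64_t f_util; }; /* utilization of fixed tasks */

/* Worst fit: sort ascending by f_util so the container with the most
 * free capacity is tried first (stand-in for container_higher_prio). */
static int worst_fit_cmp(const void *a, const void *b)
{
	const struct cont *ca = a, *cb = b;
	return (ca->f_util > cb->f_util) - (ca->f_util < cb->f_util);
}

int main(void)
{
	/* Hypothetical fixed utilizations: 0.75, 0.5, 0.9, 0.25 */
	struct cont c[4] = { {0, to_fp(3)/4}, {1, to_fp(1)/2},
			     {2, to_fp(9)/10}, {3, to_fp(1)/4} };
	int num_cpus = 4;
	uint64_t sys_util = c[0].f_util + c[1].f_util + c[2].f_util + c[3].f_util;
	uint64_t task_util = to_fp(2)/5;  /* hypothetical new task: 0.4 */
	int i;

	qsort(c, num_cpus, sizeof(c[0]), worst_fit_cmp);
	for (i = 0; i < num_cpus; i++) {
		/* Same two checks as the patched admission test; note the
		 * old `leeway` slack term is gone in both. */
		if (to_fp(1) > task_util + c[i].f_util &&
		    to_fp(num_cpus) > task_util + sys_util) {
			printf("admit into container %d\n", c[i].id);
			return 0;
		}
	}
	printf("reject: no container can take the task\n");
	return 1;
}

With these example numbers the task (0.4) lands in container 3 (0.25 used), the emptiest one. Trying the emptiest container first tends to balance fixed utilization across containers; the commented-out container_lower_prio sort in the patch corresponds to the opposite choice, best fit, which packs the fullest feasible container first.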