author    Joshua Bakita <jbakita@cs.unc.edu>    2020-05-17 16:12:25 -0400
committer Joshua Bakita <jbakita@cs.unc.edu>    2020-05-17 16:12:25 -0400
commit    888ab3700f7d9e0b59795c6d8b0461b3ce0cdc81 (patch)
tree      e241e06729b5cdd7c483a237f82a33e0023d5194
parent    c537a10b8b94bef12cbc52294bdcf81d9725ea29 (diff)
Non-logic-changing cleanup
Changes are:
- Remove broken, commented out code
- Remove printk debugging (known to be unreliable)
- Name containers with IDs over 9 correctly
- Clarify existing and add new TRACE logging calls
- Fix inconsistent coding style
-rw-r--r--	litmus/sched_edfsc.c	58
1 file changed, 14 insertions(+), 44 deletions(-)
diff --git a/litmus/sched_edfsc.c b/litmus/sched_edfsc.c
index 0a7d8131ab54..a91951480f33 100644
--- a/litmus/sched_edfsc.c
+++ b/litmus/sched_edfsc.c
@@ -305,24 +305,6 @@ static void g_preempt_check(void)
 	struct task_struct *task;
 	cpu_entry_t *last, *target;
 
-#ifdef CONFIG_PREFER_LOCAL_LINKING
-
-	/* Before linking to other CPUs, check first whether the local CPU is
-	 * idle. */
-	//TODO come back this this later. This is causing a problem with g_preempt
-	/*
-	cpu_entry_t *local = this_cpu_ptr(&edfsc_cpu_entries);
-	task = __peek_ready(&gsched_domain);
-
-	if (task && !local->linked) {
-		task = __take_ready(&gsched_domain);
-		TRACE_TASK(task, "linking to local CPU %d to avoid IPI\n", local->cpu);
-		link_task_to_cpu(task, local);
-		preempt(local);
-	}
-	*/
-#endif
-
 	// Loop through CPUs in priority order, checking if anything needs preemption
 	for (last = lowest_prio_cpu();
 	     edf_preemption_needed(&gsched_domain, last->linked);
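The block removed above was an experimental fast path that linked a newly ready task directly to the local CPU to skip the IPI. It was already commented out, and its own TODO notes that it was causing a problem with g_preempt, so this change deletes it outright; this is the "remove broken, commented out code" item from the commit message.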
@@ -449,8 +431,7 @@ static void c_release(struct task_struct *t) {
 		tsk_rt(t)->edfsc_params.domain->scheduled_last_exec_time = litmus_clock();
 		// Run schedule again to make sure that we're run
 		preempt(entry);
-	}
-	else {
+	} else {
 		// Make our cpu available again
 		if (!bheap_node_in_heap(entry->hn))
 			add_cpu_to_global(entry);
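This hunk is a pure brace-style fix: kernel coding style cuddles the else onto the closing brace as "} else {". The next two hunks in edfsc_cschedule are whitespace-only (indentation and alignment of the state-flag assignments), so their before and after text reads nearly identically here.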
@@ -575,7 +556,7 @@ static void edfsc_cschedule(cont_domain_t* cedf, struct task_struct * prev)
 {
 	rt_domain_t *edf = &cedf->domain;
 
-	struct task_struct* next;
+	struct task_struct* next;
 	struct bheap temp;
 	int out_of_time, sleep, preempt,
 		np, exists, blocks, resched;
@@ -590,13 +571,13 @@ static void edfsc_cschedule(cont_domain_t* cedf, struct task_struct * prev)
 	BUG_ON(cedf->scheduled && !is_realtime(cedf->scheduled));
 
 	/* (0) Determine state */
-	exists = cedf->scheduled != NULL;
-	blocks = exists && !is_current_running();
-	out_of_time = exists && budget_enforced(cedf->scheduled)
-			&& budget_exhausted(cedf->scheduled);
-	np = exists && is_np(cedf->scheduled);
-	sleep = exists && is_completed(cedf->scheduled);
-	preempt = (is_migrating(prev) && __peek_ready(edf)) || edf_preemption_needed(edf, prev);
+	exists      = cedf->scheduled != NULL;
+	blocks      = exists && !is_current_running();
+	out_of_time = exists && budget_enforced(cedf->scheduled)
+	              && budget_exhausted(cedf->scheduled);
+	np          = exists && is_np(cedf->scheduled);
+	sleep       = exists && is_completed(cedf->scheduled);
+	preempt     = (is_migrating(prev) && __peek_ready(edf)) || edf_preemption_needed(edf, prev);
 
 	/* If we need to preempt do so.
 	 * The following checks set resched to 1 in case of special
@@ -622,9 +603,8 @@ static void edfsc_cschedule(cont_domain_t* cedf, struct task_struct * prev)
 	if (!np && (out_of_time || sleep)) {
 		if (is_fixed(cedf->scheduled))
 			c_job_completion(cedf->scheduled, !sleep);
-		else {
+		else
 			g_job_completion(cedf->scheduled, !sleep);
-		}
 		resched = 1;
 	}
 
@@ -660,9 +640,9 @@ static void edfsc_cschedule(cont_domain_t* cedf, struct task_struct * prev)
 		BUG_ON(next && is_queued(next));
 	}
 	if (next) {
-		TRACE("stealing slack at %llu\n", litmus_clock());
+		TRACE_TASK(next, "background scheduling at %llu\n", litmus_clock());
 	} else {
-		TRACE("cpu become idle at %llu\n", litmus_clock());
+		TRACE("container becomes idle at %llu\n", litmus_clock());
 	}
 	while (bheap_peek(fifo_prio, &temp)) {
 		requeue(bheap_take(fifo_prio, &temp)->value);
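Besides rewording the messages, this hunk upgrades the slack-stealing case from TRACE to TRACE_TASK, so the log identifies which migrating task is being background-scheduled inside the container. A minimal sketch of the relationship between the two macros, assuming the usual LITMUS^RT definition in litmus/debug_trace.h (the exact format string there may differ):

/* TRACE_TASK is TRACE plus an automatic task prefix: every task-related
 * log line gets comm, pid, and job-number context for free. */
#define TRACE_TASK(t, fmt, args...) \
	TRACE("(%s/%d:%d) " fmt, (t)->comm, (t)->pid, \
	      tsk_rt(t)->job_params.job_no, ##args)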
@@ -712,9 +692,6 @@ static struct task_struct *edfsc_gschedule(struct task_struct *prev)
 	sleep = exists && !is_cont && is_completed(entry->scheduled);
 	preempted = entry->scheduled != entry->linked;
 
-#ifdef WANT_ALL_SCHED_EVENTS
-	TRACE_TASK(prev, "invoked gsnedf_schedule.\n");
-#endif
 
 	if (exists)
 		TRACE_TASK(prev,
@@ -854,8 +831,6 @@ static enum hrtimer_restart container_boundary(struct hrtimer *timer)
 
 	raw_spin_lock_irqsave(&g_lock, flags);
 
-	printk("\ncontainer_boundary\n");
-
 	now = litmus_clock();
 
 	// Update budget tracking for containers
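Dropping the printk banner here is the "printk debugging (known to be unreliable)" item: container_boundary runs in hrtimer context with g_lock held and interrupts disabled, where printk's console handoff can stall or drop messages. The diagnostics that survive this commit go through the TRACE buffer instead.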
@@ -874,13 +849,10 @@ static enum hrtimer_restart container_boundary(struct hrtimer *timer)
 		u_extra = to_fp(num_cpus) - sys_util;
 		container = NULL;
 		t = task_of_list_node(it);
-		printk("sys_util: %d\n", sys_util);
-		printk("task util: %d\n", get_rt_utilization(t));
 		list_del_init(it);
 		if (u_extra >= get_rt_utilization(t)) {
 			for (i = 0; i < num_cpus; i++) {
 				u64 leftover = to_fp(1) - container_domains[i].f_util;
-				printk("container: %d\n", container_domains[i].f_util);
 				if (leftover >= get_rt_utilization(t)) {
 					container = &(container_domains[i]);
 					break;
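For context on the comparison the surviving code performs: utilizations are stored as fixed-point fractions, and a task fits container i exactly when to_fp(1) - f_util, the container's leftover capacity, is at least the task's utilization. A standalone sketch of that test, assuming a shift-based to_fp in the style of LITMUS^RT's fpmath.h with an illustrative FP_SHIFT of 10:

#include <stdint.h>
#include <stdio.h>

/* Assumed to mirror LITMUS^RT's fpmath.h: fixed point with a 10-bit
 * fractional part, so to_fp(1) == 1024 represents utilization 1.0. */
#define FP_SHIFT 10
#define to_fp(x) ((uint64_t)(x) << FP_SHIFT)

int main(void)
{
	uint64_t f_util   = to_fp(3) / 4;  /* container already 75% provisioned */
	uint64_t t_util   = to_fp(1) / 5;  /* incoming task needs 20% */
	uint64_t leftover = to_fp(1) - f_util;

	/* Same test as the loop above: admit the task to this container
	 * only if the leftover capacity covers its utilization. */
	printf("fits: %s\n", leftover >= t_util ? "yes" : "no");
	return 0;
}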
@@ -993,8 +965,6 @@ static enum hrtimer_restart container_boundary(struct hrtimer *timer)
 	for (i = 0; i < num_cpus; i++) {
 		// will first iterate through fully provisioned containers, then not fully provisioned ones
 		struct task_struct* t = container_list[i]->container;
-		printk("container id: %d\n", tsk_rt(t)->edfsc_params.id);
-		printk("container budget: %lld\n", budget_remaining(t));
 		// If the last job completed on time, release it now
 		if (budget_exhausted(t)) {
 			BUG_ON(is_queued(t));
@@ -1006,7 +976,6 @@ static enum hrtimer_restart container_boundary(struct hrtimer *timer)
 		}
 	}
 
-	printk("/container_boundary\n\n");
 	raw_spin_unlock_irqrestore(&g_lock, flags);
 
 	hrtimer_add_expires_ns(timer, LITMUS_QUANTUM_LENGTH_NS);
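With the closing banner gone, the unlock now leads straight into the timer re-arm, which is the standard periodic-hrtimer idiom: advance the expiry by one quantum relative to its previous value (so the period does not drift) and return HRTIMER_RESTART. A kernel-style sketch of that pattern; boundary_tick and QUANTUM_NS are illustrative names, not symbols from this file:

#include <linux/hrtimer.h>

#define QUANTUM_NS 1000000ULL	/* stand-in for LITMUS_QUANTUM_LENGTH_NS */

static enum hrtimer_restart boundary_tick(struct hrtimer *timer)
{
	/* ... per-quantum bookkeeping, under the appropriate lock ... */

	/* Push the expiry forward from its old value, not from "now",
	 * so late callbacks do not accumulate drift. */
	hrtimer_add_expires_ns(timer, QUANTUM_NS);
	return HRTIMER_RESTART;	/* ask the hrtimer core to re-arm */
}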
@@ -1106,6 +1075,7 @@ static void edfsc_task_exit(struct task_struct* t)
 
 	BUG_ON(is_container(t));
 	raw_spin_lock_irqsave(&g_lock, flags);
+	TRACE_TASK(t, "called edfsc_task_exit\n");
 
 	// Remove this task from all members of its scheduling domain
 	unlink(t);
@@ -1339,8 +1309,8 @@ static int __init init_edfsc(void)
 		container_domains[i].idle_enforcement_timer.function = on_idle_enforcement_timeout;
 
 
-		tsk_rt(&container_tasks[i])->scheduled_on = NO_CPU;
-		container_tasks[i].comm[0] = i + 48; // Name the task its container ID mapped to ASCII
+		// Name the task its container ID mapped to ASCII
+		snprintf(container_tasks[i].comm, TASK_COMM_LEN, "%d", i);
 		tsk_rt(&container_tasks[i])->task_params.exec_cost = LITMUS_QUANTUM_LENGTH_NS / 2;
 		tsk_rt(&container_tasks[i])->task_params.period =
 			LITMUS_QUANTUM_LENGTH_NS;
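This last hunk is the "name containers with IDs over 9 correctly" fix: comm[0] = i + 48 only works for the single digits 0 through 9 (ID 12 would become '<', since 12 + 48 is ASCII 60), while snprintf renders the full decimal ID and NUL-terminates it. A userspace sketch of the difference; the 16-byte buffer mirrors the kernel's TASK_COMM_LEN:

#include <stdio.h>

int main(void)
{
	char comm[16];	/* TASK_COMM_LEN is 16 in the kernel */
	int i = 12;	/* a container ID over 9 */

	/* Old scheme: single-character ASCII offset; breaks past 9. */
	comm[0] = i + 48;
	comm[1] = '\0';
	printf("old: %s\n", comm);	/* prints "<" */

	/* Patched scheme: full decimal ID, always NUL-terminated. */
	snprintf(comm, sizeof(comm), "%d", i);
	printf("new: %s\n", comm);	/* prints "12" */
	return 0;
}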