author    Zelin Tong <ztong@ludwig.cs.unc.edu>    2020-07-28 14:43:39 -0400
committer Zelin Tong <ztong@ludwig.cs.unc.edu>    2020-07-28 14:43:39 -0400
commit    721f4add1fedf8ac62d0473b886e9d379867b45f
tree      bcec00bb97eb1bc90acad54ca7bff3bd4b868056
parent    098a298ef73dd8dbacf0d697eef2a6f2daa2081c

    Latest Changes  (HEAD, wip-joshua)
 litmus/litmus_proc.c  |  6
 litmus/sched_edfsc.c  | 55
 2 files changed, 26 insertions(+), 35 deletions(-)
```diff
diff --git a/litmus/litmus_proc.c b/litmus/litmus_proc.c
index ebea92da0a3d..92ce29f6975a 100644
--- a/litmus/litmus_proc.c
+++ b/litmus/litmus_proc.c
@@ -39,10 +39,10 @@ static int litmus_stats_proc_show(struct seq_file *m, void *v)
 	seq_printf(m,
 		"real-time tasks = %d\n"
 		"ready for release = %d\n"
-		"migrating tasks = %d\n",
+		/*"migrating tasks = %d\n"*/,
 		atomic_read(&rt_task_count),
-		count_tasks_waiting_for_release(),
-		count_migrating_tasks());
+		count_tasks_waiting_for_release()
+		/*,count_migrating_tasks()*/);
 	return 0;
 }
 
```
```diff
diff --git a/litmus/sched_edfsc.c b/litmus/sched_edfsc.c
index ea2dce57b337..cd7f0045f029 100644
--- a/litmus/sched_edfsc.c
+++ b/litmus/sched_edfsc.c
@@ -66,6 +66,8 @@ static cont_domain_t** container_list;
 static rt_domain_t gsched_domain;
 #define g_lock (gsched_domain.ready_lock)
 
+#define CONTAINER_PERIOD 50000000
+
 u64 m_util;
 u64 sys_util;
 
```
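Note on the new constant: the value is in nanoseconds (it is later passed to `hrtimer_add_expires_ns()` and used as `exec_cost`/`period`, both nanosecond quantities in LITMUS^RT), so containers now release every 50 ms rather than once per `LITMUS_QUANTUM_LENGTH_NS` quantum. A minimal sketch of the unit arithmetic; the `main()` scaffolding is illustration only:

```c
#include <stdio.h>

#define CONTAINER_PERIOD 50000000ULL	/* ns, as in the hunk above */

int main(void)
{
	/* 50,000,000 ns == 50 ms, i.e. 20 container releases per second */
	printf("period = %llu ms (%llu Hz)\n",
	       CONTAINER_PERIOD / 1000000ULL,
	       1000000000ULL / CONTAINER_PERIOD);
	return 0;
}
```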
```diff
@@ -138,7 +140,7 @@ static noinline void requeue(struct task_struct* task)
 	BUG_ON(is_queued(task));
 	BUG_ON(is_migrating(task) && task->rt_param.edfsc_params.container_task != NULL);
 	//BUG_ON(task && tsk_rt(task)->linked_on != NO_CPU);
-	//BUG_ON(is_completed(task) || (budget_enforced(task) && budget_exhausted(task)));
+	BUG_ON(budget_enforced(task) && budget_exhausted(task));
 	//BUG_ON(is_container(task) && ((cont_domain_t*)task->rt_param.edfsc_params.domain)->timer_armed);
 	//BUG_ON(task && is_completed(task));
 
```
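The re-enabled assertion tightens `requeue()`: a job whose enforced budget is spent must not go back on a ready queue; it has to complete and receive a fresh budget at its next release. A small user-space model of that invariant, where every field and helper is a stand-in for the LITMUS^RT original (`budget_enforced()`/`budget_exhausted()` are assumed to reduce to a policy flag and an `exec_time >= exec_cost` test):

```c
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct job {
	bool     enforced;   /* budget_policy != NO_ENFORCEMENT */
	uint64_t exec_time;  /* time consumed by the current job */
	uint64_t exec_cost;  /* per-job budget */
};

static bool budget_exhausted(const struct job *j)
{
	return j->exec_time >= j->exec_cost;
}

static void requeue(struct job *j)
{
	/* Mirrors the diff: trips if an enforced, exhausted job is put
	 * back on a ready queue instead of completing first. */
	assert(!(j->enforced && budget_exhausted(j)));
	/* ... enqueue on the ready/release queue ... */
}

int main(void)
{
	struct job j = { .enforced = true, .exec_time = 3, .exec_cost = 5 };
	requeue(&j);	/* fine: budget remains */
	return 0;
}
```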
```diff
@@ -678,20 +680,12 @@ static noinline void edfsc_cschedule(cont_domain_t* cedf, struct task_struct * p
 	if (blocks || !exists || (!np && resched)) {
 		BUG_ON(cedf->scheduled && !blocks && !out_of_time && !sleep && !is_migrating(cedf->scheduled) && !is_queued(cedf->scheduled));
 		next = __take_ready(edf);
-		// Check for direct swap (1->2, 2->1) scenarios, which can cause deadlock
-		/*if (next) {
-			other_entry = &per_cpu(edfsc_cpu_entries, next->cpu);
-			this_entry = this_cpu_ptr(&edfsc_cpu_entries);
-			if (other_entry != this_entry
-					&& other_entry->cpu == this_entry->scheduled->cpu) {
-				requeue(next);
-				next = NULL;
-			}
-		}*/
+		BUG_ON(next && budget_enforced(next) && budget_exhausted(next));
 	} else if (exists) {
 		// This is safe when background scheduling, as we can only get here if
 		// there were no other fixed tasks ready to run.
 		BUG_ON(is_queued(cedf->scheduled));
+		BUG_ON(budget_enforced(cedf->scheduled) && budget_exhausted(cedf->scheduled));
 		next = cedf->scheduled;
 	}
 
```
```diff
@@ -700,7 +694,7 @@ static noinline void edfsc_cschedule(cont_domain_t* cedf, struct task_struct * p
 		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
 	// Give the container a little breathing room, otherwise, the core will be pounded with work
 	// Will often trigger watchdog due to continous execution
-	} else {
+	} else if (!list_empty(&migrating_tasks)) {
 		// Find a task in gsched_domain that isn't a container to background schedule
 		bheap_init(&temp);
 		next = __take_ready(&gsched_domain);
```
```diff
@@ -883,8 +877,6 @@ static struct task_struct *edfsc_gschedule(struct task_struct *prev)
 			*child = NULL;
 		}
 	}
-	BUG_ON(is_migrating(entry->scheduled) && !tsk_rt(entry->scheduled)->edfsc_params.container_task
-			&& !blocks && tsk_rt(entry->scheduled)->linked_on == NO_CPU && !is_queued(entry->scheduled));
 	// Tell LITMUS^RT that we choose a task and are done scheduling after return
 	sched_state_task_picked();
 
```
```diff
@@ -911,7 +903,7 @@ static enum hrtimer_restart container_boundary(struct hrtimer *timer)
 	int i;
 	struct list_head *it;
 	struct list_head *temp;
-	u64 u_extra, leeway;
+	u64 u_extra;
 	cont_domain_t *container;
 	struct task_struct *t;
 	int num_cpus = num_online_cpus();
```
```diff
@@ -922,7 +914,6 @@ static enum hrtimer_restart container_boundary(struct hrtimer *timer)
 	raw_spin_lock_irqsave(&g_lock, flags);
 
 	t = NULL;
-	leeway = fp_div(1, 50);
 
 	// Try to add tasks from the queue
 	list_for_each_safe(it, temp, &pending_adds) {
```
```diff
@@ -931,10 +922,10 @@ static enum hrtimer_restart container_boundary(struct hrtimer *timer)
 		list_del_init(it);
 		//sort(container_list, num_cpus, sizeof(cont_domain_t *), &container_lower_prio, NULL); // Best fit
 		sort(container_list, num_cpus, sizeof(cont_domain_t *), &container_higher_prio, NULL); // Worst fit
-		if (to_fp(num_cpus) > get_rt_utilization(t) + sys_util + leeway) {
+		if (to_fp(num_cpus) > get_rt_utilization(t) + sys_util) {
 			for (i = 0; i < num_cpus; i++) {
-				if (to_fp(1) > get_rt_utilization(t) + container_list[i]->f_util + leeway) {
-				//if (to_fp(1) > get_rt_utilization(t) + container_domains[i].f_util + leeway) {
+				if (to_fp(1) > get_rt_utilization(t) + container_list[i]->f_util) {
+				//if (to_fp(1) > get_rt_utilization(t) + container_domains[i].f_util) {
 					//container = &(container_domains[i]);
 					container = container_list[i]; // Used for best/worst fit
 					break;
```
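Together with the `leeway` removal above, admission now requires only strict utilization headroom rather than headroom plus a 2% (`fp_div(1, 50)`) margin. The placement itself is worst fit: containers are sorted so the emptiest comes first and the scan takes the first one with room. A standalone sketch of that selection under assumed fixed-point and comparator semantics (`struct cont`, `worst_fit`, and `emptier_first` are illustrative names, not the kernel's):

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define FP_SHIFT 24	/* assumed fixed-point format; stand-in for to_fp() */
#define TO_FP(x)	((uint64_t)(x) << FP_SHIFT)

struct cont { uint64_t f_util; };	/* utilization of the container's fixed tasks */

/* Sort so the emptiest container comes first. */
static int emptier_first(const void *a, const void *b)
{
	const struct cont *ca = *(const struct cont *const *)a;
	const struct cont *cb = *(const struct cont *const *)b;
	return (ca->f_util > cb->f_util) - (ca->f_util < cb->f_util);
}

/* Worst fit: admit u system-wide, then place it in the container with
 * the most free capacity; NULL means the admission test failed. */
static struct cont *worst_fit(struct cont **list, int n,
			      uint64_t u, uint64_t sys_util)
{
	int i;

	if (TO_FP(n) <= u + sys_util)	/* global admission test */
		return NULL;
	qsort(list, n, sizeof(*list), emptier_first);
	for (i = 0; i < n; i++)
		if (TO_FP(1) > u + list[i]->f_util)
			return list[i];	/* first fit == emptiest with room */
	return NULL;
}

int main(void)
{
	struct cont a = { TO_FP(3) / 4 }, b = { TO_FP(1) / 4 };
	struct cont *list[] = { &a, &b };
	struct cont *c = worst_fit(list, 2, TO_FP(1) / 2, TO_FP(1));

	printf("placed in container with f_util %s\n",
	       c == &b ? "0.25 (worst fit)" : "?");
	return 0;
}
```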
```diff
@@ -990,14 +981,14 @@ static enum hrtimer_restart container_boundary(struct hrtimer *timer)
 
 	container = NULL;
 	//sort(container_list, num_cpus, sizeof(cont_domain_t *), &container_lower_prio, NULL); // Best fit
-	//sort(container_list, num_cpus, sizeof(cont_domain_t *), &container_higher_prio, NULL); // Worst fit
+	sort(container_list, num_cpus, sizeof(cont_domain_t *), &container_higher_prio, NULL); // Worst fit
 	for (i = 0; i < num_cpus; i++) {
-		u64 leftover = to_fp(1) - container_domains[i].f_util - leeway;
-		//if (to_fp(1) > get_rt_utilization(t) + container_list[i]->f_util + leeway &&
-		if (to_fp(1) > get_rt_utilization(t) + container_domains[i].f_util + leeway &&
-				to_fp(num_cpus) > get_rt_utilization(t) + sys_util + leeway) {
-			container = &(container_domains[i]);
-			//container = container_list[i]; // Used for best/worst fit
+		u64 leftover = to_fp(1) - container_domains[i].f_util;
+		if (to_fp(1) > get_rt_utilization(t) + container_list[i]->f_util &&
+		//if (to_fp(1) > get_rt_utilization(t) + container_domains[i].f_util &&
+				to_fp(num_cpus) > get_rt_utilization(t) + sys_util) {
+			//container = &(container_domains[i]);
+			container = container_list[i]; // Used for best/worst fit
 			break;
 		}
 	}
```
```diff
@@ -1018,7 +1009,7 @@ static enum hrtimer_restart container_boundary(struct hrtimer *timer)
 		int remaining;
 		// Sort containers by the utilization of their fixed tasks
 		sort(container_list, num_cpus, sizeof(cont_domain_t *), &container_lower_prio, NULL);
-		u_extra = to_fp(num_cpus) - sys_util - leeway;
+		u_extra = to_fp(num_cpus) - sys_util;
 		// Fully provision all the container tasks we can
 		for (i = 0; i < num_cpus && u_extra >= to_fp(1) - container_list[i]->f_util; i++) {
 			struct task_struct* t = container_list[i]->container;
```
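With `leeway` gone, `u_extra` is now the exact system slack. A worked numeric example of the fully-provision test in the loop, using made-up utilizations and stand-in fixed-point helpers (`FP_SHIFT`, `FRAC` are assumptions for illustration):

```c
#include <stdint.h>
#include <stdio.h>

#define FP_SHIFT 24
#define TO_FP(x)	((uint64_t)(x) << FP_SHIFT)
#define FRAC(p, q)	(TO_FP(p) / (q))	/* fixed-point p/q */

int main(void)
{
	uint64_t sys_util = TO_FP(1) + FRAC(1, 4);	/* 1.25 */
	uint64_t u_extra  = TO_FP(2) - sys_util;	/* 2 CPUs: slack 0.75 */
	uint64_t f_util   = FRAC(2, 5);			/* fixed tasks use 0.40 */

	/* The container needs 1 - 0.40 = 0.60 more; 0.60 <= 0.75, so it
	 * can be promoted to a fully provisioned CPU. */
	if (u_extra >= TO_FP(1) - f_util)
		printf("container can be fully provisioned\n");
	return 0;
}
```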
```diff
@@ -1076,7 +1067,7 @@ static enum hrtimer_restart container_boundary(struct hrtimer *timer)
 
 	TS_SCHED_TIMER_END
 
-	hrtimer_add_expires_ns(timer, LITMUS_QUANTUM_LENGTH_NS);
+	hrtimer_add_expires_ns(timer, CONTAINER_PERIOD);
 	return HRTIMER_RESTART;
 }
 
```
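Re-arming by a fixed offset from the previous expiry keeps the container release timer drift-free regardless of callback latency. A minimal kernel-style sketch of the pattern (only the hrtimer API is real; the callback body and its name are placeholders for `container_boundary()`):

```c
#include <linux/hrtimer.h>

#define CONTAINER_PERIOD 50000000ULL	/* ns, from this commit */

/* Illustrative name; the real callback here is container_boundary(). */
static enum hrtimer_restart period_boundary(struct hrtimer *timer)
{
	/* ... per-period work, under the appropriate locks ... */

	/* Push the *absolute* expiry forward by one period. Re-arming
	 * relative to the old expiry (rather than re-reading the clock)
	 * avoids accumulating drift even if the callback ran late. */
	hrtimer_add_expires_ns(timer, CONTAINER_PERIOD);
	return HRTIMER_RESTART;
}
```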
```diff
@@ -1373,12 +1364,12 @@ static long edfsc_activate_plugin(void)
 		// Name the task its container ID mapped to ASCII
 		snprintf(container_tasks[i].comm, TASK_COMM_LEN, "%d", i);
 		container_tasks[i].pid = -i;
-		tsk_rt(&container_tasks[i])->task_params.exec_cost = LITMUS_QUANTUM_LENGTH_NS;
-		tsk_rt(&container_tasks[i])->task_params.period =
-				LITMUS_QUANTUM_LENGTH_NS;
+		tsk_rt(&container_tasks[i])->task_params.exec_cost = CONTAINER_PERIOD;
+		tsk_rt(&container_tasks[i])->task_params.period =
+				CONTAINER_PERIOD;
 		tsk_rt(&container_tasks[i])->task_params.utilization = to_fp(1);
 		tsk_rt(&container_tasks[i])->task_params.relative_deadline =
-				LITMUS_QUANTUM_LENGTH_NS;
+				CONTAINER_PERIOD;
 		tsk_rt(&container_tasks[i])->task_params.budget_policy = PRECISE_ENFORCEMENT;
 		tsk_rt(&container_tasks[i])->edfsc_params.container_task = NULL;
 		tsk_rt(&container_tasks[i])->domain = &gsched_domain;
```
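Since `exec_cost`, `period`, and `relative_deadline` are all `CONTAINER_PERIOD`, each container task is an implicit-deadline server of utilization exactly one, consistent with the explicit `to_fp(1)` assignment. A standalone check of that identity, with `FP_SHIFT` as an assumed stand-in for the kernel's fixed-point format:

```c
#include <assert.h>
#include <stdint.h>

#define FP_SHIFT 24	/* assumption; the kernel value may differ */
static uint64_t to_fp(uint64_t x)              { return x << FP_SHIFT; }
static uint64_t fp_div(uint64_t a, uint64_t b) { return (a << FP_SHIFT) / b; }

int main(void)
{
	const uint64_t CONTAINER_PERIOD = 50000000ULL;	/* ns */

	/* utilization = exec_cost / period = 1.0 in fixed point */
	assert(fp_div(CONTAINER_PERIOD, CONTAINER_PERIOD) == to_fp(1));
	return 0;
}
```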
```diff
@@ -1422,7 +1413,7 @@ static long edfsc_activate_plugin(void)
 
 	// Start the container boundary timer
 	hrtimer_start(&container_release_timer,
-			ns_to_ktime(now + LITMUS_QUANTUM_LENGTH_NS),
+			ns_to_ktime(now + CONTAINER_PERIOD),
 			HRTIMER_MODE_ABS_PINNED);
 
 	return 0;
```
