author     peter <ztong@cs.unc.edu>  2019-04-21 23:07:10 -0400
committer  peter <ztong@cs.unc.edu>  2019-04-21 23:07:10 -0400
commit     3e3c067bc33d762da41dfc8c69482f3cd48dced2 (patch)
tree       79032ca1c1dbcaa292f4bd9b190a59e5405d4e39 /litmus
parent     a758725f19705759bbe9707fb0d56043f1748669 (diff)

Implemented container_boundary function (untested)

Diffstat (limited to 'litmus')
-rw-r--r--  litmus/sched_edfsc.c | 229
1 file changed, 199 insertions(+), 30 deletions(-)
diff --git a/litmus/sched_edfsc.c b/litmus/sched_edfsc.c
index e6eb50d05e46..3915477e59fd 100644
--- a/litmus/sched_edfsc.c
+++ b/litmus/sched_edfsc.c
@@ -3,6 +3,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/list.h>
+#include <linux/sort.h>
 
 #include <litmus/debug_trace.h>
 #include <litmus/litmus.h>
@@ -33,11 +34,18 @@ typedef struct {
 	struct task_struct* scheduled; //fixed task
 	lt_t scheduled_last_exec_time; //exec_time of the scheduled task when it was last scheduled
 	lt_t changed_budget; //change to scheduled task's exec time due to container budget constraints
-	u64 sys_util, future_sys_util;
+	u64 f_util, future_f_util;
+	struct bheap_node* hn;
 #define c_lock domain.ready_lock
 } cont_domain_t;
 
 struct list_head pending_adds;
+INIT_LIST_HEAD(&pending_adds);
+
+struct list_head migrating_tasks;
+INIT_LIST_HEAD(&migrating_tasks);
+
+struct hrtimer container_release_timer;
 
 DEFINE_PER_CPU(cpu_entry_t, edfsc_cpu_entries);
 
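The two new lists hold tasks waiting to be admitted at the next container boundary (pending_adds) and tasks that end up scheduled by the global EDF domain (migrating_tasks). INIT_LIST_HEAD() is a runtime statement; at file scope the usual kernel idiom is LIST_HEAD(), which declares and initializes the head in one step. A minimal sketch of that idiom with the stock <linux/list.h> API (a sketch, not the exact form used in this patch):

    #include <linux/list.h>

    /* declare and initialize empty list heads at file scope */
    static LIST_HEAD(pending_adds);     /* tasks awaiting admission at the next boundary */
    static LIST_HEAD(migrating_tasks);  /* tasks handled by the global EDF domain */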
@@ -45,15 +53,16 @@ cpu_entry_t* edfsc_cpus[NR_CPUS];
 
 struct task_struct* container_tasks[NR_CPUS];
 
-static cont_domaint_t container_domains[NR_CPUS];
+static cont_domain_t container_domains[NR_CPUS];
+
+static cont_domain_t* container_list[NR_CPUS];
 
 static rt_domain_t gsched_domain;
 #define g_lock (gsched_domain.ready_lock)
 
+u64 m_util, future_m_util;
 u64 sys_util, future_sys_util;
 
-static u64 cont_job_id;
-
 #define is_container(task) task && task->container_domain == &gsched_domain
 #define is_fixed(task) task && task->container_task != NULL
 #define is_migrating(task) task && task->container_domain == NULL && task->container_task == &gsched_domain
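sys_util, m_util and the per-container f_util fields are all kept in the 20-bit fixed-point format defined by the FP_SHIFT macros that follow in the next hunk. A standalone sketch of that arithmetic in plain userspace C, with hypothetical task parameters (25 ms cost, 100 ms period):

    #include <stdio.h>
    #include <stdint.h>

    #define FP_SHIFT 20
    #define to_fp(a)     ((uint64_t)(a) << FP_SHIFT)
    #define from_fp(a)   ((a) >> FP_SHIFT)
    #define fp_div(a, b) (to_fp(a) / (b))

    int main(void)
    {
    	uint64_t exec_cost = 25, period = 100;      /* hypothetical task parameters */
    	uint64_t util = fp_div(exec_cost, period);  /* 0.25 of one CPU, in fixed point */
    	uint64_t two_tasks = util + util;           /* utilizations add directly */

    	printf("util      = %llu (fp) = %.2f\n",
    	       (unsigned long long)util, (double)util / (1 << FP_SHIFT));
    	printf("two tasks = %.2f of one CPU\n", (double)two_tasks / (1 << FP_SHIFT));
    	printf("budget for that util on a 100 ms period = %llu ms\n",
    	       (unsigned long long)from_fp(util * period));
    	return 0;
    }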
@@ -61,6 +70,143 @@ static u64 cont_job_id;
 #define FP_SHIFT 20
 #define fp_div(a, b) a * (1 << FP_SHIFT) / b
 #define to_fp(a) a << FP_SHIFT
+#define from_fp(a) a >> FP_SHIFT
+
+static int container_lower_prio(const void* _a, const void* _b)
+{
+	const cont_domain_t *a, *b;
+	a = *(const cont_domain_t**)_a;
+	b = *(const cont_domain_t**)_b;
+	if (a->future_f_util < b->future_f_util) return -1;
+	if (a->future_f_util > b->future_f_util) return 1;
+	return 0;
+}
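container_lower_prio is written for the kernel's sort() from <linux/sort.h>, which passes the comparator pointers into the array, i.e. cont_domain_t** here, so the arguments must be dereferenced once after the cast. The same pattern with libc qsort(), as a runnable sketch using a stripped-down stand-in for cont_domain_t:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    typedef struct { uint64_t future_f_util; } cont_domain_t;  /* stand-in with one field */

    static int container_lower_prio(const void* _a, const void* _b)
    {
    	const cont_domain_t *a = *(const cont_domain_t* const*)_a;
    	const cont_domain_t *b = *(const cont_domain_t* const*)_b;
    	if (a->future_f_util < b->future_f_util) return -1;
    	if (a->future_f_util > b->future_f_util) return 1;
    	return 0;
    }

    int main(void)
    {
    	cont_domain_t d[3] = { {700}, {200}, {500} };
    	cont_domain_t *list[3] = { &d[0], &d[1], &d[2] };
    	size_t i;

    	qsort(list, 3, sizeof(list[0]), container_lower_prio);  /* ascending future_f_util */
    	for (i = 0; i < 3; i++)
    		printf("%llu\n", (unsigned long long)list[i]->future_f_util);
    	return 0;
    }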
+
+// finds the task_struct containing a given pending_adds/migrating_tasks list node (queued by task_exit)
+static struct task_struct* task_of_list_node(struct list_head* node)
+{
+	struct edfsc_params* a = container_of(node, struct edfsc_params, qnode);
+	struct rt_param* b = container_of(a, struct rt_param, edfsc_params);
+	return container_of(b, struct task_struct, rt_param);
+}
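task_of_list_node walks back from the embedded list node to the enclosing task_struct with three container_of steps (qnode -> edfsc_params -> rt_param -> task_struct). A runnable userspace sketch of the same offsetof trick, with mocked-up stand-ins for the kernel structs:

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
    	((type *)((char *)(ptr) - offsetof(type, member)))

    struct list_head { struct list_head *next, *prev; };

    /* mocked-up nesting mirroring task_struct -> rt_param -> edfsc_params -> qnode */
    struct edfsc_params { struct list_head qnode; };
    struct rt_param     { struct edfsc_params edfsc_params; };
    struct task_struct  { int pid; struct rt_param rt_param; };

    static struct task_struct* task_of_list_node(struct list_head* node)
    {
    	struct edfsc_params* a = container_of(node, struct edfsc_params, qnode);
    	struct rt_param* b = container_of(a, struct rt_param, edfsc_params);
    	return container_of(b, struct task_struct, rt_param);
    }

    int main(void)
    {
    	struct task_struct t = { .pid = 42 };
    	printf("%d\n", task_of_list_node(&t.rt_param.edfsc_params.qnode)->pid);  /* 42 */
    	return 0;
    }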
+
+static enum hrtimer_restart container_boundary(struct hrtimer* timer) {
+
+	int i;
+	struct list_head *it;
+	struct list_head *temp;
+	u64 u_extra;
+	int need_reweight = 0;
+
+	raw_spin_lock(&g_lock);
+
+	list_for_each_safe(it, temp, &pending_adds) {
+		cont_domain_t* container = NULL;
+		struct task_struct* t = task_of_list_node(it);
+		u_extra = to_fp(NR_CPUS) - sys_util;
+		if (u_extra >= tsk_rt(t)->utilization) {
+			for (i = 0; i < NR_CPUS; i++) {
+				s64 leftover = to_fp(1) -
+					container_domains[i].future_f_util - tsk_rt(t)->utilization;
+				if (leftover >= 0) {
+					container = &(container_domains[i]);
+					break;
+				}
+			}
+
+			if (container) {
+				tsk_rt(t)->edfsc_params.container_domain = container;
+				requeue(t);
+				container->f_util += tsk_rt(t)->utilization;
+				container->future_f_util += tsk_rt(t)->utilization;
+			}
+			else {
+				tsk_rt(t)->edfsc_params.container_domain = &gsched_domain;
+				requeue(t);
+				m_util += tsk_rt(t)->utilization;
+				future_m_util += tsk_rt(t)->utilization;
+				list_add(&tsk_rt(t)->edfsc_params.qnode, &migrating_tasks);
+			}
+			sys_util += tsk_rt(t)->utilization;
+			need_reweight = 1;
+		}
+		else {
+			struct sched_param param = {0};
+			sched_setscheduler_nocheck(t, SCHED_NORMAL, &param);
+			//TODO: how to make the task not scheduled by us anymore?
+		}
+		list_del(it);
+	}
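The pending_adds pass is a first-fit admission test: a task is accepted only if the whole system has spare utilization for it; it then becomes fixed in the first container that still has room, and otherwise joins the migrating tasks. A runnable sketch of just that placement decision, with made-up utilizations expressed as fractions of one CPU rather than fixed point:

    #include <stdio.h>

    #define NR_CPUS 4

    static double container_util[NR_CPUS] = { 0.7, 0.8, 0.9, 0.6 };
    static double sys_util = 3.0;

    /* returns the container index the task becomes fixed in, -1 if it must migrate,
     * or -2 if the system has no room at all */
    static int admit(double u)
    {
    	int i;
    	if (u > NR_CPUS - sys_util)
    		return -2;                      /* reject: not enough spare capacity */
    	for (i = 0; i < NR_CPUS; i++) {
    		if (container_util[i] + u <= 1.0) {
    			container_util[i] += u; /* becomes a fixed task here */
    			sys_util += u;
    			return i;
    		}
    	}
    	sys_util += u;                          /* accepted, but has to migrate */
    	return -1;
    }

    int main(void)
    {
    	printf("u=0.2 -> %d\n", admit(0.2));    /* fits in container 0 */
    	printf("u=0.5 -> %d\n", admit(0.5));    /* no single container fits: migrating */
    	return 0;
    }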
+
+	list_for_each(it, &migrating_tasks) {
+		struct task_struct* t = task_of_list_node(it);
+		if (!(tsk_rt(t)->edfsc_params.will_remove) && !is_released(t, litmus_clock())
+		    && get_deadline(t) < get_deadline(container_tasks[0]) + get_period(container_tasks[0])) {
+			cont_domain_t* container = NULL;
+			tsk_rt(t)->edfsc_params.will_remove = 1;
+			tsk_rt(t)->edfsc_params.move_to = NULL;
+
+			for (i = 0; i < NR_CPUS; i++) {
+				s64 leftover = to_fp(1) -
+					container_domains[i].future_f_util - tsk_rt(t)->utilization;
+				if (leftover >= 0) {
+					container = &(container_domains[i]);
+					break;
+				}
+			}
+			if (container) {
+				container->future_f_util += tsk_rt(t)->utilization;
+				tsk_rt(t)->edfsc_params.move_to = container;
+				need_reweight = 1;
+			}
+		}
+	}
+
+	if (need_reweight) {
+		u64 u_extra = future_sys_util;
+		int i = 0;
+		sort(container_list, NR_CPUS, sizeof(cont_domain_t*), container_lower_prio, NULL);
+		while (i < NR_CPUS && u_extra >= to_fp(1) - container_list[i]->future_f_util) {
+			struct task_struct* t = container_list[i]->container;
+			tsk_rt(t)->task_params.exec_cost = tsk_rt(t)->task_params.period;
+			tsk_rt(t)->task_params.utilization = to_fp(1);
+			u_extra -= to_fp(1) - container_list[i]->future_f_util;
+			i++;
+		}
+		int remaining = NR_CPUS - i;
+		while (i < NR_CPUS) {
+			struct task_struct* t = container_list[i]->container;
+			tsk_rt(t)->task_params.utilization = container_list[i]->future_f_util + u_extra / remaining;
+			tsk_rt(t)->task_params.exec_cost = from_fp(tsk_rt(t)->task_params.utilization * tsk_rt(t)->task_params.period);
+			i++;
+		}
+	}
+
+	INIT_LIST_HEAD(&pending_adds);
+
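After any admission or removal, the containers are re-sorted by future_f_util and their budgets are reassigned: while the available system utilization covers the gap, a container is stretched to a full CPU (utilization 1), and whatever is left over is split evenly across the remaining containers on top of their fixed-task demand. A runnable sketch of that redistribution with plain doubles and a hypothetical amount of spare utilization:

    #include <stdio.h>

    #define NR_CPUS 4

    int main(void)
    {
    	/* containers already sorted by increasing future fixed-task utilization */
    	double f_util[NR_CPUS] = { 0.2, 0.4, 0.7, 0.9 };
    	double budget[NR_CPUS];
    	double spare = 1.5;   /* hypothetical unused system utilization to hand out */
    	int i = 0, remaining;

    	/* stretch containers to a full CPU while the spare capacity covers the gap */
    	while (i < NR_CPUS && spare >= 1.0 - f_util[i]) {
    		budget[i] = 1.0;
    		spare -= 1.0 - f_util[i];
    		i++;
    	}
    	/* split whatever is left evenly across the rest, on top of their demand */
    	remaining = NR_CPUS - i;
    	for (; i < NR_CPUS; i++)
    		budget[i] = f_util[i] + spare / remaining;

    	for (i = 0; i < NR_CPUS; i++)
    		printf("container %d: budget %.2f of its period\n", i, budget[i]);
    	return 0;
    }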
+	for (i = 0; i < NR_CPUS; i++) {
+		struct task_struct* t = container_tasks[i];
+		if (budget_exhausted(t)) {
+			prepare_for_next_period(t);
+			if (is_early_releasing(t) || is_released(t, litmus_clock()))
+				sched_trace_task_release(t);
+			/* requeue
+			 * But don't requeue a blocking task. */
+			if (is_current_running()) { //since we don't support blocking, this should always be true
+				if (is_container(t) && is_migrating(tsk_rt(t)->edfsc_params.container_domain->scheduled)) {
+					requeue(tsk_rt(t)->edfsc_params.container_domain->scheduled);
+				}
+				requeue(t);
+				g_preempt_check();
+			}
+		} else {
+			tsk_rt(t)->edfsc_params.can_release = 1;
+		}
+	}
+
+	raw_spin_unlock(&g_lock);
+}
 
 //preempts whatever is scheduled on that core. If it's a container, then preempt its fixed task
 //works if entry->scheduled is null
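container_boundary is meant to be the handler of the new container_release_timer, firing at every container period boundary. A hedged sketch of how such a periodic hrtimer is typically wired up with the stock hrtimer API (the CONTAINER_PERIOD_NS constant and the edfsc_setup_boundary_timer() helper are illustrative, not part of this patch); the callback forwards its own expiry by one period and returns HRTIMER_RESTART:

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    #define CONTAINER_PERIOD_NS (4 * NSEC_PER_MSEC)   /* hypothetical container period */

    static struct hrtimer container_release_timer;

    static enum hrtimer_restart container_boundary(struct hrtimer* timer)
    {
    	/* ... admission, reweighting and container release work goes here ... */

    	/* push the expiry forward by one period and keep the timer running */
    	hrtimer_forward_now(timer, ns_to_ktime(CONTAINER_PERIOD_NS));
    	return HRTIMER_RESTART;
    }

    static void edfsc_setup_boundary_timer(void)   /* illustrative helper */
    {
    	hrtimer_init(&container_release_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
    	container_release_timer.function = container_boundary;
    	hrtimer_start(&container_release_timer,
    	              ns_to_ktime(CONTAINER_PERIOD_NS), HRTIMER_MODE_REL);
    }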
@@ -267,22 +413,21 @@ static int c_preempt_check(container_domain_t* container)
 
 static void g_remove_task(struct task_struct *t)
 {
-	if (is_queued(t)) {
-		remove(&gsched_domain, t);
-	}
-	sys_util -= tsk_rt(t)->task_params.utilization;
-	future_sys_util -= tsk_rt(t)->task_params.utilization;
+	m_util -= tsk_rt(t)->task_params.utilization;
+	future_m_util -= tsk_rt(t)->task_params.utilization;
+	if (tsk_rt(t)->edfsc_params.move_to) {
+		tsk_rt(t)->edfsc_params.container_domain = tsk_rt(t)->edfsc_params.move_to;
+		requeue(t);
+		tsk_rt(t)->edfsc_params.container_domain->f_util += tsk_rt(t)->utilization;
+	}
 }
 
 static void c_remove_task(struct task_struct *t)
 {
 	struct task_struct* container_task = tsk_rt(t)->edfsc_params.container_task;
-	if (is_queued(t)) {
-		remove(tsk_rt(container_task)->edfsc_params.container_domain.domain, t);
-	}
-	tsk_rt(container_task)->edfsc_params.container_domain.sys_util -=
+	tsk_rt(container_task)->edfsc_params.container_domain->f_util -=
 		tsk_rt(t)->task_params.utilization;
-	tsk_rt(container_task)->edfsc_params.container_domain.future_sys_util -=
+	tsk_rt(container_task)->edfsc_params.container_domain->future_f_util -=
 		tsk_rt(t)->task_params.utilization;
 }
 
@@ -298,18 +443,22 @@ static noinline void g_job_completion(struct task_struct* t, int forced)
 	/* set flags */
 	tsk_rt(t)->completed = 0;
 
+	/* unlink */
+	unlink(t);
+
 	if (is_migrating(t) && tsk_rt(t)->edfsc_params.will_remove) {
 		if (t->rt_param.job_params.lateness > 0) {
 			// remove the task now
+			if (is_queued(t))
+				remove(tsk_rt(t)->edfsc_params.container_domain, t);
 			g_remove_task(t);
 		}
-	} else {
+	} else if (is_migrating(t) || (is_container(t) && tsk_rt(t)->edfsc_params.can_release)) {
+		tsk_rt(t)->edfsc_params.can_release = 0; //only matters for containers
 		/* prepare for next period */
 		prepare_for_next_period(t);
 		if (is_early_releasing(t) || is_released(t, litmus_clock()))
 			sched_trace_task_release(t);
-		/* unlink */
-		unlink(t);
 		/* requeue
 		 * But don't requeue a blocking task. */
 		if (is_current_running()) { //since we don't support blocking, this should always be true
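Both completion paths defer reclaiming an exiting task's utilization until it can no longer contribute demand: if the final job finished past its deadline (positive lateness) the bookkeeping can be undone right away, otherwise it waits for the deadline timer armed in edfsc_task_exit. A small sketch of that decision with hypothetical times in nanoseconds:

    #include <stdio.h>
    #include <stdint.h>

    /* returns 1 if the exiting task's utilization can be reclaimed immediately */
    static int reclaim_now(int64_t completion, int64_t deadline, int will_remove)
    {
    	int64_t lateness = completion - deadline;  /* > 0 means the job was tardy */
    	return will_remove && lateness > 0;
    }

    int main(void)
    {
    	printf("%d\n", reclaim_now(105000000, 100000000, 1)); /* tardy: reclaim now */
    	printf("%d\n", reclaim_now( 95000000, 100000000, 1)); /* early: wait for the deadline timer */
    	return 0;
    }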
@@ -333,6 +482,8 @@ static void c_job_completion(struct task_struct* t, int forced)
 	if (tsk_rt(t)->edfsc_params.will_remove) {
 		if (t->rt_param.job_params.lateness > 0) {
 			// remove the task now
+			if (is_queued(t))
+				remove(tsk_rt(t)->edfsc_params.container_domain, t);
 			c_remove_task(t);
 		}
 	} else {
@@ -430,11 +581,8 @@ static struct task_struct* edfsc_cschedule(cont_domain_t* cedf, struct task_stru
 	 * the appropriate queue.
 	 */
 	if (cedf->scheduled && !blocks)
-		if (is_fixed(cedf->scheduled))
-			requeue(cedf->scheduled, edf);
-		else {
-			requeue(cedf->scheduled, &gsched_domain);
-		}
+		requeue(cedf->scheduled);
+
 	next = __take_ready(edf);
 	} else
 	/* Only override Linux scheduler if we have a real-time task
@@ -605,6 +753,7 @@ static void edfsc_task_new(struct task_struct* t, int on_rq, int is_scheduled)
 	tsk_rt(t)->edfsc_params.will_remove = 0;
 	tsk_rt(t).sporadic_release = 0;
 	hrtimer_init(&(tsk_rt(t)->edfsc_params.deadline_timer), CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	tsk_rt(t)->edfsc_params.timer_armed = 0;
 
 	tsk_rt(t)->task_params.utilization = fp_div(tsk_rt(t)->task_params.exec_cost, tsk_rt(t)->task_params.period);
 
@@ -635,13 +784,14 @@ static enum hrtimer_restart task_deadline_callback(struct hrtimer* timer) {
 
 	BUG_ON(is_container(t));
 
-	if (is_completed(t)) {
+	if (!is_released(t, litmus_clock()) || budget_exhausted(t)) {
 		if (is_fixed(t)) {
 			c_remove_task(t);
 		} else {
 			g_remove_task(t);
 		}
 	}
+	return HRTIMER_NORESTART;
 }
 
 static void edfsc_task_exit(struct task_struct* t)
@@ -652,15 +802,34 @@ static void edfsc_task_exit(struct task_struct* t)
 
 	tsk_rt(t)->edfsc_params.will_remove = 1;
 
-	if (is_released(t, litmus_clock())) {
-		hrtimer_start(&(tsk_rt(t)->edfsc_params.deadline_timer),
-			ns_to_ktime(t->rt_param.job_params.deadline),
-			HRTIMER_MODE_ABS_PINNED);
-		tsk_rt(t)->edfsc_params.deadline_timer.function = asdf; //TODO: hook up task removal function
+	if (!is_released(t, litmus_clock())) {
+		if (lt_after(tsk_rt(t)->edfsc_params.prev_deadline, litmus_clock())) {
+			if (is_queued(t))
+				remove(tsk_rt(t)->edfsc_params.container_domain, t);
+			tsk_rt(t)->edfsc_params.deadline_timer.function = task_deadline_callback;
+			hrtimer_start(&(tsk_rt(t)->edfsc_params.deadline_timer),
+				ns_to_ktime(tsk_rt(t)->edfsc_params.prev_deadline),
+				HRTIMER_MODE_ABS_PINNED);
+			tsk_rt(t)->edfsc_params.timer_armed = 1;
+		}
+		else {
+			if (is_queued(t))
+				remove(tsk_rt(t)->edfsc_params.container_domain, t);
+			if (
+		}
 	}
 	else {
-		//update system utilizations
-		//next job release will detect will_remove and remove the job
+		//reserve the utilization, but remove it from being scheduled by litmus
+		unlink(t);
+		if (tsk_rt(t)->scheduled_on != NO_CPU) {
+			edfsc_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL;
+			tsk_rt(t)->scheduled_on = NO_CPU;
+		}
+		tsk_rt(t)->edfsc_params.deadline_timer.function = task_deadline_callback;
+		hrtimer_start(&(tsk_rt(t)->edfsc_params.deadline_timer),
+			ns_to_ktime(get_deadline(t)),
+			HRTIMER_MODE_ABS_PINNED);
+		tsk_rt(t)->edfsc_params.timer_armed = 1;
 	}
 
 	raw_spin_unlock_irqrestore(&g_lock, flags);
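The exit path never drops an exiting task's utilization immediately; it picks the point after which the task can no longer generate demand (the previous job's deadline if no new job has been released yet, otherwise the current job's deadline) and arms the per-task deadline_timer for that instant, whose task_deadline_callback then calls c_remove_task()/g_remove_task(). A small userspace sketch of choosing that reclamation instant, with hypothetical values:

    #include <stdio.h>
    #include <stdint.h>

    /* pick the time at which an exiting task's utilization may be reclaimed */
    static uint64_t reclaim_time(uint64_t now, int released,
                                 uint64_t prev_deadline, uint64_t cur_deadline)
    {
    	if (!released)
    		return prev_deadline > now ? prev_deadline : now; /* last job's window */
    	return cur_deadline;                                      /* current job's window */
    }

    int main(void)
    {
    	printf("%llu\n", (unsigned long long)reclaim_time(100, 0, 150, 0));   /* 150 */
    	printf("%llu\n", (unsigned long long)reclaim_time(100, 1, 0, 180));   /* 180 */
    	return 0;
    }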