author    peter <ztong@cs.unc.edu>  2019-04-18 15:39:14 -0400
committer peter <ztong@cs.unc.edu>  2019-04-18 15:39:14 -0400
commit    3d4334bb0588d40b7067c5b221331fd5055d809b
tree      c32f2bf244deca66b974a1079a6dcbc3e158bb90 /litmus
parent    cf6a5d498ab119eae311e78b1825a2be00c268b3
Added edfsc parameters in rt_param.h
Updated references to the parameters in the edfsc plugin
Diffstat (limited to 'litmus')
 -rw-r--r--  litmus/sched_edfsc.c | 120
 1 file changed, 79 insertions(+), 41 deletions(-)
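The new edfsc parameters themselves live in rt_param.h, which is outside this diff; the plugin only dereferences them through `tsk_rt(t)->edfsc_params`. Judging from the fields used below (will_remove, deadline_timer, qnode, container_task, container_domain), the per-task state presumably looks roughly like the sketch that follows. This is an inference from the usage sites in this diff, not the actual contents of rt_param.h; the exact types, names, and layout are assumptions.

```c
/* Hypothetical sketch of the per-task EDF-SC state implied by the usage
 * sites in this diff; the real definition is in rt_param.h and may differ.
 */
#include <linux/hrtimer.h>
#include <linux/list.h>

struct cont_domain;                        /* per-container EDF domain, defined by the plugin */
typedef struct cont_domain cont_domain_t;

typedef struct edfsc_params {
	struct task_struct	*container_task;   /* container that a fixed task is assigned to */
	cont_domain_t		*container_domain; /* that container's local scheduling domain */
	struct hrtimer		deadline_timer;    /* armed in task_exit, fires at the job deadline */
	struct list_head	*qnode;            /* node queued onto pending_adds in task_new */
	int			will_remove;       /* task is exiting; utilization to be reclaimed */
} edfsc_params;
```

If that guess holds, most of the churn below is mechanical: every `t->edfsc_params.X` access becomes `tsk_rt(t)->edfsc_params.X`, since the parameter block is now embedded in `struct rt_param` and reached through LITMUS^RT's `tsk_rt()` accessor.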
diff --git a/litmus/sched_edfsc.c b/litmus/sched_edfsc.c
index 3abe2cf93664..1f6e82a9b954 100644
--- a/litmus/sched_edfsc.c
+++ b/litmus/sched_edfsc.c
@@ -125,14 +125,8 @@ static void update_cpu_position(cpu_entry_t *entry)
 	bheap_insert(cpu_lower_prio, &edfsc_cpu_heap, entry->hn);
 }
 
-/////////////////////////////////////////////////////////////////////////////////////
-/*
- *
- * GLOBAL SCHEDULING
- *
- */
-
-/* link_task_to_cpu - Update the link of a CPU.
+/* link_task_to_cpu - Links a migrating task or container to a CPU
+ * Update the link of a CPU.
  * Handles the case where the to-be-linked task is already
  * scheduled on a different CPU.
  */
@@ -144,6 +138,7 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 	int on_cpu;
 
 	BUG_ON(linked && !is_realtime(linked));
+	BUG_ON(is_fixed(linked));
 
 	/* Currently linked task is set to be unlinked. */
 	if (entry->linked) {
@@ -188,6 +183,30 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 	update_cpu_position(entry);
 }
 
+/* unlink - Make sure a task is not linked any longer to an entry
+ * where it was linked before. Must hold gsnedf_lock.
+ */
+static noinline void unlink(struct task_struct* t)
+{
+	cpu_entry_t *entry;
+
+	if (t->rt_param.linked_on != NO_CPU) {
+		/* unlink */
+		entry = &per_cpu(edfsc_cpu_entries, t->rt_param.linked_on);
+		t->rt_param.linked_on = NO_CPU;
+		link_task_to_cpu(NULL, entry);
+	} else if (is_queued(t)) {
+		/* This is an interesting situation: t is scheduled,
+		 * but was just recently unlinked. It cannot be
+		 * linked anywhere else (because then it would have
+		 * been relinked to this CPU), thus it must be in some
+		 * queue. We must remove it from the list in this
+		 * case.
+		 */
+		remove(&gsched_domain, t);
+	}
+}
+
 static void g_preempt_check(void)
 {
 	struct task_struct *task;
@@ -234,7 +253,7 @@ static int c_preempt_check(container_domain_t* container)
 	return 0;
 }
 
-//migrating or container task job_completion
+//migrating or container task job_completion, called from edfsc_gschedule
 static noinline void g_job_completion(struct task_struct* t, int forced)
 {
 	BUG_ON(!t);
@@ -247,34 +266,46 @@ static noinline void g_job_completion(struct task_struct* t, int forced)
 	tsk_rt(t)->completed = 0;
 
 	//is at later of job completion and job deadline
-	if (is_migrating(t) && t->rt_param.job_params.lateness > 0 && t->edfsc_params.will_remove) {
+	if (is_migrating(t) && t->rt_param.job_params.lateness > 0 && tsk_rt(t)->edfsc_params.will_remove) {
 		//update task utilizations
 	}
 
-	/* prepare for next period */
-	prepare_for_next_period(t);
-	if (is_early_releasing(t) || is_released(t, litmus_clock()))
-		sched_trace_task_release(t);
-	/* unlink */
-	unlink(t);
-	/* requeue
-	 * But don't requeue a blocking task. */
-	if (is_current_running())
-		requeue(t, gsched_domain);
-	g_preempt_check();
+	if (is_migrating(t) && tsk_rt(t)->edfsc_params.will_remove) {
+		if (t->rt_param.job_params.lateness > 0) {
+			//remove the task now
+		}
+	}
+	else {
+		/* prepare for next period */
+		prepare_for_next_period(t);
+		if (is_early_releasing(t) || is_released(t, litmus_clock()))
+			sched_trace_task_release(t);
+		/* unlink */
+		unlink(t);
+		/* requeue
+		 * But don't requeue a blocking task. */
+		if (is_current_running()) {
+			requeue(t, gsched_domain);
+			g_preempt_check();
+		}
+	}
 }
 
-//fixed task job_completion
+//fixed task job_completion, called from edfsc_cschedule
 static void c_job_completion(struct task_struct* t, int forced)
 {
 	sched_trace_task_completion(t, forced);
 	TRACE_TASK(t, "job_completion(forced=%d).\n", forced);
 
 	tsk_rt(t)->completed = 0;
-	prepare_for_next_period(t);
 
-	if (t->rt_param.job_params.lateness > 0 && t->edfsc_params.will_remove) {
-		//update task utilizations
+	if (tsk_rt(t)->edfsc_params.will_remove) {
+		if (t->rt_param.job_params.lateness > 0) {
+			//remove the task now
+		}
+	}
+	else {
+		prepare_for_next_period(t);
 	}
 }
 
@@ -292,10 +323,10 @@ static void g_finish_switch(struct task_struct *prev)
 
 //takes in the container_domain pointer in container task_struct
 //assuming prev is previous task running on the processor before calling schedule
-static struct task_struct* edfsc_cschedule(rt_domain_t* edf, struct task_struct * prev)
+static struct task_struct* edfsc_cschedule(cont_domain_t* cedf, struct task_struct * prev)
 {
-	cont_domain_t* cedf = container_of(edf, cont_domain_t, domain);
+	rt_domain_t* edf = cedf.domain;
 
 	struct task_struct* next;
 
 	int out_of_time, sleep, preempt,
@@ -358,7 +389,7 @@ static struct task_struct* edfsc_cschedule(rt_domain_t* edf, struct task_struct
 		 * the appropriate queue.
 		 */
 		if (cedf->scheduled && !blocks)
-			requeue(pedf->scheduled, edf);
+			requeue(cedf->scheduled, edf);
 		next = __take_ready(edf);
 	} else
 		/* Only override Linux scheduler if we have a real-time task
@@ -394,10 +425,10 @@ static struct task_struct* edfsc_gschedule(struct task_struct * prev)
 	BUG_ON(entry->scheduled && !is_realtime(prev));
 	BUG_ON(is_realtime(prev) && !entry->scheduled);
 
-	if (prev && prev->container_task) { //update container budget if fixed task
-		cdomain = prev->container_task->container_domain;
+	if (prev && is_fixed(prev)) { //update container budget if fixed task
+		cont_domain_t* cdomain = tsk_rt(prev->container_task)->edfsc_params.container_domain;
 		prev->job_params.exec_time -= cdomain->changed_budget;
-		prev->container_task->job_params.exec_time +=
+		tsk_rt(prev->edfsc_params)->container_task->job_params.exec_time +=
 			prev->job_params.exec_time - cdomain->scheduled_last_exec_time;
 	}
 
@@ -423,7 +454,7 @@ static struct task_struct* edfsc_gschedule(struct task_struct * prev)
 		prev->state, signal_pending(prev), is_cont);
 
 	if (is_cont && !sleep && !preempt && !out_of_time)
-		return edfsc_cschedule(entry->scheduled->container_domain, prev);
+		return edfsc_cschedule(tsk_rt(entry->scheduled)->edfsc_params.container_domain, prev);
 
 	if (entry->linked && preempt)
 		TRACE_TASK(prev, "will be preempted by %s/%d\n",
@@ -452,7 +483,7 @@ static struct task_struct* edfsc_gschedule(struct task_struct * prev)
 	 * for blocked jobs).
 	 */
 	if (!np && (out_of_time || sleep))
-		curr_job_completion(entry->scheduled, !sleep);
+		g_job_completion(entry->scheduled, !sleep);
 
 	/* Link pending task if we became unlinked.
 	 */
@@ -505,11 +536,11 @@ static void edfsc_task_new(struct task_struct* t, int on_rq, int is_scheduled) {
 
 	raw_spin_lock_irqsave(&g_lock, flags);
 
-	t->edfsc_params.will_remove = 0;
-	t->rt_param.sporadic_release = 0;
-	hrtimer_init(&(t->edfsc_params.deadline_timer), CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	tsk_rt(t)->edfsc_params.will_remove = 0;
+	tsk_rt(t).sporadic_release = 0;
+	hrtimer_init(&(tsk_rt(t)->edfsc_params.deadline_timer), CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 
-	list_add_tail(t->edfsc_params.qnode, pending_adds);
+	list_add_tail(tsk_rt(t)->edfsc_params.qnode, pending_adds);
 
 	if (is_scheduled) {
 		entry = &per_cpu(gsnedf_cpu_entries, task_cpu(t));
@@ -523,19 +554,26 @@ static void edfsc_task_new(struct task_struct* t, int on_rq, int is_scheduled) {
 	raw_spin_unlock_irqrestore(&g_lock, flags);
 }
 
+//finds the task_struct of the hrtimer set by task_exit
+static struct task_struct* task_of_timer(struct hrtimer* timer) {
+	edfsc_params* a = container_of(timer, edfsc_params, deadline_timer);
+	rt_param* b = container_of(a, rt_params, edfsc_params);
+	return container_of(b, struct task_struct, rt_param);
+}
+
 static void edfsc_task_exit(struct task_struct* t) {
 
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&g_lock, flags);
 
-	t->edfsc_params.will_remove = 1;
+	tsk_rt(t)->edfsc_params.will_remove = 1;
 
 	if (is_released(t, litmus_clock())) {
-		hrtimer_start(&(t->edfsc_params.deadline_timer),
+		hrtimer_start(&(tsk_rt(t)->edfsc_params.deadline_timer),
 			ns_to_ktime(t->rt_param.job_params.deadline),
 			HRTIMER_MODE_ABS_PINNED);
-		t->edfsc_params.deadline_timer.function = asdf; //TODO: update system utilizations
+		tsk_rt(t)->edfsc_params.deadline_timer.function = asdf; //TODO: hook up task removal function
 	}
 	else {
 		//update system utilizations