author		peter <ztong@cs.unc.edu>	2019-04-19 16:17:42 -0400
committer	peter <ztong@cs.unc.edu>	2019-04-19 16:17:42 -0400
commit		a758725f19705759bbe9707fb0d56043f1748669 (patch)
tree		7374d8f58ca096432569eca7db9ddb12cc82bf8d /litmus
parent		1cfafe5a060b63bc6e9a4a79d848144214aa2756 (diff)
Implemented background scheduling
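
In outline (a minimal illustrative sketch, not code from this patch: pick_background_task is a hypothetical helper, while cont_domain_t, gsched_domain, __take_ready, and budget_remaining are names used in the diff below), a container that runs out of local work no longer idles away its remaining budget; it spends that slack on a migrating task taken from the global domain:

/* Hypothetical sketch of background scheduling; the real logic lives in
 * edfsc_cschedule() below, which additionally sets aside any container
 * tasks it pulls from the global ready queue. */
static struct task_struct* pick_background_task(cont_domain_t *cedf)
{
	/* prefer a fixed task from the container's own EDF queue */
	struct task_struct *next = __take_ready(cedf->domain);

	/* otherwise, spend the container's remaining budget (its slack)
	 * on a migrating task from the global domain */
	if (!next && budget_remaining(cedf->container) > 0)
		next = __take_ready(&gsched_domain);

	return next;
}

The consumed time is billed to the container: edfsc_cschedule() pre-charges the background task for at most the container's remaining budget, and edfsc_gschedule() later settles the difference.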
Diffstat (limited to 'litmus')

 -rw-r--r--	litmus/sched_edfsc.c	| 113
 1 file changed, 81 insertions(+), 32 deletions(-)
diff --git a/litmus/sched_edfsc.c b/litmus/sched_edfsc.c
index 3db7c4f44822..e6eb50d05e46 100644
--- a/litmus/sched_edfsc.c
+++ b/litmus/sched_edfsc.c
@@ -54,15 +54,16 @@ u64 sys_util, future_sys_util;
 
 static u64 cont_job_id;
 
-#define is_container(task) task->container_domain == NULL
-#define is_fixed(task) task->container_task != NULL
-#define is_migrating(task) !is_fixed(task) && !is_container(task)
+#define is_container(task) ((task) && (task)->container_domain == &gsched_domain)
+#define is_fixed(task) ((task) && (task)->container_task != NULL)
+#define is_migrating(task) ((task) && (task)->container_domain == NULL && (task)->container_task == NULL)
 
 #define FP_SHIFT 20
 #define fp_div(a, b) a * (1 << FP_SHIFT) / b
 #define to_fp(a) a << FP_SHIFT
 
 //preempts whatever is scheduled on that core. If it's a container, then preempt its fixed task
+//works even if entry->scheduled is NULL
 static void preempt(cpu_entry_t *entry)
 {
 	if (is_container(entry->scheduled))
@@ -71,18 +72,18 @@ static void preempt(cpu_entry_t *entry)
 		preempt_if_preemptable(entry->scheduled, entry->cpu);
 }
 
-//requeues task in the specified domain
-static noinline void requeue(struct task_struct* task, rt_domain_t* domain)
+//requeues task in the domain recorded in its edfsc_params
+static noinline void requeue(struct task_struct* task)
 {
 	BUG_ON(!task);
 	/* sanity check before insertion */
 	BUG_ON(is_queued(task));
 
 	if (is_early_releasing(task) || is_released(task, litmus_clock()))
-		__add_ready(domain, task);
+		__add_ready(tsk_rt(task)->edfsc_params.container_domain, task);
 	else {
 		/* it has got to wait */
-		add_release(domain, task);
+		add_release(tsk_rt(task)->edfsc_params.container_domain, task);
 	}
 }
 
@@ -104,7 +105,7 @@ static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b)
 	/* Note that a and b are inverted: we want the lowest-priority CPU at
 	 * the top of the heap.
 	 */
-	return edf_higher_prio(b->linked, a->linked);
+	return (is_container(b->linked) && tsk_rt(b->linked)->utilization == to_fp(1)) || edf_higher_prio(b->linked, a->linked);
 }
 
 /* caller must hold gsnedf lock */
@@ -207,10 +208,12 @@ static noinline void unlink(struct task_struct* t)
 	}
 }
 
+// TODO: change local linking
 static void g_preempt_check(void)
 {
 	struct task_struct *task;
 	cpu_entry_t *last;
+	cpu_entry_t *target;
 
 #ifdef CONFIG_PREFER_LOCAL_LINKING
 	cpu_entry_t *local;
@@ -231,22 +234,32 @@ static void g_preempt_check(void)
 	for (last = lowest_prio_cpu();
 	     edf_preemption_needed(&gsched_domain, last->linked);
 	     last = lowest_prio_cpu()) {
+		target = last;
+		if (is_container(last->linked) && tsk_rt(last->linked)->utilization == to_fp(1))
+			break;
 		/* preemption necessary */
 		task = __take_ready(&gsched_domain);
-		TRACE("check_for_preemptions: attempting to link task %d to %d\n",
-		      task->pid, last->cpu);
-
+		// a container must be linked to its designated core
+		if (is_container(task))
+			target = edfsc_cpus[tsk_rt(task)->edfsc_params.id];
 		if (requeue_preempted_job(last->linked))
 			requeue(last->linked);
-
-		link_task_to_cpu(task, last);
-		preempt(last);
+		TRACE("check_for_preemptions: attempting to link task %d to %d\n",
+		      task->pid, target->cpu);
+		if (target != last) {
+			TRACE("check_for_preemptions: swapping tasks linked on %d and %d\n",
+			      last->cpu, target->cpu);
+			link_task_to_cpu(target->linked, last);
+			preempt(last);
+		}
+		link_task_to_cpu(task, target);
+		preempt(target);
 	}
 }
 
 static int c_preempt_check(container_domain_t* container)
 {
-	if (edf_preemption_needed(&container->domain, container->scheduled)) {
+	if (is_migrating(container->scheduled) || edf_preemption_needed(&container->domain, container->scheduled)) {
 		preempt(&container->domain);
 		return 1;
 	} else
@@ -266,7 +278,7 @@ static void c_remove_task(struct task_struct *t)
 {
 	struct task_struct* container_task = tsk_rt(t)->edfsc_params.container_task;
 	if (is_queued(t)) {
-		remove(tsk_rt(container_task)->edfsc_params.container_domain, t);
+		remove(tsk_rt(container_task)->edfsc_params.container_domain.domain, t);
 	}
 	tsk_rt(container_task)->edfsc_params.container_domain.sys_util -=
 		tsk_rt(t)->task_params.utilization;
@@ -300,8 +312,13 @@ static noinline void g_job_completion(struct task_struct* t, int forced)
 	unlink(t);
 	/* requeue
 	 * But don't requeue a blocking task. */
-	if (is_current_running()) {
-		requeue(t, gsched_domain);
+	if (is_current_running()) { // since we don't support blocking, this should always be true
+		// if a completing container was background-scheduling a
+		// migrating task, requeue that task as well
+		if (is_container(t) && is_migrating(tsk_rt(t)->edfsc_params.container_domain.scheduled)) {
+			requeue(tsk_rt(t)->edfsc_params.container_domain.scheduled);
+		}
+		requeue(t);
 		g_preempt_check();
 	}
 }
@@ -337,8 +352,14 @@ static void g_finish_switch(struct task_struct *prev)
 #endif
 }
 
+// constant priority: a bheap ordered by this acts as a FIFO stash
+static int fifo_prio(struct bheap_node* _a, struct bheap_node* _b) {
+	return 0;
+}
+
 // takes in the container_domain pointer in container task_struct
 // assuming prev is previous task running on the processor before calling schedule
+// the global lock must be held when calling this
 static struct task_struct* edfsc_cschedule(cont_domain_t* cedf, struct task_struct * prev)
 {
 	rt_domain_t* edf = cedf->domain;
@@ -357,6 +377,7 @@ static struct task_struct* edfsc_cschedule(cont_domain_t* cedf, struct task_stru
 	 */
 	BUG_ON(cedf->scheduled && cedf->scheduled != prev);
 	BUG_ON(cedf->scheduled && !is_realtime(prev));
+	BUG_ON(is_migrating(cedf->scheduled));
 
 	/* (0) Determine state */
 	exists = cedf->scheduled != NULL;
@@ -365,7 +386,7 @@ static struct task_struct* edfsc_cschedule(cont_domain_t* cedf, struct task_stru
 		&& budget_exhausted(cedf->scheduled);
 	np = exists && is_np(cedf->scheduled);
 	sleep = exists && is_completed(cedf->scheduled);
-	preempt = edf_preemption_needed(edf, prev);
+	preempt = is_migrating(prev) || edf_preemption_needed(edf, prev);
 
 	/* If we need to preempt do so.
 	 * The following checks set resched to 1 in case of special
@@ -389,7 +410,11 @@ static struct task_struct* edfsc_cschedule(cont_domain_t* cedf, struct task_stru
 	 * this.
 	 */
 	if (!np && (out_of_time || sleep)) {
-		c_job_completion(cedf->scheduled, !sleep);
+		if (is_fixed(cedf->scheduled))
+			c_job_completion(cedf->scheduled, !sleep);
+		else {
+			g_job_completion(cedf->scheduled, !sleep);
+		}
 		resched = 1;
 	}
 
@@ -405,7 +430,9 @@ static struct task_struct* edfsc_cschedule(cont_domain_t* cedf, struct task_stru
 	 * the appropriate queue.
 	 */
+	// requeue() now derives the correct queue (container vs. global)
+	// from the task's edfsc_params, for both fixed and migrating tasks
 	if (cedf->scheduled && !blocks)
-		requeue(cedf->scheduled, edf);
+		requeue(cedf->scheduled);
 	next = __take_ready(edf);
 } else
 	/* Only override Linux scheduler if we have a real-time task
@@ -417,11 +446,36 @@ static struct task_struct* edfsc_cschedule(cont_domain_t* cedf, struct task_stru
 	if (next) {
 		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
 	} else {
-		TRACE("becoming idle at %llu\n", litmus_clock());
+		// no local work: try to background-schedule a migrating task
+		// from the global domain, setting aside any containers we pull
+		struct bheap temp;
+		bheap_init(&temp);
+		next = __take_ready(&gsched_domain);
+		while (is_container(next)) {
+			bheap_insert(fifo_prio, &temp, tsk_rt(next)->heap_node);
+			next = __take_ready(&gsched_domain);
+		}
+		if (next) {
+			TRACE("stealing slack at %llu\n", litmus_clock());
+		} else {
+			TRACE("cpu becomes idle at %llu\n", litmus_clock());
+		}
+		// return the set-aside containers to the global ready queue
+		while (bheap_peek(fifo_prio, &temp)) {
+			__add_ready(&gsched_domain, bheap_take(fifo_prio, &temp)->value);
+		}
 	}
 
+	// a background-scheduled task may only consume the smaller of its own
+	// and the container's remaining budget; pre-charge it for that amount
+	if (next) {
+		cedf->changed_budget = (budget_remaining(next) > budget_remaining(cedf->container))
+			? budget_remaining(cedf->container) : budget_remaining(next);
+		cedf->scheduled_last_exec_time = get_exec_time(next);
+		tsk_rt(next)->job_params.exec_time += cedf->changed_budget;
+	}
+
 	cedf->scheduled = next;
-	sched_state_task_picked();
 	raw_spin_unlock(&cedf->c_lock);
 
 	return next;
@@ -437,16 +482,20 @@ static struct task_struct* edfsc_gschedule(struct task_struct * prev)
 	raw_spin_lock(&g_lock);
 
 	/* sanity checking */
-	BUG_ON(entry->scheduled && entry->scheduled != prev && entry->scheduled->container_domain != NULL);
+	BUG_ON(entry->scheduled && entry->scheduled != prev && !is_container(entry->scheduled));
 	BUG_ON(entry->scheduled && !is_realtime(prev));
 	BUG_ON(is_realtime(prev) && !entry->scheduled);
 
-	// update container budget if fixed task
-	if (prev && is_fixed(prev)) {
-		cont_domain_t* cdomain = tsk_rt(prev->container_task)->edfsc_params.container_domain;
+	// update container budget if prev was a fixed or background-scheduled task
+	if (entry->scheduled && prev != entry->scheduled) {
+		cont_domain_t* cdomain = tsk_rt(entry->scheduled)->edfsc_params.container_domain;
 		tsk_rt(prev)->job_params.exec_time -= cdomain->changed_budget;
-		tsk_rt(prev->edfsc_params)->container_task->job_params.exec_time +=
+		tsk_rt(entry->scheduled)->job_params.exec_time +=
 			tsk_rt(prev)->job_params.exec_time - cdomain->scheduled_last_exec_time;
+		if (cdomain->changed_budget)
+			tsk_rt(prev)->completed = 0;
+		if (budget_exhausted(entry->scheduled))
+			tsk_rt(entry->scheduled)->completed = 1;
 	}
 
 	/* (0) Determine state */
@@ -470,9 +519,6 @@ static struct task_struct* edfsc_gschedule(struct task_struct * prev)
 		blocks, out_of_time, np, sleep, preempt,
 		prev->state, signal_pending(prev), is_cont);
 
-	if (is_cont && !sleep && !preempt && !out_of_time)
-		return edfsc_cschedule(tsk_rt(entry->scheduled)->edfsc_params.container_domain, prev);
-
 	if (entry->linked && preempt)
 		TRACE_TASK(prev, "will be preempted by %s/%d\n",
 			entry->linked->comm, entry->linked->pid);
@@ -530,6 +576,9 @@ static struct task_struct* edfsc_gschedule(struct task_struct * prev)
 	if (exists)
 		next = prev;
 
+	if (is_container(next))
+		next = edfsc_cschedule(tsk_rt(next)->edfsc_params.container_domain, prev);
+
 	sched_state_task_picked();
 
 	raw_spin_unlock(&g_lock);
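
Taken together, the budget hand-off between edfsc_cschedule() and edfsc_gschedule() above can be read as a pre-charge/settle pair. The following is a sketch under that reading, not code from the patch: precharge_background and settle_background are hypothetical helper names, while changed_budget, scheduled_last_exec_time, budget_remaining, get_exec_time, and lt_t are the LITMUS^RT/patch names.

/* Pre-charge (edfsc_cschedule): a task picked inside a container may use
 * at most the smaller of its own and the container's remaining budget;
 * inflating exec_time makes budget enforcement fire at that point. */
static void precharge_background(cont_domain_t *cedf, struct task_struct *next)
{
	lt_t own = budget_remaining(next);
	lt_t slack = budget_remaining(cedf->container);

	cedf->changed_budget = (own > slack) ? slack : own;
	cedf->scheduled_last_exec_time = get_exec_time(next);
	tsk_rt(next)->job_params.exec_time += cedf->changed_budget;
}

/* Settle (edfsc_gschedule): undo the pre-charge on the task and bill the
 * container for the time the task actually consumed since it was picked. */
static void settle_background(cont_domain_t *cedf, struct task_struct *prev,
			      struct task_struct *container)
{
	tsk_rt(prev)->job_params.exec_time -= cedf->changed_budget;
	tsk_rt(container)->job_params.exec_time +=
		tsk_rt(prev)->job_params.exec_time - cedf->scheduled_last_exec_time;
}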