| author | Zelin Tong <ztong@ludwig.cs.unc.edu> | 2020-05-13 14:16:36 -0400 |
|---|---|---|
| committer | Zelin Tong <ztong@ludwig.cs.unc.edu> | 2020-05-13 14:16:36 -0400 |
| commit | e764a0121a5eb749d579b201027fe4b355b04e78 (patch) | |
| tree | a1b17ee53b3d0cb884a60ebad46fbf2a0ac156ce | |
| parent | dd2b7b8783f0e56921b38abe66a0fcd3d0df3b2d (diff) | |
WIP with my local changes

Contains changes to link_task_to_cpu() and to how budget enforcement works.
-rw-r--r-- | litmus/sched_edfsc.c | 127 |
1 file changed, 74 insertions(+), 53 deletions(-)
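The recurring change in this patch is an invariant check: a job whose budget is both enforced and already exhausted must never be requeued or linked to a CPU. A minimal sketch of that check, assuming the LITMUS^RT helpers used in the diff (budget_enforced(), budget_exhausted(), BUG_ON()); the wrapper function below is illustrative and not part of the commit:

```c
/* Illustrative only -- not part of this commit. The helper name is
 * hypothetical; budget_enforced(), budget_exhausted() and BUG_ON() are
 * the existing LITMUS^RT/kernel primitives used throughout the diff. */
static inline void assert_budget_not_exhausted(struct task_struct *t)
{
	/* A job with an enforced, exhausted budget must be completed or
	 * handled by budget enforcement; it must never re-enter a ready
	 * queue or be linked to a CPU in this state. */
	BUG_ON(t && budget_enforced(t) && budget_exhausted(t));
}
```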
diff --git a/litmus/sched_edfsc.c b/litmus/sched_edfsc.c
index 927f35ebc68f..74737eddd4af 100644
--- a/litmus/sched_edfsc.c
+++ b/litmus/sched_edfsc.c
@@ -114,6 +114,7 @@ static noinline void requeue(struct task_struct* task)
 	/* sanity check before insertion */
 	BUG_ON(is_queued(task));
 	BUG_ON(!is_realtime(task));
+	BUG_ON(budget_enforced(task) && budget_exhausted(task));
 
 	if (is_early_releasing(task) || is_released(task, litmus_clock())) {
 		__add_ready((rt_domain_t *) tsk_rt(task)->domain, task);
@@ -260,6 +261,7 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 
 	BUG_ON(linked && !is_realtime(linked));
 	BUG_ON(is_fixed(linked));
+	BUG_ON(is_container(linked) && tsk_rt(linked)->edfsc_params.id != entry->cpu);
 
 	/* Currently linked task is set to be unlinked. */
 	if (entry->linked) {
@@ -269,9 +271,9 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 	/* Link new task to CPU. */
 	if (linked) {
 		/* handle task is already scheduled somewhere! */
-		on_cpu = linked->rt_param.scheduled_on;
-		if (on_cpu != NO_CPU) {
-			sched = &per_cpu(edfsc_cpu_entries, on_cpu);
+		//on_cpu = linked->rt_param.scheduled_on;
+		//if (on_cpu != NO_CPU) {
+		//sched = &per_cpu(edfsc_cpu_entries, on_cpu);
 			/* this should only happen if not linked already */
 			// Except in the case of swapping linked tasks between cpus
 			//BUG_ON(sched->linked == linked);
@@ -281,21 +283,23 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 		 * we just link ourselves to the CPU and depend on
 		 * the caller to get things right.
 		 */
-		if (entry != sched) {
-			TRACE_TASK(linked,
-				"already scheduled on %d, updating link.\n",
-				sched->cpu);
-			tmp = sched->linked;
-			linked->rt_param.linked_on = sched->cpu;
-			sched->linked = linked;
-			update_cpu_position(sched);
-			linked = tmp;
-		}
-	}
+		//if (entry != sched) {
+		//	BUG_ON(true);
+		//	TRACE_TASK(linked,
+		//		"already scheduled on %d, updating link.\n",
+		//		sched->cpu);
+		//	tmp = sched->linked;
+		//	linked->rt_param.linked_on = sched->cpu;
+		//	sched->linked = linked;
+		//	update_cpu_position(sched);
+		//	linked = tmp;
+		//}
+		//}
 		if (linked) /* might be NULL due to swap */
 			linked->rt_param.linked_on = entry->cpu;
 	}
 	entry->linked = linked;
+	BUG_ON(is_container(linked) && tsk_rt(linked)->edfsc_params.id != entry->cpu);
 #ifdef WANT_ALL_SCHED_EVENTS
 	if (linked)
 		TRACE_TASK(linked, "linked to %d.\n", entry->cpu);
@@ -374,9 +378,11 @@ static void g_preempt_check(void)
 		if (target != last) {
 			TRACE("check_for_preemptions: swapping tasks linked on %d and %d\n",
 				last->cpu, target->cpu);
+			BUG_ON(is_container(target->linked) && last->cpu != tsk_rt(target->linked)->edfsc_params.id);
 			link_task_to_cpu(target->linked, last);
 			preempt(last);
 		}
+		BUG_ON(is_container(task) && target->cpu != tsk_rt(task)->edfsc_params.id);
 		link_task_to_cpu(task, target);
 		preempt(target);
 	}
@@ -466,6 +472,7 @@ static void c_release(struct task_struct *t) {
 	 */
 	if (get_rt_utilization(t) == to_fp(1)) {
 		cpu_entry_t* entry = &per_cpu(edfsc_cpu_entries, tsk_rt(t)->edfsc_params.id);
+		BUG_ON(entry->linked);
 		// Make this cpu unavailable to the global scheduler
 		if (bheap_node_in_heap(entry->hn))
 			remove_cpu_from_global(entry);
@@ -487,10 +494,10 @@ static void c_release(struct task_struct *t) {
 		if (!bheap_node_in_heap(entry->hn))
 			add_cpu_to_global(entry);
 		if (is_current_running()) { //since we don't support blocking, this should always be true
-			if (tsk_rt(t)->edfsc_params.domain->scheduled) {
-				requeue(tsk_rt(t)->edfsc_params.domain->scheduled);
-				tsk_rt(t)->edfsc_params.domain->scheduled = NULL;
-			}
+			//if (tsk_rt(t)->edfsc_params.domain->scheduled) {
+			//	requeue(tsk_rt(t)->edfsc_params.domain->scheduled);
+			//	tsk_rt(t)->edfsc_params.domain->scheduled = NULL;
+			//}
 			// Let g_preempt_check() decide what to run, don't impose
 			unlink(t);
 			// Request to be scheduled globally again
@@ -499,6 +506,8 @@ static void c_release(struct task_struct *t) {
 			// Re-run our EDF scheduling to adjust for the added core
 			g_preempt_check();
 		}
+		else
+			BUG_ON(true);
 	}
 }
 
@@ -535,6 +544,8 @@ static noinline void g_job_completion(struct task_struct* t, int forced)
 			requeue(t);
 			g_preempt_check();
 		}
+		else
+			BUG_ON(true);
 	// When a container job finishes late
 	} else if (is_container(t) && tsk_rt(t)->edfsc_params.can_release) {
 		tsk_rt(t)->edfsc_params.can_release = 0;
@@ -542,6 +553,10 @@ static noinline void g_job_completion(struct task_struct* t, int forced)
 		if (get_rt_utilization(t) == to_fp(1))
 			manage_idle_enforcement_timer(t);
 	}
+	else {
+		BUG_ON(is_queued(t));
+	}
+	BUG_ON(is_migrating(t) && budget_enforced(t) && budget_exhausted(t));
 }
 
 // fixed task job_completion, called from edfsc_cschedule
@@ -576,10 +591,12 @@ static void g_finish_switch(struct task_struct *prev)
 		entry->scheduled = entry->linked;
 	}
 
-	if (!is_container(entry->scheduled) && tsk_rt(container)->edfsc_params.domain->scheduled) {
-		requeue(tsk_rt(container)->edfsc_params.domain->scheduled);
-		tsk_rt(container)->edfsc_params.domain->scheduled = NULL;
-	}
+	BUG_ON(is_migrating(entry->linked) && !entry->scheduled);
+	BUG_ON(is_realtime(current) && !entry->scheduled);
+	//if (!is_container(entry->scheduled) && tsk_rt(container)->edfsc_params.domain->scheduled) {
+	//	requeue(tsk_rt(container)->edfsc_params.domain->scheduled);
+	//	tsk_rt(container)->edfsc_params.domain->scheduled = NULL;
+	//}
 #ifdef WANT_ALL_SCHED_EVENTS
 	TRACE_TASK(prev, "switched away from\n");
 #endif
@@ -596,7 +613,7 @@ static int fifo_prio(struct bheap_node* _a, struct bheap_node* _b)
  * @param cedf Pointer to tsk_rt(container)->edfsc_params->domain
  * @param prev Previous task running on this processor before schedule was called
  */
-static void edfsc_cschedule(cont_domain_t* cedf, struct task_struct * prev)
+static struct task_struct* edfsc_cschedule(cont_domain_t* cedf, struct task_struct * prev)
 {
 	rt_domain_t *edf = &cedf->domain;
 
@@ -611,7 +628,7 @@ static void edfsc_cschedule(cont_domain_t* cedf, struct task_struct * prev)
 	 * differently from gedf, when a task exits (dead)
 	 * cedf->schedule may be null and prev _is_ realtime
 	 */
-	BUG_ON(cedf->scheduled && cedf->scheduled != prev && is_realtime(prev));
+	//BUG_ON(cedf->scheduled && cedf->scheduled != prev && is_realtime(prev));
 	BUG_ON(cedf->scheduled && !is_realtime(cedf->scheduled));
 
 	/* (0) Determine state */
@@ -621,7 +638,7 @@ static void edfsc_cschedule(cont_domain_t* cedf, struct task_struct * prev)
 			&& budget_exhausted(cedf->scheduled);
 	np = exists && is_np(cedf->scheduled);
 	sleep = exists && is_completed(cedf->scheduled);
-	preempt = (is_migrating(prev) && __peek_ready(edf)) || edf_preemption_needed(edf, prev);
+	preempt = (is_migrating(cedf->scheduled) && __peek_ready(edf)) || edf_preemption_needed(edf, cedf->scheduled);
 
 	/* If we need to preempt do so.
 	 * The following checks set resched to 1 in case of special
@@ -668,13 +685,12 @@ static void edfsc_cschedule(cont_domain_t* cedf, struct task_struct * prev)
 		 */
 		next = __take_ready(edf);
 	} else if (exists) {
-		BUG_ON(!is_realtime(prev));
 		/* Only override Linux scheduler if we have a real-time task
 		 * scheduled that needs to continue.
 		 */
-		next = prev;
+		next = cedf->scheduled;
 	}
-
+	/*
 	if (next) {
 		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
 	} else {
@@ -689,14 +705,17 @@ static void edfsc_cschedule(cont_domain_t* cedf, struct task_struct * prev)
 	if (next) {
 		TRACE("stealing slack at %llu\n", litmus_clock());
 	} else {
+		tsk_rt(t)->task_params.budget_policy = PRECISE_ENFORCEMENT;
 		TRACE("cpu become idle at %llu\n", litmus_clock());
 	}
 	while (bheap_peek(fifo_prio, &temp)) {
 		requeue(bheap_take(fifo_prio, &temp)->value);
 	}
 	}
-
+	*/
 	cedf->scheduled = next;
+	BUG_ON(next && budget_enforced(next) && budget_exhausted(next));
+	return next;
 }
 
 //assuming prev is previous task running on the processor before calling schedule
@@ -720,7 +739,7 @@ static struct task_struct *edfsc_gschedule(struct task_struct *prev)
 	// jobs of fixed tasks.
 	BUG_ON(entry->scheduled && !is_container(entry->scheduled) && !is_realtime(prev));
 	// Bug if we didn't think anything was scheduled, but a realtime task was running on our CPU
-	BUG_ON(is_realtime(prev) && !entry->scheduled && entry->cpu == tsk_rt(prev)->scheduled_on);
+	//BUG_ON(is_realtime(prev) && !entry->scheduled && entry->cpu == tsk_rt(prev)->scheduled_on);
 
 	if (is_container(entry->scheduled)) {
 		lt_t now = litmus_clock();
@@ -758,8 +777,9 @@ static struct task_struct *edfsc_gschedule(struct task_struct *prev)
 	/* If a task blocks we have no choice but to reschedule.
 	 * Note: containers never block
 	 */
-	if (blocks)
+	if (blocks) {
 		unlink(entry->scheduled);
+	}
 
 	/* Request a sys_exit_np() call if we would like to preempt but cannot.
 	 * We need to make sure to update the link structure anyway in case
@@ -778,17 +798,16 @@ static struct task_struct *edfsc_gschedule(struct task_struct *prev)
 	 */
 	if (!np && (out_of_time || sleep)) {
 		// This is not a global job completion if we're in a fully provisioned container
-		if (bheap_node_in_heap(entry->hn))
+		if (is_migrating(entry->scheduled) || bheap_node_in_heap(entry->hn))
 			g_job_completion(entry->scheduled, !sleep);
 		else
 			unlink(entry->scheduled);
-		BUG_ON(entry->linked && budget_exhausted(entry->linked));
+		BUG_ON(entry->linked && budget_enforced(entry->linked) && budget_exhausted(entry->linked));
 	}
 
 	// Determine what to run next (set entry->linked)
 	if (!entry->linked) {
 		struct task_struct* task = __take_ready(&gsched_domain);
-		//printk("next task: %d\n", task);
 		// Make sure that containers are only scheduled on cores with same id
 		if (is_container(task) && entry->cpu != tsk_rt(task)->edfsc_params.id) {
 			// Get cpu_entry for task's core assignment
@@ -799,15 +818,16 @@ static struct task_struct *edfsc_gschedule(struct task_struct *prev)
 			// I guess this is one way to avoid overwriting target->linked?
 			// Peter: This was in your original implementation, is this right? -Joshua
 			link_task_to_cpu(target->linked, entry);
+			BUG_ON(is_container(target->linked) && tsk_rt(target->linked)->edfsc_params.id != entry->cpu);
 			// Setup the container to run next on the remote core
 			link_task_to_cpu(task, target);
-			BUG_ON(budget_exhausted(task));
+			BUG_ON(budget_enforced(task) && budget_exhausted(task));
 			// Alert the remote core that it now needs to reschedule
 			preempt(target);
 		} else {
 			// We'll now schedule the ready task here
 			link_task_to_cpu(task, entry);
-			BUG_ON(task && budget_exhausted(task));
+			BUG_ON(task && budget_enforced(task) && budget_exhausted(task));
 			// Give up on this scheduling cycle, as we need to re-check the
 			// conditions earlier on in edfsc_gschedule()
 			//if (task)
@@ -819,7 +839,7 @@ static struct task_struct *edfsc_gschedule(struct task_struct *prev)
 		//XXX modify this into a while loop?
 		//goto out;
 	}
-	BUG_ON(entry->linked && budget_exhausted(entry->linked));
+	BUG_ON(entry->linked && budget_enforced(entry->linked) && budget_exhausted(entry->linked));
 	BUG_ON(!bheap_node_in_heap(entry->hn) && entry->linked && tsk_rt(entry->linked)->edfsc_params.id != entry->cpu);
 	BUG_ON(is_container(entry->linked) && tsk_rt(entry->linked)->edfsc_params.id != entry->cpu);
 
@@ -839,28 +859,30 @@ static struct task_struct *edfsc_gschedule(struct task_struct *prev)
 			TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n");
 		}
 	} else if (entry->scheduled) {
-		// If we've been running a container, make sure that it has nothing new to schedule
-		if (is_container(entry->scheduled))
-			next = entry->scheduled;
-		// Otherwise we can keep running any tasks we previously scheduled
-		else if (is_realtime(prev))
+		if (is_realtime(prev))
 			next = prev;
 	}
 
-	if (is_container(next))
-		edfsc_cschedule(tsk_rt(next)->edfsc_params.domain, prev);
+	if (is_container(next)) {
+		struct task_struct* temp = next;
+		next = edfsc_cschedule(tsk_rt(next)->edfsc_params.domain, prev);
+		if (bheap_node_in_heap(entry->hn))
+			manage_idle_enforcement_timer(temp);
+	}
 
 	//out:
 	// sched_state_task_picked();
 
 	// if no fixed tasks to be scheduled by the container, then container->scheduled
 	// should be the previous non-rt task if any
-	if (is_container(next)) {
-		if (bheap_node_in_heap(entry->hn))
-			manage_idle_enforcement_timer(next);
-		next = tsk_rt(next)->edfsc_params.domain->scheduled;
-	}
+	//if (is_container(next)) {
+	//	if (bheap_node_in_heap(entry->hn))
+	//		manage_idle_enforcement_timer(next);
+	//	next = tsk_rt(next)->edfsc_params.domain->scheduled;
+	//}
 
+	BUG_ON(is_migrating(entry->linked) && next != entry->linked);
+	BUG_ON(next && budget_enforced(next) && budget_exhausted(next));
 	raw_spin_unlock_irqrestore(&g_lock, flags);
 
 #ifdef WANT_ALL_SCHED_EVENTS
@@ -922,7 +944,6 @@ static enum hrtimer_restart container_boundary(struct hrtimer *timer)
 			//printk("container: %d\n", container_domains[i].f_util);
 			if (leftover >= get_rt_utilization(t)) {
 				container = &(container_domains[i]);
-				printk("container id: %d\n", i);
 				break;
 			}
 		}
@@ -1063,7 +1084,7 @@ static enum hrtimer_restart task_deadline_callback(struct hrtimer* timer) {
 	// This is true only if set to be migrating from container_boundary
 	if (tsk_rt(t)->edfsc_params.move_to) {
 		// Migrate here if the task is not late, otherwise migrate in job_complete
-		if (!is_released(t, litmus_clock()) || budget_exhausted(t))
+		if (!is_released(t, litmus_clock()) || (budget_exhausted(t) && budget_enforced(t)) || is_completed(t))
 			migrate_task(t);
 	} else {
 		// A move to NULL means deletion
@@ -1406,7 +1427,7 @@ static int __init init_edfsc(void)
 		entry->linked = NULL;
 		entry->hn = &edfsc_cpu_heap_node[i];
 		bheap_node_init(&entry->hn, entry);
-		entry->scheduled = NULL;
+		bheap_insert(cpu_lower_prio, &edfsc_cpu_heap, entry->hn);
 	}
 
 	return register_sched_plugin(&edfsc_plugin);