path: root/litmus/sched_mc2.c
author	Namhoon Kim <namhoonk@cs.unc.edu>	2016-04-30 19:46:44 -0400
committer	Namhoon Kim <namhoonk@cs.unc.edu>	2016-04-30 19:46:44 -0400
commit	fc35ca6c9592d43b067a45c49f98cf4b5b361b87 (patch)
tree	9a7d7d6eec9ea24bea317de137cc0431ff54cb8b /litmus/sched_mc2.c
parent	f0e07f0e5cba027377c57e1aa25101023640c62b (diff)
PGM support (wip-mc2-new)
Diffstat (limited to 'litmus/sched_mc2.c')
-rw-r--r--	litmus/sched_mc2.c	76
1 file changed, 74 insertions(+), 2 deletions(-)
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index a310723dce24..9ed92d355d7a 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -25,6 +25,7 @@
 #include <litmus/mc2_common.h>
 #include <litmus/reservation.h>
 #include <litmus/polling_reservations.h>
+#include <litmus/pgm.h>
 
 #define BUDGET_ENFORCEMENT_AT_C 0
 
@@ -614,14 +615,82 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 	/* next == NULL means "schedule background work". */
 	lt_t now;
 	struct mc2_cpu_state *state = local_cpu_state();
+	int sleep, preempt, np, exists, blocks;
 
 	pre_schedule(prev, state->cpu);
 
 	raw_spin_lock(&_global_env.lock);
 	raw_spin_lock(&state->lock);
 
-	//BUG_ON(state->scheduled && state->scheduled != prev);
-	//BUG_ON(state->scheduled && !is_realtime(prev));
+	/* sanity check */
+	BUG_ON(state->scheduled && state->scheduled != prev);
+	BUG_ON(state->scheduled && !is_realtime(prev));
+	BUG_ON(is_realtime(prev) && !state->scheduled);
+
+	/* (0) Determine state */
+	exists = state->scheduled != NULL;
+	blocks = exists && !is_running(state->scheduled);
+	sleep = exists && is_completed(state->scheduled);
+	np = exists && is_np(state->scheduled);
+	preempt = _lowest_prio_cpu.cpu_entries[state->cpu].will_schedule;
+
+	if (exists) {
+		if (is_pgm_sending(state->scheduled)) {
+			if (!is_pgm_satisfied(state->scheduled)) {
+				if (!is_priority_boosted(state->scheduled)) {
+					TRACE_TASK(state->scheduled, "is sending PGM tokens and needs boosting.\n");
+					BUG_ON(is_pgm_satisfied(state->scheduled));
+
+					/* We are either sending tokens or waiting for tokens.
+					   If waiting: Boost priority so we'll be scheduled
+					   immediately when the needed tokens arrive.
+					   If sending: Boost priority so no one (specifically, our
+					   consumers) will preempt us while signalling the token
+					   transmission.
+					*/
+					tsk_rt(state->scheduled)->priority_boosted = 1;
+					tsk_rt(state->scheduled)->boost_start_time = litmus_clock();
+
+					if (likely(!blocks)) {
+						task_departs(state->scheduled, is_completed(prev));
+						task_arrives(state, state->scheduled);
+						/* we may regain the processor */
+						if (preempt) {
+							//preempt = resched_cpu[state->cpu];
+							litmus_reschedule_local();
+							if (!preempt) {
+								TRACE_TASK(state->scheduled, "blocked preemption by lazy boosting.\n");
+							}
+						}
+					}
+				}
+			}
+			else { /* sending is satisfied */
+				tsk_rt(state->scheduled)->ctrl_page->pgm_sending = 0;
+				tsk_rt(state->scheduled)->ctrl_page->pgm_satisfied = 0;
+
+				if (is_priority_boosted(state->scheduled)) {
+					TRACE_TASK(state->scheduled,
+						"is done sending PGM tokens and must relinquish boosting.\n");
+					/* clear boosting */
+					tsk_rt(state->scheduled)->priority_boosted = 0;
+					if (likely(!blocks)) {
+						/* recheck priority */
+						task_departs(state->scheduled, is_completed(prev));
+						task_arrives(state, state->scheduled);
+						/* we may lose the processor */
+						if (!preempt) {
+							//preempt = resched_cpu[state->cpu];
+							litmus_reschedule_local();
+							if (preempt) {
+								TRACE_TASK(state->scheduled, "preempted by lazy unboosting.\n");
+							}
+						}
+					}
+				}
+			}
+		}
+	}
 
 	/* update time */
 	state->sup_env.will_schedule = true;
@@ -701,6 +770,9 @@ static void resume_legacy_task_model_updates(struct task_struct *tsk)
 			sched_trace_task_release(tsk);
 		}
 	}
+	//if (is_pgm_waiting(tsk)) {
+	//	setup_pgm_release(tsk);
+	//}
 }
 
 /* mc2_task_resume - Called when the state of tsk changes back to
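
The large hunk above boosts a task's priority while it is transmitting PGM tokens and drops the boost once transmission is satisfied; in both cases the task is requeued via task_departs()/task_arrives() so its effective priority is re-evaluated, and litmus_reschedule_local() is invoked only when the boost change could change which task owns the CPU. The following user-space sketch mirrors just that decision logic; struct pgm_task, enum pgm_action, and decide_pgm_boost() are hypothetical stand-ins for the LITMUS^RT structures and helpers used in the diff, not part of the kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the per-task flags mc2_schedule() consults. */
struct pgm_task {
	bool pgm_sending;      /* task is in a token-transmission phase     */
	bool pgm_satisfied;    /* all outgoing tokens have been produced    */
	bool priority_boosted; /* task currently runs with boosted priority */
};

enum pgm_action { PGM_NONE, PGM_BOOST, PGM_UNBOOST };

/* Mirrors the nested branches added to mc2_schedule():
 *  - still sending and not yet boosted -> boost (and requeue unless blocked)
 *  - sending satisfied while boosted   -> clear the boost (and requeue)
 *  - anything else                     -> leave the task alone
 */
static enum pgm_action decide_pgm_boost(const struct pgm_task *t)
{
	if (!t->pgm_sending)
		return PGM_NONE;
	if (!t->pgm_satisfied)
		return t->priority_boosted ? PGM_NONE : PGM_BOOST;
	return t->priority_boosted ? PGM_UNBOOST : PGM_NONE;
}

int main(void)
{
	struct pgm_task mid_send = { .pgm_sending = true };
	struct pgm_task finished = { .pgm_sending = true, .pgm_satisfied = true,
	                             .priority_boosted = true };

	printf("mid-send task: %d (expect PGM_BOOST = 1)\n", decide_pgm_boost(&mid_send));
	printf("finished task: %d (expect PGM_UNBOOST = 2)\n", decide_pgm_boost(&finished));
	return 0;
}

The requeue step matters because boosting is implemented lazily here: the flag flip alone does not reorder any queue, so the task must depart and re-arrive for the reservation code to see its new effective priority.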