From 5ba38eb6290a0c1767932c03b15edb0627ffd6b2 Mon Sep 17 00:00:00 2001 From: Namhoon Kim Date: Wed, 28 Jan 2015 09:26:59 -0500 Subject: LV c --- include/litmus/reservation.h | 22 +- litmus/mc2_common.c | 3 +- litmus/polling_reservations.c | 7 +- litmus/reservation.c | 356 ++++++++++++++++++++- litmus/sched_mc2.c | 729 +++++++++++++++++++++++++++++++++++------- 5 files changed, 985 insertions(+), 132 deletions(-) diff --git a/include/litmus/reservation.h b/include/litmus/reservation.h index 0e656ad2667e..fc7e31918a60 100644 --- a/include/litmus/reservation.h +++ b/include/litmus/reservation.h @@ -129,8 +129,10 @@ struct reservation { /* for global env. */ int scheduled_on; - /* for blocked by ghost */ + /* for blocked by ghost. Do not charge budget when ACTIVE */ int blocked_by_ghost; + /* ghost_job. If it is clear, do not charge budget when ACTIVE_IDLE */ + int is_ghost; }; void reservation_init(struct reservation *res); @@ -199,11 +201,19 @@ struct reservation* sup_find_by_id(struct sup_reservation_environment* sup_env, unsigned int id); /* A global multiprocessor reservation environment. */ +/* +typedef enum { + EVENT_REPLENISH = 0, + EVENT_DRAIN, + EVENT_OTHERS, +} event_type_t; +*/ struct next_timer_event { lt_t next_update; int timer_armed_on; - unsigned int id; + //unsigned int id; + //event_type_t type; struct list_head list; }; @@ -222,6 +232,7 @@ struct gmp_reservation_environment { /* timer event ordered by next_update */ struct list_head next_events; + /* (schedule_now == true) means call gmp_dispatch() now */ bool schedule_now; /* set to true if a call to gmp_dispatch() is imminent */ @@ -231,9 +242,12 @@ struct gmp_reservation_environment { void gmp_init(struct gmp_reservation_environment* gmp_env); void gmp_add_new_reservation(struct gmp_reservation_environment* gmp_env, struct reservation* new_res); -void gmp_update_time(struct gmp_reservation_environment* gmp_env, lt_t now); +void gmp_scheduler_update_after(struct gmp_reservation_environment* gmp_env, + lt_t timeout); +bool gmp_update_time(struct gmp_reservation_environment* gmp_env, lt_t now); struct task_struct* gmp_dispatch(struct gmp_reservation_environment* gmp_env); -struct next_timer_event* gmp_find_event_by_id(struct gmp_reservation_environment* gmp_env, unsigned int id); +//struct next_timer_event* gmp_find_event_by_id(struct gmp_reservation_environment* gmp_env, unsigned int id); +struct next_timer_event* gmp_find_event_by_time(struct gmp_reservation_environment* gmp_env, lt_t when); struct reservation* gmp_find_by_id(struct gmp_reservation_environment* gmp_env, unsigned int id); diff --git a/litmus/mc2_common.c b/litmus/mc2_common.c index d0a42c69d65c..a8ea5d9889f3 100644 --- a/litmus/mc2_common.c +++ b/litmus/mc2_common.c @@ -67,7 +67,8 @@ asmlinkage long sys_set_mc2_task_param(pid_t pid, struct mc2_task __user * param goto out_unlock; } - target->rt_param.plugin_state = mp; + //target->rt_param.plugin_state = mp; + target->rt_param.mc2_data = mp; retval = 0; out_unlock: diff --git a/litmus/polling_reservations.c b/litmus/polling_reservations.c index ec5cadd19b4f..d2c54c46442c 100644 --- a/litmus/polling_reservations.c +++ b/litmus/polling_reservations.c @@ -32,8 +32,8 @@ static void periodic_polling_client_arrives( } TRACE("ENV_TIME_ZERO %llu\n", res->env->time_zero); - TRACE("pol-res: activate tmp=%llu instances=%llu period=%llu nextrp=%llu cur=%llu\n", - tmp, instances, pres->period, res->next_replenishment, + TRACE("pol-res: R%d activate tmp=%llu instances=%llu period=%llu nextrp=%llu cur=%llu\n", + res->id, 
tmp, instances, pres->period, res->next_replenishment, res->env->current_time); res->env->change_state(res->env, res, @@ -147,7 +147,8 @@ static void common_drain_budget( switch (res->state) { case RESERVATION_DEPLETED: case RESERVATION_INACTIVE: - BUG(); + //BUG(); + TRACE("!!!!!!!!!!!!!!!STATE ERROR R%d STATE(%d)\n", res->id, res->state); break; case RESERVATION_ACTIVE_IDLE: diff --git a/litmus/reservation.c b/litmus/reservation.c index 16b3a4818e1e..e30892c72f4a 100644 --- a/litmus/reservation.c +++ b/litmus/reservation.c @@ -1,4 +1,5 @@ #include +#include #include #include @@ -48,7 +49,7 @@ static void sup_scheduler_update_at( struct sup_reservation_environment* sup_env, lt_t when) { - TRACE("SCHEDULER_UPDATE_AT update: %llu > when %llu\n", sup_env->next_scheduler_update, when); + //TRACE("SCHEDULER_UPDATE_AT update: %llu > when %llu\n", sup_env->next_scheduler_update, when); if (sup_env->next_scheduler_update > when) sup_env->next_scheduler_update = when; } @@ -252,7 +253,7 @@ void sup_update_time( /* If the time didn't advance, there is nothing to do. * This check makes it safe to call sup_advance_time() potentially * multiple times (e.g., via different code paths. */ - TRACE("(sup_update_time) now: %llu, current_time: %llu\n", now, sup_env->env.current_time); + //TRACE("(sup_update_time) now: %llu, current_time: %llu\n", now, sup_env->env.current_time); if (unlikely(now <= sup_env->env.current_time)) return; @@ -264,11 +265,11 @@ void sup_update_time( sup_env->next_scheduler_update = SUP_NO_SCHEDULER_UPDATE; /* deplete budgets by passage of time */ - TRACE("CHARGE###\n"); + //TRACE("CHARGE###\n"); sup_charge_budget(sup_env, delta); /* check if any budgets where replenished */ - TRACE("REPLENISH###\n"); + //TRACE("REPLENISH###\n"); sup_replenish_budgets(sup_env); } @@ -325,3 +326,350 @@ void sup_init(struct sup_reservation_environment* sup_env) sup_env->next_scheduler_update = SUP_NO_SCHEDULER_UPDATE; } + +struct reservation* gmp_find_by_id(struct gmp_reservation_environment* gmp_env, + unsigned int id) +{ + struct reservation *res; + + list_for_each_entry(res, &gmp_env->active_reservations, list) { + if (res->id == id) + return res; + } + list_for_each_entry(res, &gmp_env->inactive_reservations, list) { + if (res->id == id) + return res; + } + list_for_each_entry(res, &gmp_env->depleted_reservations, list) { + if (res->id == id) + return res; + } + + return NULL; +} + +/* +struct next_timer_event* gmp_find_event_by_id(struct gmp_reservation_environment* gmp_env, + unsigned int id) +{ + struct next_timer_event *event; + + list_for_each_entry(event, &gmp_env->next_events, list) { + if (event->id == id) + return event; + } + + return NULL; +} +*/ + +struct next_timer_event* gmp_find_event_by_time(struct gmp_reservation_environment* gmp_env, + lt_t when) +{ + struct next_timer_event *event; + + list_for_each_entry(event, &gmp_env->next_events, list) { + if (event->next_update == when) + return event; + } + + return NULL; +} + +/* +static void gmp_scheduler_update_at( + struct gmp_reservation_environment* gmp_env, unsigned int id, + event_type_t type, lt_t when) +{ + struct next_timer_event *nevent, *queued; + struct list_head *pos; + int found = 0; + + nevent = gmp_find_event_by_id(gmp_env, id); + + if (!nevent) { + nevent = kzalloc(sizeof(*nevent), GFP_KERNEL); + nevent->next_update = when; + nevent->id = id; + nevent->timer_armed_on = NO_CPU; + nevent->type = type; + + list_for_each(pos, &gmp_env->next_events) { + queued = list_entry(pos, struct next_timer_event, list); + if 
(queued->next_update > nevent->next_update) {
+			list_add(&nevent->list, pos->prev);
+			found = 1;
+			TRACE("NEXT_EVENT ADDED after %llu\n", queued->next_update);
+			break;
+		}
+	}
+
+	if (!found) {
+		list_add_tail(&nevent->list, &gmp_env->next_events);
+		TRACE("NEXT_EVENT ADDED at [0]\n");
+	}
+	} else {
+		TRACE("EVENT FOUND at %llu T(%d), NEW EVENT %llu T(%d)\n", nevent->next_update, nevent->type, when, type);
+	}
+}
+*/
+#define TIMER_RESOLUTION 100000L
+
+static void gmp_scheduler_update_at(
+	struct gmp_reservation_environment* gmp_env,
+	lt_t when)
+{
+	struct next_timer_event *nevent, *queued;
+	struct list_head *pos;
+	int found = 0;
+
+	//when = div64_u64(when, TIMER_RESOLUTION);
+	//when *= TIMER_RESOLUTION;
+
+	nevent = gmp_find_event_by_time(gmp_env, when);
+
+	if (!nevent) {
+		nevent = kzalloc(sizeof(*nevent), GFP_KERNEL);
+		nevent->next_update = when;
+		nevent->timer_armed_on = NO_CPU;
+
+		list_for_each(pos, &gmp_env->next_events) {
+			queued = list_entry(pos, struct next_timer_event, list);
+			if (queued->next_update > nevent->next_update) {
+				list_add(&nevent->list, pos->prev);
+				found = 1;
+				TRACE("NEXT_EVENT at %llu ADDED before %llu\n", nevent->next_update, queued->next_update);
+				break;
+			}
+		}
+
+		if (!found) {
+			list_add_tail(&nevent->list, &gmp_env->next_events);
+			TRACE("NEXT_EVENT at %llu ADDED at TAIL\n", nevent->next_update);
+		}
+	} else {
+		; //TRACE("EVENT FOUND at %llu, NEW EVENT %llu\n", nevent->next_update, when);
+	}
+}
+
+void gmp_scheduler_update_after(
+	struct gmp_reservation_environment* gmp_env, lt_t timeout)
+{
+	gmp_scheduler_update_at(gmp_env, gmp_env->env.current_time + timeout);
+}
+
+static void gmp_queue_depleted(
+	struct gmp_reservation_environment* gmp_env,
+	struct reservation *res)
+{
+	struct list_head *pos;
+	struct reservation *queued;
+	int found = 0;
+
+	list_for_each(pos, &gmp_env->depleted_reservations) {
+		queued = list_entry(pos, struct reservation, list);
+		if (queued->next_replenishment > res->next_replenishment) {
+			list_add(&res->list, pos->prev);
+			found = 1;
+			break;	/* insert once; continuing could relink res->list */
+		}
+	}
+
+	if (!found)
+		list_add_tail(&res->list, &gmp_env->depleted_reservations);
+
+	gmp_scheduler_update_at(gmp_env, res->next_replenishment);
+}
+
+static void gmp_queue_active(
+	struct gmp_reservation_environment* gmp_env,
+	struct reservation *res)
+{
+	struct list_head *pos;
+	struct reservation *queued;
+	int check_preempt = 1, found = 0;
+
+	list_for_each(pos, &gmp_env->active_reservations) {
+		queued = list_entry(pos, struct reservation, list);
+		if (queued->priority > res->priority) {
+			list_add(&res->list, pos->prev);
+			found = 1;
+			break;
+		} else if (queued->scheduled_on == NO_CPU)
+			check_preempt = 0;
+	}
+
+	if (!found)
+		list_add_tail(&res->list, &gmp_env->active_reservations);
+
+	/* check for possible preemption */
+	if (res->state == RESERVATION_ACTIVE && !check_preempt)
+		gmp_env->schedule_now = true;
+
+	gmp_scheduler_update_after(gmp_env, res->cur_budget);
+}
+
+static void gmp_queue_reservation(
+	struct gmp_reservation_environment* gmp_env,
+	struct reservation *res)
+{
+	switch (res->state) {
+		case RESERVATION_INACTIVE:
+			list_add(&res->list, &gmp_env->inactive_reservations);
+			break;
+
+		case RESERVATION_DEPLETED:
+			gmp_queue_depleted(gmp_env, res);
+			break;
+
+		case RESERVATION_ACTIVE_IDLE:
+		case RESERVATION_ACTIVE:
+			gmp_queue_active(gmp_env, res);
+			break;
+	}
+}
+
+void gmp_add_new_reservation(
+	struct gmp_reservation_environment* gmp_env,
+	struct reservation* new_res)
+{
+	new_res->env = &gmp_env->env;
+	
gmp_queue_reservation(gmp_env, new_res); +} + +static void gmp_charge_budget( + struct gmp_reservation_environment* gmp_env, + lt_t delta) +{ + struct list_head *pos, *next; + struct reservation *res; + + list_for_each_safe(pos, next, &gmp_env->active_reservations) { + int drained = 0; + /* charge all ACTIVE_IDLE up to the first ACTIVE reservation */ + res = list_entry(pos, struct reservation, list); + if (res->state == RESERVATION_ACTIVE) { + TRACE("gmp_charge_budget ACTIVE R%u drain %llu\n", res->id, delta); + if (res->scheduled_on != NO_CPU && res->blocked_by_ghost == 0) { + TRACE("DRAIN !!\n"); + drained = 1; + res->ops->drain_budget(res, delta); + } + } else { + //BUG_ON(res->state != RESERVATION_ACTIVE_IDLE); + if (res->state != RESERVATION_ACTIVE_IDLE) + TRACE("BUG!!!!!!!!!!!! gmp_charge_budget()\n"); + TRACE("gmp_charge_budget INACTIVE R%u drain %llu\n", res->id, delta); + //if (res->is_ghost == 1) { + TRACE("DRAIN !!\n"); + drained = 1; + res->ops->drain_budget(res, delta); + //} + } + if ((res->state == RESERVATION_ACTIVE || + res->state == RESERVATION_ACTIVE_IDLE) && (drained == 1)) + { + /* make sure scheduler is invoked when this reservation expires + * its remaining budget */ + TRACE("requesting gmp_scheduler update for reservation %u in %llu nanoseconds\n", + res->id, res->cur_budget); + gmp_scheduler_update_after(gmp_env, res->cur_budget); + } + //if (encountered_active == 2) + /* stop at the first ACTIVE reservation */ + // break; + } + //TRACE("finished charging budgets\n"); +} + +static void gmp_replenish_budgets(struct gmp_reservation_environment* gmp_env) +{ + struct list_head *pos, *next; + struct reservation *res; + + list_for_each_safe(pos, next, &gmp_env->depleted_reservations) { + res = list_entry(pos, struct reservation, list); + if (res->next_replenishment <= gmp_env->env.current_time) { + res->ops->replenish(res); + } else { + /* list is ordered by increasing depletion times */ + break; + } + } + //TRACE("finished replenishing budgets\n"); + + /* request a scheduler update at the next replenishment instant */ + res = list_first_entry_or_null(&gmp_env->depleted_reservations, + struct reservation, list); + if (res) + gmp_scheduler_update_at(gmp_env, res->next_replenishment); +} + +/* return schedule_now */ +bool gmp_update_time( + struct gmp_reservation_environment* gmp_env, + lt_t now) +{ + lt_t delta; + + if (!gmp_env) { + TRACE("BUG****************************************\n"); + return false; + } + /* If the time didn't advance, there is nothing to do. + * This check makes it safe to call sup_advance_time() potentially + * multiple times (e.g., via different code paths. 
*/
+	//TRACE("(sup_update_time) now: %llu, current_time: %llu\n", now, sup_env->env.current_time);
+	if (unlikely(now <= gmp_env->env.current_time))
+		return gmp_env->schedule_now;
+
+	delta = now - gmp_env->env.current_time;
+	gmp_env->env.current_time = now;
+
+
+	/* deplete budgets by passage of time */
+	//TRACE("CHARGE###\n");
+	gmp_charge_budget(gmp_env, delta);
+
+	/* check if any budgets were replenished */
+	//TRACE("REPLENISH###\n");
+	gmp_replenish_budgets(gmp_env);
+
+	return gmp_env->schedule_now;
+}
+
+static void gmp_res_change_state(
+	struct reservation_environment* env,
+	struct reservation *res,
+	reservation_state_t new_state)
+{
+	struct gmp_reservation_environment* gmp_env;
+
+	gmp_env = container_of(env, struct gmp_reservation_environment, env);
+
+	TRACE("GMP reservation R%d state %d->%d at %llu\n",
+		res->id, res->state, new_state, env->current_time);
+
+	list_del(&res->list);
+	/* check if we need to reschedule because we lost an active reservation */
+	if (res->state == RESERVATION_ACTIVE && !gmp_env->will_schedule)
+		gmp_env->schedule_now = true;
+	res->state = new_state;
+	gmp_queue_reservation(gmp_env, res);
+}
+
+void gmp_init(struct gmp_reservation_environment* gmp_env)
+{
+	memset(gmp_env, 0, sizeof(*gmp_env));
+
+	INIT_LIST_HEAD(&gmp_env->active_reservations);
+	INIT_LIST_HEAD(&gmp_env->depleted_reservations);
+	INIT_LIST_HEAD(&gmp_env->inactive_reservations);
+	INIT_LIST_HEAD(&gmp_env->next_events);
+
+	gmp_env->env.change_state = gmp_res_change_state;
+
+	gmp_env->schedule_now = false;
+	gmp_env->will_schedule = false;
+
+	raw_spin_lock_init(&gmp_env->lock);
+}
\ No newline at end of file
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index 0c260190f287..6dee1ec2c99c 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -15,6 +15,23 @@
 #include
 #include
+struct gmp_reservation_environment _global_env;
+
+struct cpu_entry {
+	struct task_struct *scheduled;
+	lt_t deadline;
+	int cpu;
+	enum crit_level lv;
+	bool will_schedule;
+};
+
+struct cpu_priority {
+	raw_spinlock_t lock;
+	struct cpu_entry cpu_entries[NR_CPUS];
+};
+
+struct cpu_priority _lowest_prio_cpu;
+
 struct mc2_task_state {
 	struct task_client res_info;
 	int cpu;
@@ -51,11 +68,39 @@ static struct mc2_task_state* get_mc2_state(struct task_struct *tsk)
 }
 
 static enum crit_level get_task_crit_level(struct task_struct *tsk)
 {
-	struct mc2_task_state *tinfo = get_mc2_state(tsk);
-	if (!tinfo)
+	//struct mc2_task_state *tinfo = get_mc2_state(tsk);
+	struct mc2_task *mp;
+
+	if (!tsk || !is_realtime(tsk))
+		return NUM_CRIT_LEVELS;
+
+	mp = tsk_rt(tsk)->mc2_data;
+
+	if (!mp)
 		return NUM_CRIT_LEVELS;
 	else
-		return tinfo->mc2_param.crit;
+		return mp->crit;
+}
+
+static struct reservation* res_find_by_id(struct mc2_cpu_state *state, unsigned int id)
+{
+	struct reservation *res;
+
+	res = sup_find_by_id(&state->sup_env, id);
+	if (!res)
+		res = gmp_find_by_id(&_global_env, id);
+
+	return res;
+}
+
+static void mc2_update_time(enum crit_level lv, struct mc2_cpu_state *state, lt_t time)
+{
+	if (lv < CRIT_LEVEL_C)
+		sup_update_time(&state->sup_env, time);
+	else if (lv == CRIT_LEVEL_C)
+		gmp_update_time(&_global_env, time);
+	else
+		TRACE("update_time(): Criticality level error!!!!\n");
 }
 
 static void task_departs(struct task_struct *tsk, int job_complete)
@@ -78,6 +123,7 @@ static void task_departs(struct task_struct *tsk, int job_complete)
 	ce = &state->crit_entries[lv];
 	ce->running = tsk;
+	res->is_ghost = 1;
 	TRACE_TASK(tsk, "BECOME GHOST at %llu\n", litmus_clock());
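	/* Ghost semantics: the departed job's reservation keeps draining its
	 * leftover budget at this criticality level, and
	 * mc2_update_ghost_state() clears ce->running and is_ghost once
	 * cur_budget reaches zero. */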
//BUG_ON(hrtimer_active(&ce->ghost_timer)); @@ -107,11 +153,44 @@ static void task_arrives(struct mc2_cpu_state *state, struct task_struct *tsk) } } +/* return: NO_CPU - all CPUs are running tasks with higher priority than Level C */ +static int get_lowest_prio_cpu(void) +{ + struct cpu_entry *ce; + int cpu, ret = NO_CPU; + lt_t latest_deadline = 0; + + raw_spin_lock(&_lowest_prio_cpu.lock); + for_each_online_cpu(cpu) { + ce = &_lowest_prio_cpu.cpu_entries[cpu]; + if (!ce->will_schedule) { + if (!ce->scheduled) { + raw_spin_unlock(&_lowest_prio_cpu.lock); + return ce->cpu; + } else if (ce->lv == CRIT_LEVEL_C && ce->deadline > latest_deadline) { + latest_deadline = ce->deadline; + ret = ce->cpu; + } + } + } + + raw_spin_unlock(&_lowest_prio_cpu.lock); + + return ret; +} + /* NOTE: drops state->lock */ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state) { int local; lt_t update, now; + enum crit_level lv = get_task_crit_level(state->scheduled); + struct next_timer_event *event, *next; + int found_event = 0; + + //TRACE_TASK(state->scheduled, "update_timer!\n"); + if (lv != NUM_CRIT_LEVELS) + TRACE_TASK(state->scheduled, "UPDATE_TIMER LV = %d\n", lv); update = state->sup_env.next_scheduler_update; now = state->sup_env.env.current_time; @@ -163,6 +242,37 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state) litmus_reschedule(state->cpu); } } + + raw_spin_lock(&_global_env.lock); + list_for_each_entry_safe(event, next, &_global_env.next_events, list) { + if (event->timer_armed_on == NO_CPU) { + found_event = 1; + if (event->next_update < litmus_clock()) { + int cpu = get_lowest_prio_cpu(); + TRACE("GLOBAL EVENT PASSED!! poking CPU %d to reschedule\n", cpu); + list_del(&event->list); + kfree(event); + if (cpu != NO_CPU) { + raw_spin_lock(&_lowest_prio_cpu.lock); + _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true; + raw_spin_unlock(&_lowest_prio_cpu.lock); + litmus_reschedule(cpu); + } + } else if (!hrtimer_active(&state->g_timer)) { + int ret; + TRACE("setting global scheduler timer for %llu\n", event->next_update); + ret = __hrtimer_start_range_ns(&state->g_timer, + ns_to_ktime(event->next_update), + 0 /* timer coalescing slack */, + HRTIMER_MODE_ABS_PINNED, + 0 /* wakeup */); + if (!ret) { + event->timer_armed_on = state->cpu; + } + } + } + } + raw_spin_unlock(&_global_env.lock); } static void mc2_update_ghost_state(struct mc2_cpu_state *state) @@ -176,16 +286,20 @@ static void mc2_update_ghost_state(struct mc2_cpu_state *state) ce = &state->crit_entries[lv]; if (ce->running != NULL) { tinfo = get_mc2_state(ce->running); + /* if (lv != CRIT_LEVEL_C) res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id); else continue; + */ + res = res_find_by_id(state, tinfo->mc2_param.res_id); TRACE("LV %d running id %d budget %llu\n", lv, tinfo->mc2_param.res_id, res->cur_budget); if (!res->cur_budget) { struct sup_reservation_environment* sup_env = &state->sup_env; TRACE("GHOST FINISH id %d at %llu\n", tinfo->mc2_param.res_id, litmus_clock()); ce->running = NULL; + res->is_ghost = 0; res = list_first_entry_or_null(&sup_env->active_reservations, struct reservation, list); if (res) litmus_reschedule_local(); @@ -215,6 +329,95 @@ static enum hrtimer_restart on_ghost_timer(struct hrtimer *timer) } */ +static void update_cpu_prio(struct mc2_cpu_state *state) +{ + struct cpu_entry *ce = &_lowest_prio_cpu.cpu_entries[state->cpu]; + enum crit_level lv = get_task_crit_level(state->scheduled); + + if (!state->scheduled) { + // cpu is idle. 
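+		// get_lowest_prio_cpu() prefers an idle CPU as the target for a
+		// Level-C job; otherwise it evicts the Level-C CPU with the
+		// latest deadline. Level-A/B CPUs record deadline 0 and are
+		// never preempted by Level C.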
+ ce->scheduled = NULL; + ce->deadline = ULLONG_MAX; + ce->lv = NUM_CRIT_LEVELS; + } else if (lv == CRIT_LEVEL_C) { + ce->scheduled = state->scheduled; + ce->deadline = get_deadline(state->scheduled); + ce->lv = lv; + } else if (lv < CRIT_LEVEL_C) { + ce->scheduled = state->scheduled; + ce->deadline = 0; + ce->lv = lv; + } +}; + +static enum hrtimer_restart on_global_scheduling_timer(struct hrtimer *timer) +{ + unsigned long flags; + enum hrtimer_restart restart = HRTIMER_NORESTART; + struct mc2_cpu_state *state; + struct next_timer_event *event, *next; + bool schedule_now; + lt_t update, now; + int found_event = 0; + + state = container_of(timer, struct mc2_cpu_state, g_timer); + + /* The scheduling timer should only fire on the local CPU, because + * otherwise deadlocks via timer_cancel() are possible. + * Note: this does not interfere with dedicated interrupt handling, as + * even under dedicated interrupt handling scheduling timers for + * budget enforcement must occur locally on each CPU. + */ + //BUG_ON(state->cpu != raw_smp_processor_id()); + if (state->cpu != raw_smp_processor_id()) + TRACE("BUG!!!!!!!!!!!!! TIMER FIRED ON THE OTHER CPU\n"); + + raw_spin_lock_irqsave(&_global_env.lock, flags); + + update = litmus_clock(); + TRACE("GLOBAL TIMER FIRED at %llu\n", update); + + list_for_each_entry_safe(event, next, &_global_env.next_events, list) { + if (event->next_update < update) { + found_event = 1; + list_del(&event->list); + TRACE("EVENT at %llu IS DELETED\n", event->next_update); + kfree(event); + } + } + + if (!found_event) { + raw_spin_unlock_irqrestore(&_global_env.lock, flags); + return restart; + } + + schedule_now = gmp_update_time(&_global_env, update); + + raw_spin_lock(&state->lock); + mc2_update_ghost_state(state); + raw_spin_unlock(&state->lock); + + now = _global_env.env.current_time; + + TRACE_CUR("on_global_scheduling_timer at %llu, upd:%llu (for cpu=%d) SCHEDULE_NOW = %d\n", + now, update, state->cpu, schedule_now); + + if (schedule_now) { + int cpu = get_lowest_prio_cpu(); + if (cpu != NO_CPU) { + raw_spin_lock(&_lowest_prio_cpu.lock); + _lowest_prio_cpu.cpu_entries[cpu].will_schedule = true; + raw_spin_unlock(&_lowest_prio_cpu.lock); + TRACE("LOWEST CPU = P%d\n", cpu); + litmus_reschedule(cpu); + } + } + + raw_spin_unlock_irqrestore(&_global_env.lock, flags); + + return restart; +} + static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer) { unsigned long flags; @@ -276,6 +479,7 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st if (likely(!ce->running)) { sup_scheduler_update_after(sup_env, res->cur_budget); res->blocked_by_ghost = 0; + res->is_ghost = 0; return tsk; } else { res->blocked_by_ghost = 1; @@ -284,7 +488,34 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st } } } - + // no level A or B tasks + + list_for_each_entry_safe(res, next, &_global_env.active_reservations, list) { + if (res->state == RESERVATION_ACTIVE && res->scheduled_on == NO_CPU) { + tsk = res->ops->dispatch_client(res, &time_slice); + if (likely(tsk)) { + lv = get_task_crit_level(tsk); + if (lv == NUM_CRIT_LEVELS) { + gmp_scheduler_update_after(&_global_env, res->cur_budget); + //raw_spin_unlock(&_global_env.lock); + return tsk; + } else { + ce = &state->crit_entries[lv]; + if (likely(!ce->running)) { + gmp_scheduler_update_after(&_global_env, res->cur_budget); + res->blocked_by_ghost = 0; + res->is_ghost = 0; + res->scheduled_on = state->cpu; + //raw_spin_unlock(&_global_env.lock); + return tsk; + } else { 
+ res->blocked_by_ghost = 1; + } + } + } + } + } + return NULL; } @@ -292,17 +523,30 @@ static struct task_struct* mc2_schedule(struct task_struct * prev) { /* next == NULL means "schedule background work". */ struct mc2_cpu_state *state = local_cpu_state(); - + + raw_spin_lock(&_lowest_prio_cpu.lock); + if (_lowest_prio_cpu.cpu_entries[state->cpu].will_schedule == true) + _lowest_prio_cpu.cpu_entries[state->cpu].will_schedule = false; + raw_spin_unlock(&_lowest_prio_cpu.lock); + raw_spin_lock(&state->lock); - BUG_ON(state->scheduled && state->scheduled != prev); - BUG_ON(state->scheduled && !is_realtime(prev)); + //BUG_ON(state->scheduled && state->scheduled != prev); + //BUG_ON(state->scheduled && !is_realtime(prev)); + if (state->scheduled && state->scheduled != prev) + TRACE("BUG1!!!!!!!!\n"); + if (state->scheduled && !is_realtime(prev)) + TRACE("BUG2!!!!!!!!\n"); /* update time */ state->sup_env.will_schedule = true; - TRACE_TASK(prev, "MC2_SCHEDULE sup_update_time ####\n"); + //TRACE_TASK(prev, "MC2_SCHEDULE sup_update_time ####\n"); sup_update_time(&state->sup_env, litmus_clock()); - TRACE_TASK(prev, "MC2_SCHEDULE sup_update_time !!!!\n"); + + raw_spin_lock(&_global_env.lock); + gmp_update_time(&_global_env, litmus_clock()); + + //TRACE_TASK(prev, "MC2_SCHEDULE sup_update_time !!!!\n"); mc2_update_ghost_state(state); /* remove task from reservation if it blocks */ @@ -311,16 +555,29 @@ static struct task_struct* mc2_schedule(struct task_struct * prev) /* figure out what to schedule next */ state->scheduled = mc2_dispatch(&state->sup_env, state); - + if (state->scheduled && is_realtime(state->scheduled)) + TRACE_TASK(state->scheduled, "mc2_dispatch picked me!\n"); + + raw_spin_lock(&_lowest_prio_cpu.lock); + update_cpu_prio(state); + raw_spin_unlock(&_lowest_prio_cpu.lock); + /* Notify LITMUS^RT core that we've arrived at a scheduling decision. */ sched_state_task_picked(); /* program scheduler timer */ state->sup_env.will_schedule = false; + + raw_spin_unlock(&_global_env.lock); + /* NOTE: drops state->lock */ mc2_update_timer_and_unlock(state); if (prev != state->scheduled && is_realtime(prev)) { + struct mc2_task_state* tinfo = get_mc2_state(prev); + struct reservation* res = tinfo->res_info.client.reservation; + TRACE_TASK(prev, "PREV JOB scheduled_on = P%d\n", res->scheduled_on); + res->scheduled_on = NO_CPU; TRACE_TASK(prev, "descheduled.\n"); } if (state->scheduled) { @@ -354,10 +611,15 @@ static void mc2_task_resume(struct task_struct *tsk) { unsigned long flags; struct mc2_task_state* tinfo = get_mc2_state(tsk); - struct mc2_cpu_state *state = cpu_state_for(tinfo->cpu); + struct mc2_cpu_state *state; TRACE_TASK(tsk, "thread wakes up at %llu\n", litmus_clock()); + if (tinfo->cpu != -1) + state = cpu_state_for(tinfo->cpu); + else + state = local_cpu_state(); + raw_spin_lock_irqsave(&state->lock, flags); /* Requeue only if self-suspension was already processed. */ if (tinfo->has_departed) @@ -365,7 +627,16 @@ static void mc2_task_resume(struct task_struct *tsk) /* Assumption: litmus_clock() is synchronized across cores, * since we might not actually be executing on tinfo->cpu * at the moment. 
*/ - sup_update_time(&state->sup_env, litmus_clock()); + if (tinfo->cpu != -1) { + sup_update_time(&state->sup_env, litmus_clock()); + } else { + raw_spin_lock(&_global_env.lock); + TRACE("RESUME UPDATE ####\n"); + gmp_update_time(&_global_env, litmus_clock()); + TRACE("RESUME UPDATE $$$$\n"); + raw_spin_unlock(&_global_env.lock); + } + mc2_update_ghost_state(state); task_arrives(state, tsk); /* NOTE: drops state->lock */ @@ -385,37 +656,55 @@ static long mc2_complete_job(void) { ktime_t next_release; long err; - struct mc2_cpu_state *state = local_cpu_state(); - struct reservation_environment *env = &(state->sup_env.env); - struct mc2_task_state *tinfo = get_mc2_state(current); - struct reservation *res; - - res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id); - if (!res) - ; // find in global env - - TRACE_CUR("mc2_complete_job at %llu (deadline: %llu) (cur->budget: %llu)\n", litmus_clock(), - get_deadline(current), res->cur_budget); + + TRACE_CUR("mc2_complete_job at %llu (deadline: %llu)\n", litmus_clock(), + get_deadline(current)); tsk_rt(current)->completed = 1; if (tsk_rt(current)->sporadic_release) { - env->time_zero = tsk_rt(current)->sporadic_release_time; + struct mc2_cpu_state *state; + struct reservation_environment *env; + struct mc2_task_state *tinfo; + struct reservation *res; + unsigned long flags; + + local_irq_save(flags); + + state = local_cpu_state(); + env = &(state->sup_env.env); + tinfo = get_mc2_state(current); + + res = res_find_by_id(state, tsk_rt(current)->mc2_data->res_id); + + if (get_task_crit_level(current) < CRIT_LEVEL_C) { + raw_spin_lock(&state->lock); + env->time_zero = tsk_rt(current)->sporadic_release_time; + } else { + raw_spin_lock(&_global_env.lock); + _global_env.env.time_zero = tsk_rt(current)->sporadic_release_time; + } + res->next_replenishment = tsk_rt(current)->sporadic_release_time; - res->cur_budget = 0; - res->env->change_state(res->env, res, RESERVATION_DEPLETED); - if (tinfo->mc2_param.crit == CRIT_LEVEL_A) { + if (get_task_crit_level(current) == CRIT_LEVEL_A) { struct table_driven_reservation *tdres; - - //sup_update_time(&state->sup_env, litmus_clock()); - //res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id); tdres = container_of(res, struct table_driven_reservation, res); tdres->next_interval = 0; tdres->major_cycle_start = tsk_rt(current)->sporadic_release_time; res->next_replenishment += tdres->intervals[0].start; } - TRACE_CUR("CHANGE NEXT_REP = %llu\n NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env.next_scheduler_update); + res->cur_budget = 0; + res->env->change_state(res->env, res, RESERVATION_DEPLETED); + + //TRACE_CUR("CHANGE NEXT_REP = %llu\n NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env.next_scheduler_update); + if (get_task_crit_level(current) < CRIT_LEVEL_C) { + raw_spin_unlock(&state->lock); + } else { + raw_spin_unlock(&_global_env.lock); + } + + local_irq_restore(flags); } prepare_for_next_period(current); @@ -443,8 +732,9 @@ static long mc2_admit_task(struct task_struct *tsk) struct reservation *res; struct mc2_cpu_state *state; struct mc2_task_state *tinfo = kzalloc(sizeof(*tinfo), GFP_ATOMIC); - struct mc2_task *mp = tsk_rt(tsk)->plugin_state; - + struct mc2_task *mp = tsk_rt(tsk)->mc2_data; + enum crit_level lv; + if (!tinfo) return -ENOMEM; @@ -453,33 +743,61 @@ static long mc2_admit_task(struct task_struct *tsk) return err; } + lv = mp->crit; preempt_disable(); - state = cpu_state_for(task_cpu(tsk)); - raw_spin_lock_irqsave(&state->lock, flags); + if (lv < 
CRIT_LEVEL_C) { + state = cpu_state_for(task_cpu(tsk)); + raw_spin_lock_irqsave(&state->lock, flags); - res = sup_find_by_id(&state->sup_env, mp->res_id); + res = sup_find_by_id(&state->sup_env, mp->res_id); - /* found the appropriate reservation (or vCPU) */ - if (res) { - TRACE_TASK(tsk, "FOUND RES ID\n"); - tinfo->mc2_param.crit = mp->crit; - tinfo->mc2_param.res_id = mp->res_id; - - kfree(tsk_rt(tsk)->plugin_state); - tsk_rt(tsk)->plugin_state = NULL; + /* found the appropriate reservation (or vCPU) */ + if (res) { + TRACE_TASK(tsk, "SUP FOUND RES ID\n"); + tinfo->mc2_param.crit = mp->crit; + tinfo->mc2_param.res_id = mp->res_id; + + //kfree(tsk_rt(tsk)->plugin_state); + //tsk_rt(tsk)->plugin_state = NULL; + + err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res); + tinfo->cpu = task_cpu(tsk); + tinfo->has_departed = true; + tsk_rt(tsk)->plugin_state = tinfo; + + /* disable LITMUS^RT's per-thread budget enforcement */ + tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT; + } + + raw_spin_unlock_irqrestore(&state->lock, flags); + } else if (lv == CRIT_LEVEL_C) { + raw_spin_lock_irqsave(&_global_env.lock, flags); - err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res); - tinfo->cpu = task_cpu(tsk); - tinfo->has_departed = true; - tsk_rt(tsk)->plugin_state = tinfo; + res = gmp_find_by_id(&_global_env, mp->res_id); - /* disable LITMUS^RT's per-thread budget enforcement */ - tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT; - } + /* found the appropriate reservation (or vCPU) */ + if (res) { + TRACE_TASK(tsk, "GMP FOUND RES ID\n"); + tinfo->mc2_param.crit = mp->crit; + tinfo->mc2_param.res_id = mp->res_id; + + //kfree(tsk_rt(tsk)->plugin_state); + //tsk_rt(tsk)->plugin_state = NULL; + + err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res); + tinfo->cpu = -1; + tinfo->has_departed = true; + tsk_rt(tsk)->plugin_state = tinfo; - raw_spin_unlock_irqrestore(&state->lock, flags); + /* disable LITMUS^RT's per-thread budget enforcement */ + tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT; + } + raw_spin_unlock_irqrestore(&_global_env.lock, flags); + + } + preempt_enable(); if (err) @@ -493,12 +811,18 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue, { unsigned long flags; struct mc2_task_state* tinfo = get_mc2_state(tsk); - struct mc2_cpu_state *state = cpu_state_for(tinfo->cpu); + struct mc2_cpu_state *state; // = cpu_state_for(tinfo->cpu); struct reservation *res; - + enum crit_level lv = get_task_crit_level(tsk); + TRACE_TASK(tsk, "new RT task %llu (on_rq:%d, running:%d)\n", litmus_clock(), on_runqueue, is_running); + if (tinfo->cpu == -1) + state = local_cpu_state(); + else + state = cpu_state_for(tinfo->cpu); + /* acquire the lock protecting the state and disable interrupts */ raw_spin_lock_irqsave(&state->lock, flags); @@ -511,7 +835,9 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue, if (on_runqueue || is_running) { /* Assumption: litmus_clock() is synchronized across cores * [see comment in pres_task_resume()] */ - sup_update_time(&state->sup_env, litmus_clock()); + raw_spin_lock(&_global_env.lock); + mc2_update_time(lv, state, litmus_clock()); + raw_spin_unlock(&_global_env.lock); mc2_update_ghost_state(state); task_arrives(state, tsk); /* NOTE: drops state->lock */ @@ -521,12 +847,14 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue, } else raw_spin_unlock_irqrestore(&state->lock, flags); - res = sup_find_by_id(&state->sup_env, 
tinfo->mc2_param.res_id); - release_at(tsk, res->next_replenishment); - if (res) - TRACE_TASK(tsk, "next_replenishment = %llu\n", res->next_replenishment); + res = res_find_by_id(state, tinfo->mc2_param.res_id); + + if (res) { + TRACE_TASK(tsk, "mc2_task_new() next_replenishment = %llu\n", res->next_replenishment); + release_at(tsk, res->next_replenishment); + } else - TRACE_TASK(tsk, "next_replenishment = NULL\n"); + TRACE_TASK(tsk, "mc2_task_new() next_replenishment = NULL\n"); } static long mc2_reservation_destroy(unsigned int reservation_id, int cpu) @@ -537,43 +865,71 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu) struct sup_reservation_environment *sup_env; int found = 0; enum crit_level lv = get_task_crit_level(current); - - state = cpu_state_for(cpu); - raw_spin_lock(&state->lock); -// res = sup_find_by_id(&state->sup_env, reservation_id); - sup_env = &state->sup_env; - //if (!res) { - list_for_each_entry_safe(res, next, &sup_env->depleted_reservations, list) { - if (res->id == reservation_id) { - if (lv == CRIT_LEVEL_A) { - struct table_driven_reservation *tdres; - tdres = container_of(res, struct table_driven_reservation, res); - kfree(tdres->intervals); - } - list_del(&res->list); - kfree(res); - found = 1; - ret = 0; - } - } - if (!found) { - list_for_each_entry_safe(res, next, &sup_env->inactive_reservations, list) { + if (cpu == -1) { + raw_spin_lock(&_global_env.lock); + + list_for_each_entry_safe(res, next, &_global_env.depleted_reservations, list) { if (res->id == reservation_id) { - if (lv == CRIT_LEVEL_A) { - struct table_driven_reservation *tdres; - tdres = container_of(res, struct table_driven_reservation, res); - kfree(tdres->intervals); - } + TRACE("DESTROY RES FOUND!!!\n"); list_del(&res->list); kfree(res); found = 1; ret = 0; } } - } - if (!found) { - list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) { + if (!found) { + list_for_each_entry_safe(res, next, &_global_env.inactive_reservations, list) { + if (res->id == reservation_id) { + TRACE("DESTROY RES FOUND!!!\n"); + list_del(&res->list); + kfree(res); + found = 1; + ret = 0; + } + } + } + if (!found) { + list_for_each_entry_safe(res, next, &_global_env.active_reservations, list) { + if (res->id == reservation_id) { + TRACE("DESTROY RES FOUND!!!\n"); + list_del(&res->list); + kfree(res); + found = 1; + ret = 0; + } + } + } + +/* +list_for_each_entry(res, &_global_env.depleted_reservations, list) { + TRACE("DEPLETED LIST R%d\n", res->id); +} +list_for_each_entry(res, &_global_env.inactive_reservations, list) { + TRACE("INACTIVE LIST R%d\n", res->id); +} +list_for_each_entry(res, &_global_env.active_reservations, list) { + TRACE("ACTIVE LIST R%d\n", res->id); +} +*/ + if (list_empty(&_global_env.active_reservations)) + INIT_LIST_HEAD(&_global_env.active_reservations); + if (list_empty(&_global_env.depleted_reservations)) + INIT_LIST_HEAD(&_global_env.depleted_reservations); + if (list_empty(&_global_env.inactive_reservations)) + INIT_LIST_HEAD(&_global_env.inactive_reservations); + if (list_empty(&_global_env.next_events)) + INIT_LIST_HEAD(&_global_env.next_events); + + raw_spin_unlock(&_global_env.lock); + } else { + state = cpu_state_for(cpu); + raw_spin_lock(&state->lock); + + // res = sup_find_by_id(&state->sup_env, reservation_id); + sup_env = &state->sup_env; + //if (!res) { + list_for_each_entry_safe(res, next, &sup_env->depleted_reservations, list) { if (res->id == reservation_id) { if (lv == CRIT_LEVEL_A) { struct table_driven_reservation *tdres; @@ 
-586,10 +942,40 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu) ret = 0; } } - } - //} + if (!found) { + list_for_each_entry_safe(res, next, &sup_env->inactive_reservations, list) { + if (res->id == reservation_id) { + if (lv == CRIT_LEVEL_A) { + struct table_driven_reservation *tdres; + tdres = container_of(res, struct table_driven_reservation, res); + kfree(tdres->intervals); + } + list_del(&res->list); + kfree(res); + found = 1; + ret = 0; + } + } + } + if (!found) { + list_for_each_entry_safe(res, next, &sup_env->active_reservations, list) { + if (res->id == reservation_id) { + if (lv == CRIT_LEVEL_A) { + struct table_driven_reservation *tdres; + tdres = container_of(res, struct table_driven_reservation, res); + kfree(tdres->intervals); + } + list_del(&res->list); + kfree(res); + found = 1; + ret = 0; + } + } + } + //} - raw_spin_unlock(&state->lock); + raw_spin_unlock(&state->lock); + } TRACE("RESERVATION_DESTROY ret = %d\n", ret); return ret; @@ -599,10 +985,15 @@ static void mc2_task_exit(struct task_struct *tsk) { unsigned long flags; struct mc2_task_state* tinfo = get_mc2_state(tsk); - struct mc2_cpu_state *state = cpu_state_for(tinfo->cpu); + struct mc2_cpu_state *state; enum crit_level lv = tinfo->mc2_param.crit; struct crit_entry* ce; + if (tinfo->cpu != -1) + state = cpu_state_for(tinfo->cpu); + else + state = local_cpu_state(); + raw_spin_lock_irqsave(&state->lock, flags); if (state->scheduled == tsk) @@ -616,7 +1007,11 @@ static void mc2_task_exit(struct task_struct *tsk) if (is_running(tsk)) { /* Assumption: litmus_clock() is synchronized across cores * [see comment in pres_task_resume()] */ - sup_update_time(&state->sup_env, litmus_clock()); + //if (lv < CRIT_LEVEL_C) + // sup_update_time(&state->sup_env, litmus_clock()); + raw_spin_lock(&_global_env.lock); + mc2_update_time(lv, state, litmus_clock()); + raw_spin_unlock(&_global_env.lock); mc2_update_ghost_state(state); task_departs(tsk, 0); @@ -644,6 +1039,8 @@ static void mc2_task_exit(struct task_struct *tsk) */ kfree(tsk_rt(tsk)->plugin_state); tsk_rt(tsk)->plugin_state = NULL; + kfree(tsk_rt(tsk)->mc2_data); + tsk_rt(tsk)->mc2_data = NULL; } static long create_polling_reservation( @@ -685,28 +1082,54 @@ static long create_polling_reservation( if (!pres) return -ENOMEM; - state = cpu_state_for(config->cpu); - raw_spin_lock_irqsave(&state->lock, flags); + if (config->cpu != -1) { + state = cpu_state_for(config->cpu); + raw_spin_lock_irqsave(&state->lock, flags); + + res = sup_find_by_id(&state->sup_env, config->id); + if (!res) { + polling_reservation_init(pres, use_edf, periodic, + config->polling_params.budget, + config->polling_params.period, + config->polling_params.relative_deadline, + config->polling_params.offset); + pres->res.id = config->id; + pres->res.blocked_by_ghost = 0; + pres->res.is_ghost = 0; + if (!use_edf) + pres->res.priority = config->priority; + sup_add_new_reservation(&state->sup_env, &pres->res); + err = config->id; + } else { + err = -EEXIST; + } - res = sup_find_by_id(&state->sup_env, config->id); - if (!res) { - polling_reservation_init(pres, use_edf, periodic, - config->polling_params.budget, - config->polling_params.period, - config->polling_params.relative_deadline, - config->polling_params.offset); - pres->res.id = config->id; - pres->res.blocked_by_ghost = 0; - if (!use_edf) - pres->res.priority = config->priority; - sup_add_new_reservation(&state->sup_env, &pres->res); - err = config->id; + raw_spin_unlock_irqrestore(&state->lock, flags); } else { - err = 
-EEXIST; + raw_spin_lock_irqsave(&_global_env.lock, flags); + + res = gmp_find_by_id(&_global_env, config->id); + if (!res) { + polling_reservation_init(pres, use_edf, periodic, + config->polling_params.budget, + config->polling_params.period, + config->polling_params.relative_deadline, + config->polling_params.offset); + pres->res.id = config->id; + pres->res.blocked_by_ghost = 0; + pres->res.scheduled_on = NO_CPU; + pres->res.is_ghost = 0; + if (!use_edf) + pres->res.priority = config->priority; + gmp_add_new_reservation(&_global_env, &pres->res); + TRACE("GMP_ADD_NEW_RESERVATION R%d\n", pres->res.id); + err = config->id; + } else { + err = -EEXIST; + } + raw_spin_unlock_irqrestore(&_global_env.lock, flags); } - - raw_spin_unlock_irqrestore(&state->lock, flags); - + if (err < 0) kfree(pres); @@ -825,10 +1248,12 @@ static long mc2_reservation_create(int res_type, void* __user _config) if (copy_from_user(&config, _config, sizeof(config))) return -EFAULT; - if (config.cpu < 0 || !cpu_online(config.cpu)) { - printk(KERN_ERR "invalid polling reservation (%u): " - "CPU %d offline\n", config.id, config.cpu); - return -EINVAL; + if (config.cpu != -1) { + if (config.cpu < 0 || !cpu_online(config.cpu)) { + printk(KERN_ERR "invalid polling reservation (%u): " + "CPU %d offline\n", config.id, config.cpu); + return -EINVAL; + } } switch (res_type) { @@ -885,19 +1310,30 @@ static long mc2_activate_plugin(void) { int cpu, lv; struct mc2_cpu_state *state; + struct cpu_entry *ce; + gmp_init(&_global_env); + raw_spin_lock_init(&_lowest_prio_cpu.lock); + for_each_online_cpu(cpu) { TRACE("Initializing CPU%d...\n", cpu); state = cpu_state_for(cpu); + ce = &_lowest_prio_cpu.cpu_entries[cpu]; + + ce->cpu = cpu; + ce->scheduled = NULL; + ce->deadline = ULLONG_MAX; + ce->lv = NUM_CRIT_LEVELS; + ce->will_schedule = false; raw_spin_lock_init(&state->lock); state->cpu = cpu; state->scheduled = NULL; for (lv = 0; lv < NUM_CRIT_LEVELS; lv++) { - struct crit_entry *ce = &state->crit_entries[lv]; - ce->level = lv; - ce->running = NULL; + struct crit_entry *cr_entry = &state->crit_entries[lv]; + cr_entry->level = lv; + cr_entry->running = NULL; //hrtimer_init(&ce->ghost_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); //ce->ghost_timer.function = on_ghost_timer; } @@ -905,6 +1341,9 @@ static long mc2_activate_plugin(void) hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); state->timer.function = on_scheduling_timer; + + hrtimer_init(&state->g_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); + state->g_timer.function = on_global_scheduling_timer; } mc2_setup_domain_proc(); @@ -912,17 +1351,26 @@ static long mc2_activate_plugin(void) return 0; } +static void mc2_finish_switch(struct task_struct *prev) +{ + struct mc2_cpu_state *state = local_cpu_state(); + + state->scheduled = is_realtime(current) ? current : NULL; +} + static long mc2_deactivate_plugin(void) { int cpu; struct mc2_cpu_state *state; struct reservation *res; + struct next_timer_event *event; for_each_online_cpu(cpu) { state = cpu_state_for(cpu); raw_spin_lock(&state->lock); hrtimer_cancel(&state->timer); + hrtimer_cancel(&state->g_timer); /* Delete all reservations --- assumes struct reservation * is prefix of containing struct. 
*/
@@ -954,6 +1402,46 @@ static long mc2_deactivate_plugin(void)
 		raw_spin_unlock(&state->lock);
 	}
+	raw_spin_lock(&_global_env.lock);
+
+	while (!list_empty(&_global_env.active_reservations)) {
+		TRACE("RES FOUND!!!\n");
+		res = list_first_entry(
+			&_global_env.active_reservations,
+			struct reservation, list);
+		list_del(&res->list);
+		kfree(res);
+	}
+
+	while (!list_empty(&_global_env.inactive_reservations)) {
+		TRACE("RES FOUND!!!\n");
+		res = list_first_entry(
+			&_global_env.inactive_reservations,
+			struct reservation, list);
+		list_del(&res->list);
+		kfree(res);
+	}
+
+	while (!list_empty(&_global_env.depleted_reservations)) {
+		TRACE("RES FOUND!!!\n");
+		res = list_first_entry(
+			&_global_env.depleted_reservations,
+			struct reservation, list);
+		list_del(&res->list);
+		kfree(res);
+	}
+
+	while (!list_empty(&_global_env.next_events)) {
+		TRACE("EVENT FOUND!!!\n");
+		event = list_first_entry(
+			&_global_env.next_events,
+			struct next_timer_event, list);
+		list_del(&event->list);
+		kfree(event);
+	}
+
+	raw_spin_unlock(&_global_env.lock);
+
 	destroy_domain_proc_info(&mc2_domain_proc_info);
 	return 0;
 }
@@ -961,6 +1449,7 @@ static long mc2_deactivate_plugin(void)
 static struct sched_plugin mc2_plugin = {
 	.plugin_name		= "MC2",
 	.schedule		= mc2_schedule,
+	.finish_switch		= mc2_finish_switch,
 	.task_wake_up		= mc2_task_resume,
 	.admit_task		= mc2_admit_task,
 	.task_new		= mc2_task_new,
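For reference, gmp_scheduler_update_at() above maintains _global_env.next_events as a time-ordered queue with at most one event per expiry instant: a request for a time that is already queued is dropped, otherwise the new event is linked in before the first later event or appended at the tail. A minimal stand-alone sketch of that discipline, written against a plain singly-linked list instead of the kernel's struct list_head (the names and malloc()-based allocation here are illustrative only, not part of the patch):

#include <stdio.h>
#include <stdlib.h>

typedef unsigned long long lt_t;

struct event {
	lt_t next_update;
	struct event *next;
};

/* Queue a scheduler update at `when`, keeping the list sorted by
 * next_update and avoiding duplicate events for the same instant. */
static void schedule_update_at(struct event **head, lt_t when)
{
	struct event **pos = head;
	struct event *e;

	/* walk the sorted list up to the first event at or after `when` */
	while (*pos && (*pos)->next_update < when)
		pos = &(*pos)->next;

	/* deduplicate: an event for this instant is already queued */
	if (*pos && (*pos)->next_update == when)
		return;

	e = malloc(sizeof(*e));
	if (!e)
		return;
	e->next_update = when;
	e->next = *pos;	/* link before the first later event (or at the tail) */
	*pos = e;
}

int main(void)
{
	struct event *head = NULL, *e;

	schedule_update_at(&head, 300);
	schedule_update_at(&head, 100);
	schedule_update_at(&head, 200);
	schedule_update_at(&head, 100);	/* duplicate; ignored */

	for (e = head; e; e = e->next)	/* prints 100, 200, 300 */
		printf("%llu\n", e->next_update);
	return 0;
}

Keeping the queue sorted means the earliest pending update is always at the head, which is what the timer-arming paths (mc2_update_timer_and_unlock() and on_global_scheduling_timer()) walk when deciding what to arm or expire next.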