From 16a64e75ff5d1deeeb8adaaa0d11b1d6fe236bbe Mon Sep 17 00:00:00 2001
From: Namhoon Kim
Date: Wed, 21 Jan 2015 15:54:21 -0500
Subject: Initial level-C implementation.

Add a global reservation environment (_global_env) for criticality
level C: level-C reservations are managed through the gmp_* interface
under a global event lock, per-CPU hrtimers can be armed on behalf of
global timer events, and reservation creation, destruction, and task
admission treat cpu == -1 as the global environment.
---
 litmus/sched_mc2.c | 453 ++++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 382 insertions(+), 71 deletions(-)

diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index b9f05238461b..3c8aa739345d 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -15,6 +15,8 @@
 #include
 #include

+struct gmp_reservation_environment _global_env;
+
 struct mc2_task_state {
     struct task_client res_info;
     int cpu;
@@ -26,11 +28,18 @@ struct mc2_cpu_state {
     raw_spinlock_t lock;

     struct sup_reservation_environment sup_env;
+    struct gmp_reservation_environment* gmp_env;
     struct hrtimer timer;

     int cpu;
     struct task_struct* scheduled;
+    struct task_struct* will_schedule;
+    struct task_struct* linked; // for level C
     enum crit_level run_level;
+    struct task_struct* crit_entry[NUM_CRIT_LEVELS]; // mc2_task_state (get_mc2_state)
+
+    // indicates whether the currently armed timer event is global
+    bool is_global_event;
 };

 static DEFINE_PER_CPU(struct mc2_cpu_state, mc2_cpu_state);
@@ -74,12 +83,14 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 {
     int local;
     lt_t update, now;
-
+    struct next_timer_event *n_event = NULL;
+    int global_found = 0;
+
     update = state->sup_env.next_scheduler_update;
     now = state->sup_env.env.current_time;
-
+
     /* Be sure we're actually running on the right core,
-     * as pres_update_timer() is also called from pres_task_resume(),
+     * as mc2_update_timer() is also called from mc2_task_resume(),
      * which might be called on any CPU when a thread resumes.
      */
     local = local_cpu_state() == state;
@@ -87,16 +98,50 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
     /* Must drop state lock before calling into hrtimer_start(), which
      * may raise a softirq, which in turn may wake ksoftirqd. */
     raw_spin_unlock(&state->lock);
+
+    raw_spin_lock(&(_global_env.event_lock));
+    list_for_each_entry(n_event, &state->gmp_env->next_events, list) {
+        TRACE("G_EVENT time: %llu, timer_armed_on: %d\n", n_event->next_event, n_event->timer_armed_on == NO_CPU?(-1):n_event->timer_armed_on);
+        if (n_event->timer_armed_on == NO_CPU) {
+            global_found = 1;
+            break;
+        }
+    }
+
+    if (global_found == 1) {
+        if (update >= n_event->next_event) {
+            update = n_event->next_event;
+            now = _global_env.env.current_time;
+            //state->is_global_event = true;
+            //n_event->timer_armed_on = state->cpu;
+        } else { // next event is sup
+            global_found = 0;
+        }
+    }
+    raw_spin_unlock(&(_global_env.event_lock));
+
     if (update <= now) {
         litmus_reschedule(state->cpu);
     } else if (likely(local && update != SUP_NO_SCHEDULER_UPDATE)) {
         /* Reprogram only if not already set correctly. */
         if (!hrtimer_active(&state->timer) ||
             ktime_to_ns(hrtimer_get_expires(&state->timer)) != update) {
+
+            if ((ktime_to_ns(hrtimer_get_expires(&state->timer)) > update) && (state->is_global_event == true)) {
+                struct next_timer_event *prev_event = NULL;
+                raw_spin_lock(&(_global_env.event_lock));
+                list_for_each_entry(prev_event, &state->gmp_env->next_events, list) {
+                    if (prev_event->timer_armed_on == state->cpu) {
+                        prev_event->timer_armed_on = NO_CPU;
+                        break;
+                    }
+                }
+                raw_spin_unlock(&(_global_env.event_lock));
+            }
             TRACE("canceling timer...\n");
             hrtimer_cancel(&state->timer);
-            TRACE("setting scheduler timer for %llu\n", update);
+            TRACE("setting scheduler (global: %d) timer for %llu\n", state->is_global_event, update);
             /* We cannot use hrtimer_start() here because the
              * wakeup flag must be set to zero. */
             __hrtimer_start_range_ns(&state->timer,
@@ -104,6 +149,16 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
                 0 /* timer coalescing slack */,
                 HRTIMER_MODE_ABS_PINNED,
                 0 /* wakeup */);
+            if (global_found) {
+                raw_spin_lock(&(_global_env.event_lock));
+                state->is_global_event = true;
+                n_event->timer_armed_on = state->cpu;
+                raw_spin_unlock(&(_global_env.event_lock));
+            } else {
+                state->is_global_event = false;
+            }
+
+            TRACE("set scheduler (global: %d) timer for %llu on P%d\n", state->is_global_event, update, n_event->timer_armed_on);
         }
     } else if (unlikely(!local && update != SUP_NO_SCHEDULER_UPDATE)) {
         /* Poke remote core only if timer needs to be set earlier than
@@ -132,6 +187,7 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
     unsigned long flags;
     enum hrtimer_restart restart = HRTIMER_NORESTART;
     struct mc2_cpu_state *state;
+    struct next_timer_event *n_event, *next;
     lt_t update, now;

     state = container_of(timer, struct mc2_cpu_state, timer);
@@ -144,16 +200,47 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
      */
     BUG_ON(state->cpu != raw_smp_processor_id());

+    TRACE("TIMER fired at %llu\n", litmus_clock());
+
+    if (state->is_global_event == true) {
+
+        raw_spin_lock_irqsave(&(_global_env.event_lock), flags);
+
+        TRACE("GLOBAL EVENT FIRED\n");
+        list_for_each_entry_safe(n_event, next, &state->gmp_env->next_events, list) {
+            if (n_event->timer_armed_on == state->cpu) {
+                list_del(&n_event->list);
+                TRACE("EVENT ENTRY IS DELETED\n");
+                break;
+            }
+        }
+        gmp_update_time(state->gmp_env, litmus_clock());
+
+        update = n_event->next_event;
+        now = state->gmp_env->env.current_time;
+
+        kfree(n_event);
+        TRACE("ON TIMER UPDATE = %llu, NOW = %llu\n", update, now);
+        raw_spin_unlock_irqrestore(&(_global_env.event_lock), flags);
+    }
+
     raw_spin_lock_irqsave(&state->lock, flags);
-    sup_update_time(&state->sup_env, litmus_clock());
+
-    update = state->sup_env.next_scheduler_update;
-    now = state->sup_env.env.current_time;
+    if (state->is_global_event != true) {
+        sup_update_time(&state->sup_env, litmus_clock());
+
+        update = state->sup_env.next_scheduler_update;
+        now = state->sup_env.env.current_time;
+    } else {
+        state->is_global_event = false;
+    }

     TRACE_CUR("on_scheduling_timer at %llu, upd:%llu (for cpu=%d)\n",
         now, update, state->cpu);

-    if (update <= now) {
+    if (update <= now || state->gmp_env->schedule_now == true) {
         litmus_reschedule_local();
     } else if (update != SUP_NO_SCHEDULER_UPDATE) {
         hrtimer_set_expires(timer, ns_to_ktime(update));
@@ -161,7 +248,7 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
     }

     raw_spin_unlock_irqrestore(&state->lock, flags);
-
+
     return restart;
 }

@@ -176,10 +263,20 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
     BUG_ON(state->scheduled && state->scheduled != prev);
     BUG_ON(state->scheduled && !is_realtime(prev));

+    tinfo = get_mc2_state(prev);
+    if (state->scheduled != NULL) {
+        struct reservation* res;
+        if (tinfo->mc2_param.crit == CRIT_LEVEL_C) {
+            res = gmp_find_by_id(state->gmp_env, tinfo->mc2_param.res_id);
+            res->scheduled_on = NO_CPU;
+            prev->rt_param.scheduled_on = NO_CPU;
+        }
+    }
+
     /* update time */
     state->sup_env.will_schedule = true;
     sup_update_time(&state->sup_env, litmus_clock());
-
+
     /* remove task from reservation if it blocks */
     if (is_realtime(prev) && !is_running(prev))
         task_departs(prev, is_completed(prev));
@@ -187,6 +284,17 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
     /* figure out what to schedule next */
     state->scheduled = sup_dispatch(&state->sup_env);

+    if (!state->scheduled) {
+        raw_spin_lock(&(_global_env.event_lock));
+
+        state->gmp_env->will_schedule = true;
+        gmp_update_time(state->gmp_env, litmus_clock());
+        //state->scheduled = gmp_dispatch(&_global_env);
+        state->gmp_env->will_schedule = false;
+
+        raw_spin_unlock(&(_global_env.event_lock));
+    }
+
     /* Notify LITMUS^RT core that we've arrived at a scheduling decision. */
     sched_state_task_picked();

@@ -196,13 +304,27 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
     mc2_update_timer_and_unlock(state);

     if (prev != state->scheduled && is_realtime(prev)) {
+        struct reservation* res;
         TRACE_TASK(prev, "descheduled.\n");
+        TRACE_TASK(state->scheduled, "SCHEDULED.\n");
         state->run_level = NUM_CRIT_LEVELS;
+        tinfo = get_mc2_state(prev);
+        if (tinfo->mc2_param.crit == CRIT_LEVEL_C) {
+            res = gmp_find_by_id(state->gmp_env, tinfo->mc2_param.res_id);
+            res->scheduled_on = NO_CPU;
+            prev->rt_param.scheduled_on = NO_CPU;
+        }
     }
     if (state->scheduled) {
+        struct reservation* res;
         TRACE_TASK(state->scheduled, "scheduled.\n");
-        //tinfo = get_mc2_state(state->scheduled);
-        //state->run_level = tinfo->mc2_param.crit;
+        tinfo = get_mc2_state(state->scheduled);
+        state->run_level = tinfo->mc2_param.crit;
+        if (tinfo->mc2_param.crit == CRIT_LEVEL_C) {
+            res = gmp_find_by_id(state->gmp_env, tinfo->mc2_param.res_id);
+            res->scheduled_on = state->cpu;
+            state->scheduled->rt_param.scheduled_on = state->cpu;
+        }
     }

     return state->scheduled;
@@ -230,10 +352,16 @@ static void mc2_task_resume(struct task_struct *tsk)
 {
     unsigned long flags;
     struct mc2_task_state* tinfo = get_mc2_state(tsk);
-    struct mc2_cpu_state *state = cpu_state_for(tinfo->cpu);
+    struct mc2_cpu_state *state; // = cpu_state_for(tinfo->cpu);

     TRACE_TASK(tsk, "thread wakes up at %llu\n", litmus_clock());

+    if (tinfo->mc2_param.crit != CRIT_LEVEL_C) {
+        state = cpu_state_for(tinfo->cpu);
+    } else {
+        state = local_cpu_state();
+    }
+
     raw_spin_lock_irqsave(&state->lock, flags);
     /* Requeue only if self-suspension was already processed. */
     if (tinfo->has_departed)
@@ -241,8 +369,16 @@ static void mc2_task_resume(struct task_struct *tsk)
         /* Assumption: litmus_clock() is synchronized across cores,
          * since we might not actually be executing on tinfo->cpu
          * at the moment.
          */
-        sup_update_time(&state->sup_env, litmus_clock());
-        task_arrives(tsk);
+        if (tinfo->mc2_param.crit != CRIT_LEVEL_C) {
+            sup_update_time(&state->sup_env, litmus_clock());
+            task_arrives(tsk);
+        } else if (tinfo->mc2_param.crit == CRIT_LEVEL_C) {
+            raw_spin_lock(&(_global_env.event_lock));
+            gmp_update_time(state->gmp_env, litmus_clock());
+            task_arrives(tsk);
+            raw_spin_unlock(&(_global_env.event_lock));
+        }
+
         /* NOTE: drops state->lock */
         TRACE("mc2_resume()\n");
         mc2_update_timer_and_unlock(state);
@@ -255,24 +391,40 @@ static void mc2_task_resume(struct task_struct *tsk)
     resume_legacy_task_model_updates(tsk);
 }

+static void mc2_task_block(struct task_struct *task)
+{
+    struct mc2_task_state *tinfo;
+
+    tinfo = get_mc2_state(task);
+
+    TRACE_TASK(task, "TASK BLOCK\n");
+    if (tinfo->mc2_param.crit == CRIT_LEVEL_C) {
+        struct reservation *res = gmp_find_by_id(&_global_env, tinfo->mc2_param.res_id);
+        res->scheduled_on = NO_CPU;
+        task->rt_param.scheduled_on = NO_CPU;
+    }
+}
+
 /* syscall backend for job completions */
 static long mc2_complete_job(void)
 {
     ktime_t next_release;
     long err;
     struct mc2_cpu_state *state = local_cpu_state();
-    struct reservation_environment *env = &(state->sup_env.env);
+    struct reservation_environment *env = NULL;
     struct mc2_task_state *tinfo = get_mc2_state(current);

+    if (tinfo->mc2_param.crit == CRIT_LEVEL_C)
+        env = &(_global_env.env);
+    else
+        env = &(state->sup_env.env);

-    TRACE_CUR("mc2_complete_job at %llu (deadline: %llu)\n", litmus_clock(),
-        get_deadline(current));
+    TRACE_CUR("mc2_complete_job at %llu (deadline: %llu)\n", litmus_clock(), get_deadline(current));

     tsk_rt(current)->completed = 1;

     if (tsk_rt(current)->sporadic_release) {
         env->time_zero = tsk_rt(current)->sporadic_release_time;
-
+        hrtimer_cancel(&state->timer);
         if (tinfo->mc2_param.crit == CRIT_LEVEL_A) {
             struct reservation *res;
             struct table_driven_reservation *tdres;
@@ -286,7 +438,7 @@ static long mc2_complete_job(void)
             res->next_replenishment += tdres->intervals[0].start;
             res->env->change_state(res->env, res, RESERVATION_DEPLETED);

-            TRACE_CUR("CHANGE NEXT_REP = %llu\n NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env.next_scheduler_update);
+            TRACE_CUR("CHANGE NEXT_REP = %llu NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env.next_scheduler_update);
         }
     }

@@ -327,32 +479,55 @@ static long mc2_admit_task(struct task_struct *tsk)
     }

     preempt_disable();
-
-    state = cpu_state_for(task_cpu(tsk));
-    raw_spin_lock_irqsave(&state->lock, flags);
-
-    res = sup_find_by_id(&state->sup_env, mp->res_id);
-
-    /* found the appropriate reservation (or vCPU) */
-    if (res) {
-        TRACE_TASK(tsk, "FOUND RES ID\n");
-        tinfo->mc2_param.crit = mp->crit;
-        tinfo->mc2_param.res_id = mp->res_id;
+    if (mp->crit == CRIT_LEVEL_C) {
+        raw_spin_lock_irqsave(&(_global_env.event_lock), flags);

-        kfree(tsk_rt(tsk)->plugin_state);
-        tsk_rt(tsk)->plugin_state = NULL;
+        res = gmp_find_by_id(&_global_env, mp->res_id);
+        if (res) {
+            TRACE_TASK(tsk, "FOUND GMP RES ID\n");
+            tinfo->mc2_param.crit = mp->crit;
+            tinfo->mc2_param.res_id = mp->res_id;
+
+            kfree(tsk_rt(tsk)->plugin_state);
+            tsk_rt(tsk)->plugin_state = NULL;
+
+            err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res);
+            tinfo->cpu = -1;
+            tinfo->has_departed = true;
+            tsk_rt(tsk)->plugin_state = tinfo;
+
+            tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
+        }

-        err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res);
-        tinfo->cpu = task_cpu(tsk);
-        tinfo->has_departed = true;
-        tsk_rt(tsk)->plugin_state = tinfo;
+        raw_spin_unlock_irqrestore(&(_global_env.event_lock), flags);
+
+    } else {
+        state = cpu_state_for(task_cpu(tsk));
+        raw_spin_lock_irqsave(&state->lock, flags);

-        /* disable LITMUS^RT's per-thread budget enforcement */
-        tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
-    }
+        res = sup_find_by_id(&state->sup_env, mp->res_id);

-    raw_spin_unlock_irqrestore(&state->lock, flags);
+        /* found the appropriate reservation (or vCPU) */
+        if (res) {
+            TRACE_TASK(tsk, "FOUND SUP RES ID\n");
+            tinfo->mc2_param.crit = mp->crit;
+            tinfo->mc2_param.res_id = mp->res_id;
+
+            kfree(tsk_rt(tsk)->plugin_state);
+            tsk_rt(tsk)->plugin_state = NULL;
+
+            err = mc2_task_client_init(&tinfo->res_info, &tinfo->mc2_param, tsk, res);
+            tinfo->cpu = task_cpu(tsk);
+            tinfo->has_departed = true;
+            tsk_rt(tsk)->plugin_state = tinfo;
+
+            /* disable LITMUS^RT's per-thread budget enforcement */
+            tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
+        }
+        raw_spin_unlock_irqrestore(&state->lock, flags);
+    }
+
     preempt_enable();

     if (err)
@@ -366,15 +541,29 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
 {
     unsigned long flags;
     struct mc2_task_state* tinfo = get_mc2_state(tsk);
-    struct mc2_cpu_state *state = cpu_state_for(tinfo->cpu);
+    struct mc2_cpu_state *state; // = cpu_state_for(tinfo->cpu);
     struct reservation *res;

     TRACE_TASK(tsk, "new RT task %llu (on_rq:%d, running:%d)\n",
         litmus_clock(), on_runqueue, is_running);

+    if (tinfo->mc2_param.crit != CRIT_LEVEL_C) {
+        state = cpu_state_for(tinfo->cpu);
+    } else {
+        state = local_cpu_state();
+    }
+
     /* acquire the lock protecting the state and disable interrupts */
     raw_spin_lock_irqsave(&state->lock, flags);

+    if (tinfo->mc2_param.crit != CRIT_LEVEL_C) {
+        res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id);
+    } else {
+        raw_spin_lock(&(_global_env.event_lock));
+        res = gmp_find_by_id(&_global_env, tinfo->mc2_param.res_id);
+        raw_spin_unlock(&(_global_env.event_lock));
+    }
+
     if (is_running) {
         state->scheduled = tsk;
         /* make sure this task should actually be running */
@@ -384,7 +573,14 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
     if (on_runqueue || is_running) {
         /* Assumption: litmus_clock() is synchronized across cores
          * [see comment in pres_task_resume()] */
-        sup_update_time(&state->sup_env, litmus_clock());
+        if (tinfo->mc2_param.crit != CRIT_LEVEL_C) {
+            sup_update_time(&state->sup_env, litmus_clock());
+        } else if (tinfo->mc2_param.crit == CRIT_LEVEL_C) {
+            raw_spin_lock(&(_global_env.event_lock));
+            TRACE_TASK(tsk, "CALL GMP_UPDATE_TIME in task_new at %llu\n", litmus_clock());
+            gmp_update_time(state->gmp_env, litmus_clock());
+            raw_spin_unlock(&(_global_env.event_lock));
+        }
         task_arrives(tsk);
         /* NOTE: drops state->lock */
         TRACE("mc2_new()\n");
@@ -393,7 +589,6 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
     } else
         raw_spin_unlock_irqrestore(&state->lock, flags);

-    res = sup_find_by_id(&state->sup_env, tinfo->mc2_param.res_id);
     release_at(tsk, res->next_replenishment);
     if (res)
         TRACE_TASK(tsk, "next_replenishment = %llu\n", res->next_replenishment);
@@ -407,9 +602,14 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)
     struct mc2_cpu_state *state;
     struct reservation *res = NULL, *next;
     struct sup_reservation_environment *sup_env;
+    struct gmp_reservation_environment *gmp_env;
     int found = 0;

-    state = cpu_state_for(cpu);
+    if (cpu != -1)
+        state = cpu_state_for(cpu);
+    else
+        state = local_cpu_state();
+
     raw_spin_lock(&state->lock);

     // res = sup_find_by_id(&state->sup_env, reservation_id);
@@ -447,6 +647,43 @@ static long mc2_reservation_destroy(unsigned int reservation_id, int cpu)

     raw_spin_unlock(&state->lock);

+    raw_spin_lock(&(_global_env.event_lock));
+
+    gmp_env = &_global_env;
+    //if (!res) {
+    if (!found) {
+        list_for_each_entry_safe(res, next, &gmp_env->depleted_reservations, list) {
+            if (res->id == reservation_id) {
+                list_del(&res->list);
+                //kfree(res);
+                found = 1;
+                ret = 0;
+            }
+        }
+    }
+    if (!found) {
+        list_for_each_entry_safe(res, next, &gmp_env->inactive_reservations, list) {
+            if (res->id == reservation_id) {
+                list_del(&res->list);
+                //kfree(res);
+                found = 1;
+                ret = 0;
+            }
+        }
+    }
+    if (!found) {
+        list_for_each_entry_safe(res, next, &gmp_env->active_reservations, list) {
+            if (res->id == reservation_id) {
+                list_del(&res->list);
+                //kfree(res);
+                found = 1;
+                ret = 0;
+            }
+        }
+    }
+
+    raw_spin_unlock(&(_global_env.event_lock));
+
     TRACE("RESERVATION_DESTROY ret = %d\n", ret);
     return ret;
 }
@@ -455,8 +692,14 @@ static void mc2_task_exit(struct task_struct *tsk)
 {
     unsigned long flags;
     struct mc2_task_state* tinfo = get_mc2_state(tsk);
-    struct mc2_cpu_state *state = cpu_state_for(tinfo->cpu);
+    struct mc2_cpu_state *state; // = cpu_state_for(tinfo->cpu);

+    if (tinfo->mc2_param.crit != CRIT_LEVEL_C) {
+        state = cpu_state_for(tinfo->cpu);
+    } else {
+        state = local_cpu_state();
+    }
+
     raw_spin_lock_irqsave(&state->lock, flags);

     if (state->scheduled == tsk)
@@ -466,7 +709,13 @@ static void mc2_task_exit(struct task_struct *tsk)
     if (is_running(tsk)) {
         /* Assumption: litmus_clock() is synchronized across cores
          * [see comment in pres_task_resume()] */
-        sup_update_time(&state->sup_env, litmus_clock());
+        if (tinfo->mc2_param.crit != CRIT_LEVEL_C)
+            sup_update_time(&state->sup_env, litmus_clock());
+        else {
+            raw_spin_lock(&(_global_env.event_lock));
+            gmp_update_time(state->gmp_env, litmus_clock());
+            raw_spin_unlock(&(_global_env.event_lock));
+        }
         task_departs(tsk, 0);
         /* NOTE: drops state->lock */
         TRACE("mc2_exit()\n");
@@ -505,7 +754,7 @@ static long create_polling_reservation(
     int use_edf = config->priority == LITMUS_NO_PRIORITY;
     int periodic = res_type == PERIODIC_POLLING;
     long err = -EINVAL;
-
+
     if (config->polling_params.budget > config->polling_params.period) {
         printk(KERN_ERR "invalid polling reservation (%u): "
@@ -533,26 +782,48 @@ static long create_polling_reservation(
     if (!pres)
         return -ENOMEM;

-    state = cpu_state_for(config->cpu);
-    raw_spin_lock_irqsave(&state->lock, flags);
+    if (config->cpu != -1) {
+        state = cpu_state_for(config->cpu);
+        raw_spin_lock_irqsave(&state->lock, flags);

-    res = sup_find_by_id(&state->sup_env, config->id);
-    if (!res) {
-        polling_reservation_init(pres, use_edf, periodic,
-            config->polling_params.budget,
-            config->polling_params.period,
-            config->polling_params.relative_deadline,
-            config->polling_params.offset);
-        pres->res.id = config->id;
-        if (!use_edf)
-            pres->res.priority = config->priority;
-        sup_add_new_reservation(&state->sup_env, &pres->res);
-        err = config->id;
-    } else {
-        err = -EEXIST;
-    }
+        res = sup_find_by_id(&state->sup_env, config->id);
+        if (!res) {
+            polling_reservation_init(pres, use_edf, periodic,
+                config->polling_params.budget,
+                config->polling_params.period,
+                config->polling_params.relative_deadline,
+                config->polling_params.offset);
+            pres->res.id = config->id;
+            if (!use_edf)
+                pres->res.priority = config->priority;
+            sup_add_new_reservation(&state->sup_env, &pres->res);
+            err = config->id;
+        } else {
+            err = -EEXIST;
+        }

-    raw_spin_unlock_irqrestore(&state->lock, flags);
+        raw_spin_unlock_irqrestore(&state->lock, flags);
+    } else if (config->cpu == -1) {
+        raw_spin_lock_irqsave(&(_global_env.event_lock), flags);
+
+        res = gmp_find_by_id(&_global_env, config->id);
+        if (!res) {
+            polling_reservation_init(pres, use_edf, periodic,
+                config->polling_params.budget,
+                config->polling_params.period,
+                config->polling_params.relative_deadline,
+                config->polling_params.offset);
+            pres->res.id = config->id;
+            if (!use_edf)
+                pres->res.priority = config->priority;
+            gmp_add_new_reservation(&_global_env, &pres->res);
+            err = config->id;
+        } else {
+            err = -EEXIST;
+        }
+
+        raw_spin_unlock_irqrestore(&(_global_env.event_lock), flags);
+    }

     if (err < 0)
         kfree(pres);
@@ -671,10 +942,12 @@ static long mc2_reservation_create(int res_type, void* __user _config)
     if (copy_from_user(&config, _config, sizeof(config)))
         return -EFAULT;

-    if (config.cpu < 0 || !cpu_online(config.cpu)) {
-        printk(KERN_ERR "invalid polling reservation (%u): "
-               "CPU %d offline\n", config.id, config.cpu);
-        return -EINVAL;
+    if (config.cpu != -1) {
+        if (config.cpu < 0 || !cpu_online(config.cpu)) {
+            printk(KERN_ERR "invalid polling reservation (%u): "
+                   "CPU %d offline\n", config.id, config.cpu);
+            return -EINVAL;
+        }
     }

     switch (res_type) {
@@ -732,6 +1005,8 @@ static long mc2_activate_plugin(void)
     int cpu;
     struct mc2_cpu_state *state;

+    gmp_init(&_global_env);
+
     for_each_online_cpu(cpu) {
         TRACE("Initializing CPU%d...\n", cpu);

         state = cpu_state_for(cpu);

         raw_spin_lock_init(&state->lock);
         state->cpu = cpu;
         state->scheduled = NULL;
-
+        state->will_schedule = NULL;
+        state->linked = NULL;
+        state->gmp_env = &_global_env;
+        state->is_global_event = false;
+
         sup_init(&state->sup_env);

         hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
@@ -794,6 +1073,37 @@ static long mc2_deactivate_plugin(void)
         raw_spin_unlock(&state->lock);
     }

+    raw_spin_lock(&(_global_env.event_lock));
+
+    /* Delete all reservations --- assumes struct reservation
+     * is prefix of containing struct. */
+
+    while (!list_empty(&_global_env.active_reservations)) {
+        res = list_first_entry(
+            &_global_env.active_reservations,
+            struct reservation, list);
+        list_del(&res->list);
+        kfree(res);
+    }
+
+    while (!list_empty(&_global_env.inactive_reservations)) {
+        res = list_first_entry(
+            &_global_env.inactive_reservations,
+            struct reservation, list);
+        list_del(&res->list);
+        kfree(res);
+    }
+
+    while (!list_empty(&_global_env.depleted_reservations)) {
+        res = list_first_entry(
+            &_global_env.depleted_reservations,
+            struct reservation, list);
+        list_del(&res->list);
+        kfree(res);
+    }
+
+    raw_spin_unlock(&(_global_env.event_lock));
+
     destroy_domain_proc_info(&mc2_domain_proc_info);
     return 0;
 }
@@ -802,6 +1112,7 @@ static struct sched_plugin mc2_plugin = {
     .plugin_name    = "MC2",
     .schedule       = mc2_schedule,
     .task_wake_up   = mc2_task_resume,
+    .task_block     = mc2_task_block,
     .admit_task     = mc2_admit_task,
     .task_new       = mc2_task_new,
     .task_exit      = mc2_task_exit,
--
cgit v1.2.2
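
The central mechanism of this patch is the timer-arming decision in mc2_update_timer_and_unlock(): walk _global_env's event list for the first event that no CPU has armed yet, and program the local hrtimer for the earlier of the per-CPU (sup) update time and that global event, recording the claim in timer_armed_on and state->is_global_event. The following is a minimal userspace sketch of that decision only — struct global_env, pick_timer_expiry(), and the pthread mutex (standing in for the kernel's raw spinlock on _global_env.event_lock) are illustrative, not LITMUS^RT API:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NO_CPU (-1)

/* one pending global (level-C) scheduler event */
struct next_timer_event {
    uint64_t next_event;            /* absolute expiry time */
    int timer_armed_on;             /* CPU whose timer covers it, or NO_CPU */
    struct next_timer_event *next;
};

struct global_env {
    pthread_mutex_t event_lock;     /* stands in for _global_env.event_lock */
    struct next_timer_event *events;
};

/* Pick what to program into this CPU's timer: the local (sup) update
 * time, or the first unarmed global event if it expires no later.
 * If the global event wins, claim it for this CPU. */
static uint64_t pick_timer_expiry(struct global_env *genv, uint64_t sup_update,
                                  int this_cpu, bool *is_global)
{
    struct next_timer_event *ev;
    uint64_t update = sup_update;

    *is_global = false;
    pthread_mutex_lock(&genv->event_lock);
    for (ev = genv->events; ev; ev = ev->next) {
        if (ev->timer_armed_on != NO_CPU)
            continue;               /* already covered by another CPU */
        if (update >= ev->next_event) {
            update = ev->next_event;
            ev->timer_armed_on = this_cpu;
            *is_global = true;
        }
        break;                      /* only the first unarmed event is considered */
    }
    pthread_mutex_unlock(&genv->event_lock);
    return update;
}

int main(void)
{
    struct next_timer_event ev = { .next_event = 100, .timer_armed_on = NO_CPU };
    struct global_env genv = { PTHREAD_MUTEX_INITIALIZER, &ev };
    bool is_global;
    uint64_t expiry = pick_timer_expiry(&genv, 150, 0, &is_global);

    printf("arm timer at %llu (global: %s)\n",
           (unsigned long long)expiry, is_global ? "yes" : "no");
    return 0;
}

The counterpart in on_scheduling_timer() then consumes the claim: when is_global_event is set, the handler deletes and frees the event armed on the local CPU and calls gmp_update_time() instead of the per-CPU sup_update_time() path.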
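
A second recurring pattern: mc2_task_resume(), mc2_task_new(), and mc2_task_exit() all select the per-CPU state the same way — partitioned level-A/B tasks use the CPU recorded at admission, while level-C tasks store cpu == -1 in mc2_admit_task() and are handled on whichever CPU the callback happens to run. A compilable sketch of that convention, with stand-in types (cpu_state, task_info, and state_for() are illustrative, not the kernel's):

#include <assert.h>
#include <stdio.h>

enum crit_level { CRIT_LEVEL_A, CRIT_LEVEL_B, CRIT_LEVEL_C, NUM_CRIT_LEVELS };

#define NR_CPUS 4

struct cpu_state { int cpu; };

static struct cpu_state cpu_states[NR_CPUS] = { {0}, {1}, {2}, {3} };

struct task_info {
    enum crit_level crit;
    int cpu;    /* assigned CPU for levels A/B; -1 for level C */
};

/* Mirrors the branch repeated in the task-lifecycle callbacks. */
static struct cpu_state *state_for(const struct task_info *t, int this_cpu)
{
    if (t->crit != CRIT_LEVEL_C) {
        assert(t->cpu >= 0 && t->cpu < NR_CPUS);
        return &cpu_states[t->cpu];     /* partitioned: fixed CPU */
    }
    return &cpu_states[this_cpu];       /* global: local CPU */
}

int main(void)
{
    struct task_info a = { CRIT_LEVEL_A, 2 };
    struct task_info c = { CRIT_LEVEL_C, -1 };

    printf("level-A task handled on P%d\n", state_for(&a, 0)->cpu);
    printf("level-C task handled on P%d\n", state_for(&c, 0)->cpu);
    return 0;
}

The same cpu == -1 sentinel selects the global environment in mc2_reservation_create()/create_polling_reservation() and mc2_reservation_destroy(), so user space requests a level-C reservation simply by passing cpu = -1.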