From 9309774d024934b71816efa41171f439b007f983 Mon Sep 17 00:00:00 2001
From: Namhoon Kim
Date: Mon, 10 Apr 2017 11:10:37 -0400
Subject: patches

---
 litmus/sched_mc2.c | 113 ++++++++++++++++++++++++++---------------------------
 1 file changed, 56 insertions(+), 57 deletions(-)

(limited to 'litmus/sched_mc2.c')

diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index 3ccee282ffdf..ec8a92440f2b 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -128,17 +128,21 @@ asmlinkage long sys_enact_mode(void)
 	struct mc2_cpu_state *state = local_cpu_state();
 	struct reservation *res;
 	struct list_head *pos;
+	unsigned long flags;
 	//lt_t now = litmus_clock();
 	TRACE_TASK(current, "ENACTING MODE TASK\n");
 	if (state->cpu == 0){
-		preempt_disable();
-		raw_spin_lock(&global_lock);
-		raw_spin_lock(&mode_lock);
+		//preempt_disable();
 		mode_changed = false;
+		local_irq_save(flags);
+
+		raw_spin_lock(&global_lock);
+		raw_spin_lock(&mode_lock);
 		if (pending){ //MCR has entered
 			if (!seen_once){
-				TRACE_TASK(current, "NOTICED MCR in mode %d\n", mode);
+				TRACE_TASK(current, "REQUEST = %llu\n", litmus_clock());
 				sched_trace_request_mode(current);
+				TS_MODE_CHANGE_START;
 				//clean up jobs that are already done
 				//after this jobs report themselves
 				list_for_each(pos, &_global_env->active_reservations){
@@ -162,7 +166,7 @@ asmlinkage long sys_enact_mode(void)
 				res = list_entry(pos, struct reservation, list);
 				if (tsk_rt(res->tsk)->completed && res->mode == mode){
 					res->reported = 1;
-					TRACE_CUR("R%d RES_REPORTED_INACTIVE = %d mode %d\n", res->id, res_reported, res->mode);
+					//TRACE_CUR("R%d RES_REPORTED_INACTIVE = %d mode %d\n", res->id, res_reported, res->mode);
 					res_reported--;
 				}
 			}
@@ -170,8 +174,8 @@ asmlinkage long sys_enact_mode(void)
 		}
 		if( ready ){ //C is throttled
 			lt_t new_mode_basetime = get_release(current);
-
-			TRACE("Timer canceled\n");
+			lt_t t;
+			//TRACE("Timer canceled\n");
 			hrtimer_cancel(&state->timer);//stop listening to old mode timers
 			mode = requested_mode;
 			TRACE("Mode has been changed.\n");
@@ -210,19 +214,18 @@ asmlinkage long sys_enact_mode(void)
 				release_at(res->tsk, new_mode_basetime);
 			}
 			//raw_spin_unlock(&state->lock);
-
+			t=litmus_clock();
 			sched_trace_enact_mode(current);
+			TS_MODE_CHANGE_END;
+			TRACE(KERN_ALERT "ENACT = %llu\n", t);
 		}
 	}
 	raw_spin_unlock(&mode_lock);
 	raw_spin_unlock(&global_lock);
 
-	//release other CPUs
+	local_irq_restore(flags);
 	cpu_0_spin_flag = !cpu_0_spin_flag;
-
-	preempt_enable();
-	TRACE_CUR("flag = %d\n",cpu_0_spin_flag);
 }
 else if (cpu_0_task_exist) {
 	//spin, wait for CPU 0 to stabilize mode decision
 
@@ -242,9 +245,11 @@ asmlinkage long sys_enact_mode(void)
 	//TRACE("CPU%d flag check. %d\n",state->cpu, mode_changed);
 	if (mode_changed) {
 		lt_t new_mode_basetime = get_release(current);
-		TRACE("CPU%d mode changed\n",state->cpu);
+		//TRACE("CPU%d mode changed\n",state->cpu);
 		hrtimer_cancel(&state->timer); //stop listening to old mode timers
 		//preempt_disable();
+		local_irq_save(flags);
+
 		raw_spin_lock(&state->lock);
 		state->sup_env = &state->sup_env_modes[mode];
 		list_for_each(pos, &state->sup_env->active_reservations){
@@ -260,20 +265,24 @@ asmlinkage long sys_enact_mode(void)
 			release_at(res->tsk, new_mode_basetime);
 		}
 		raw_spin_unlock(&state->lock);
+		local_irq_restore(flags);
+		//preempt_enable();
 	}
 	state->spin_flag = !state->spin_flag;
 }
 else {
-	//TRACE("CPU%d no cpu_0_task_exist.%d\n",state->cpu, mode_changed);
+	//TRACE("CPU%d no cpu_0_task_exist.%d\n",state->cpu, mode_changed);
+	local_irq_restore(flags);
 	return 0;
 }
 
-TRACE("CPU%d everyone should get this.%d\n",state->cpu, mode_changed);
+TRACE("CPU%d enact syscall ends m_c? %d\n",state->cpu, mode_changed);
 //if mode didn't change this has no effect on what's being scheduled
-raw_spin_lock(&state->lock);
+//raw_spin_lock(&state->lock);
 state->sup_env = &state->sup_env_modes[mode];
-raw_spin_unlock(&state->lock);
+//raw_spin_unlock(&state->lock);
 //sup_update_time(state->sup_env, litmus_clock());
+
 return 0;
 }
@@ -451,6 +460,9 @@ static int get_lowest_prio_cpu(lt_t priority)
 	int cpu, ret = NO_CPU;
 	lt_t latest_deadline = 0;
 
+	if (priority == LITMUS_NO_PRIORITY)
+		return ret;
+
 	ce = &_lowest_prio_cpu.cpu_entries[local_cpu_state()->cpu];
 	if (!ce->will_schedule && !ce->scheduled) {
 		TRACE("CPU %d (local) is the lowest!\n", ce->cpu);
@@ -529,8 +541,8 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 		if (event->next_update < litmus_clock()) {
 			if (event->timer_armed_on == NO_CPU) {
 				struct reservation *res = gmp_find_by_id(_global_env, event->id);
-				int cpu = get_lowest_prio_cpu(res?res->priority:0);
-				TRACE("GLOBAL EVENT PASSED!! poking CPU %d to reschedule\n", cpu);
+				int cpu = get_lowest_prio_cpu(res?res->priority:LITMUS_NO_PRIORITY);
+				//TRACE("GLOBAL EVENT PASSED!! poking CPU %d to reschedule\n", cpu);
 				list_del(&event->list);
 				kfree(event);
 				if (cpu != NO_CPU) {
@@ -594,17 +606,15 @@ static void mc2_update_timer_and_unlock(struct mc2_cpu_state *state)
 		 */
 		TRACE("mc2_update_timer for remote CPU %d (update=%llu, "
 		      "active:%d, set:%llu)\n",
-			state->cpu,
-			update,
-			hrtimer_active(&state->timer),
+			state->cpu, update, hrtimer_active(&state->timer),
 			ktime_to_ns(hrtimer_get_expires(&state->timer)));
 		if (!hrtimer_active(&state->timer) ||
 		    ktime_to_ns(hrtimer_get_expires(&state->timer)) > update) {
 			TRACE("poking CPU %d so that it can update its "
-			      "scheduling timer (active:%d, set:%llu)\n",
-			      state->cpu,
-			      hrtimer_active(&state->timer),
-			      ktime_to_ns(hrtimer_get_expires(&state->timer)));
+				"scheduling timer (active:%d, set:%llu)\n",
+				state->cpu,
+				hrtimer_active(&state->timer),
+				ktime_to_ns(hrtimer_get_expires(&state->timer)));
 			//litmus_reschedule(state->cpu);
 			/*
 			raw_spin_lock(&state->lock);
@@ -679,7 +689,6 @@ static enum hrtimer_restart on_scheduling_timer(struct hrtimer *timer)
 	TS_ISR_START;
 
 	TRACE("Timer fired at %llu\n", litmus_clock());
-	//raw_spin_lock_irqsave(&_global_env.lock, flags);
 	raw_spin_lock_irqsave(&state->lock, flags);
 	now = litmus_clock();
 	sup_update_time(state->sup_env, now);
@@ -718,7 +727,6 @@
 	}
 	raw_spin_unlock(&global_lock);
 	raw_spin_unlock_irqrestore(&state->lock, flags);
-	//raw_spin_unlock_irqrestore(&_global_env.lock, flags);
 
 	TS_ISR_END;
 
@@ -811,7 +819,7 @@ static long mc2_complete_job(void)
 		res->cur_budget = 0;
 		res->env->change_state(res->env, res, RESERVATION_DEPLETED);
 
-		TRACE_CUR("CHANGE NEXT_REP = %llu NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env->next_scheduler_update);
+		// TRACE_CUR("CHANGE NEXT_REP = %llu NEXT_UPDATE = %llu\n", res->next_replenishment, state->sup_env->next_scheduler_update);
 
 		//if (lv < CRIT_LEVEL_C)
 		//	raw_spin_unlock(&state->lock);
@@ -901,7 +909,6 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st
 				sup_scheduler_update_after(sup_env, res->cur_budget);
 				return tsk;
 			} else {
-				TRACE_TASK(tsk, "@@@@@DISPATCH@@@@@@@ init_finished? %s\n", is_init_finished(tsk)?"true":"false");
 				if (!is_init_finished(tsk)) {
 					//ce = &state->crit_entries[lv];
 					sup_scheduler_update_after(sup_env, res->cur_budget);
@@ -1132,7 +1139,6 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 	if (prev != state->scheduled && is_realtime(prev)) {
 		struct mc2_task_state* tinfo = get_mc2_state(prev);
 		struct reservation* res = tinfo->res_info[mode].client.reservation;
-		TRACE_TASK(prev, "PREEPT_COUNT %d\n", preempt_count());
 		if (res) {
 			TRACE_TASK(prev, "PREV JOB was scheduled_on = P%d\n", res->scheduled_on);
 			res->scheduled_on = NO_CPU;
@@ -1142,7 +1148,7 @@
 		if (tinfo->has_departed == false && get_task_crit_level(prev) == CRIT_LEVEL_C) {
 			int cpu;
 			raw_spin_lock(&global_lock);
-			cpu = get_lowest_prio_cpu(res?res->priority:0);
+			cpu = get_lowest_prio_cpu(res?res->priority:LITMUS_NO_PRIORITY);
 			if (cpu != NO_CPU && _lowest_prio_cpu.cpu_entries[cpu].will_schedule == false) {
 				//raw_spin_lock(&_lowest_prio_cpu.lock);
 				_lowest_prio_cpu.cpu_entries[cpu].will_schedule = true;
@@ -1195,7 +1201,6 @@ static void resume_legacy_task_model_updates(struct task_struct *tsk)
 		now = litmus_clock();
 		if (is_tardy(tsk, now)) {
 			release_at(tsk, now);
-			//sched_trace_task_release(tsk);
 		}
 	}
 }
@@ -1224,7 +1229,7 @@ static void mc2_task_resume(struct task_struct *tsk)
 	if (tinfo->has_departed)
 	{
 		/* We don't want to consider jobs before synchronous releases */
-		if (tsk_rt(tsk)->job_params.job_no > 4) {
+		if (tsk_rt(tsk)->job_params.job_no > 3) {
 			switch(get_task_crit_level(tsk)) {
 				case CRIT_LEVEL_A:
 					TS_RELEASE_LATENCY_A(get_release(tsk));
@@ -1238,7 +1243,7 @@
 				default:
 					break;
 			}
-			TRACE_CUR("INIT_FINISHED is SET\n");
+			// TRACE_CUR("INIT_FINISHED is SET\n");
 			tsk_mc2_data(tsk)->init_finished = 1;
 		}
 
@@ -1293,14 +1298,11 @@ static long mc2_admit_task(struct task_struct *tsk)
 		return -ENOMEM;
 
 	if (!mp) {
-		printk(KERN_ERR "mc2_admit_task: criticality level has not been set\n");
 		TRACE("mc2_admit_task: criticality level has not been set\n");
 		return -ESRCH;
 	}
 
 	lv = mp->crit;
 
-	preempt_disable();
-
 	if (lv < CRIT_LEVEL_C) {
 		state = cpu_state_for(task_cpu(tsk));
@@ -1312,14 +1314,14 @@
 		tinfo->mc2_param.res_id = mp->res_id;
 		tinfo->mc2_param.mode_mask = mp->mode_mask;
 		tinfo->mc2_param.init_finished = 0;
-		TRACE_TASK(tsk, "mode_mask = %x\n", mp->mode_mask);
+//		TRACE_TASK(tsk, "mode_mask = %x\n", mp->mode_mask);
 
-		TRACE_TASK(tsk, "Mode 0\n");
+//		TRACE_TASK(tsk, "Mode 0\n");
 		res = sup_find_by_id(&(state->sup_env_modes[0]), mp->res_id);
 
 		/* found the appropriate reservation */
 		if (res) {
-			TRACE_TASK(tsk, "SUP FOUND RES ID in mode 0\n");
+//			TRACE_TASK(tsk, "SUP FOUND RES ID in mode 0\n");
 			/* initial values */
 			err = err? err:mc2_task_client_init(&tinfo->res_info[0], &tinfo->mc2_param, tsk, res);
@@ -1334,12 +1336,12 @@
 				//task not present in mode
 				continue;
 			}
-			TRACE_TASK(tsk, "Mode %d\n",i);
+//			TRACE_TASK(tsk, "Mode %d\n",i);
 			res = sup_find_by_id(&(state->sup_env_modes[i]), mp->res_id);
 
 			/* found the appropriate reservation */
 			if (res) {
-				TRACE_TASK(tsk, "SUP FOUND RES ID in mode %d\n", i);
+				// TRACE_TASK(tsk, "SUP FOUND RES ID in mode %d\n", i);
 				/* initial values */
 				err = err? err:mc2_task_client_init(&tinfo->res_info[i], &tinfo->mc2_param, tsk, res);
@@ -1355,18 +1357,16 @@ static long mc2_admit_task(struct task_struct *tsk)
 			tsk_rt(tsk)->plugin_state = tinfo;
 			tsk_rt(tsk)->task_params.budget_policy = NO_ENFORCEMENT;
 		}
-		TRACE_CUR("ctrl_page mode_poll_task %d, cpu = %d, tsk_rt->ctrl_page = %x\n", tsk_rt(tsk)->ctrl_page->mode_poll_task, tinfo->cpu, tsk_rt(tsk)->ctrl_page);
+
 		if (is_mode_poll_task(tsk) && tinfo->cpu == 0) {
-			TRACE_CUR("CPU0_TASK_EXIST set\n");
 			cpu_0_task_exist = true;
 		}
 
 		raw_spin_unlock_irqrestore(&state->lock, flags);
 	} else if (lv == CRIT_LEVEL_C) {
-		TRACE_TASK(tsk, "Task being admitted is Level C\n");
+//		TRACE_TASK(tsk, "Task being admitted is Level C\n");
 		state = local_cpu_state();
 		raw_spin_lock_irqsave(&state->lock, flags);
-		raw_spin_lock(&global_lock);
 
 		//state = local_cpu_state();
 		//raw_spin_lock(&state->lock);
@@ -1378,14 +1378,16 @@ static long mc2_admit_task(struct task_struct *tsk)
 		tinfo->mc2_param.mode_mask = mp->mode_mask;
 		tinfo->mc2_param.init_finished = 0;
 
-		TRACE_TASK(tsk, "mode_mask = %x\n", mp->mode_mask);
+		// TRACE_TASK(tsk, "mode_mask = %x\n", mp->mode_mask);
 
-		TRACE_TASK(tsk, "Mode 0\n");
+//		TRACE_TASK(tsk, "Mode 0\n");
+
+		raw_spin_lock(&global_lock);
 		res = gmp_find_by_id(&(_global_env_modes[0]), mp->res_id);
 
 		/* found the appropriate reservation */
 		if (res) {
-			TRACE_TASK(tsk, "GMP FOUND RES ID in mode 0\n");
+			// TRACE_TASK(tsk, "GMP FOUND RES ID in mode 0\n");
 			/* initial values */
 			err = err? err:mc2_task_client_init(&tinfo->res_info[0], &tinfo->mc2_param, tsk, res);
@@ -1428,12 +1430,11 @@ static long mc2_admit_task(struct task_struct *tsk)
 		raw_spin_unlock_irqrestore(&state->lock, flags);
 	}
 
-	preempt_enable();
 
 	if (err)
 		kfree(tinfo);
 
-	TRACE_TASK(tsk, "MC2 task admitted %d\n", err);
+	//TRACE_TASK(tsk, "MC2 task admitted %d\n", err);
 	return err;
 }
 
@@ -1460,7 +1461,6 @@ static void mc2_task_new(struct task_struct *tsk, int on_runqueue,
 	else
 		state = cpu_state_for(tinfo->cpu);
 
-	local_irq_save(flags);
 
 	/* acquire the lock protecting the state and disable interrupts */
 	//raw_spin_lock(&_global_env.lock);
@@ -1471,6 +1471,7 @@
 		litmus_reschedule_local();
 	}
 
+	local_irq_save(flags);
 	raw_spin_lock(&state->lock);
 
 	if (lv == CRIT_LEVEL_C) {
@@ -1778,8 +1779,7 @@ static long create_polling_reservation(
 
 	/* sanity checks */
 	if (config->polling_params.budget > config->polling_params.period) {
-		printk(KERN_ERR "invalid polling reservation (%u): "
-		       "budget > period\n", config->id);
+		printk(KERN_ERR "invalid polling reservation (%u): " "budget > period\n", config->id);
 		return -EINVAL;
 	}
 	if (config->polling_params.budget >
@@ -2160,7 +2160,7 @@ static long mc2_activate_plugin(void)
 	}
 	_global_env = &_global_env_modes[0];
 
-	raw_spin_lock_init(&_lowest_prio_cpu.lock);
+	//raw_spin_lock_init(&_lowest_prio_cpu.lock);
 	raw_spin_lock_init(&mode_lock);
 	raw_spin_lock_init(&global_lock);
 
@@ -2188,7 +2188,6 @@
 		//	cr_entry->level = lv;
 		//	cr_entry->running = NULL;
 		//}
-
		for(i = 0; i < NR_MODES; i++){
 			sup_init(&(state->sup_env_modes[i]));
 		}
--
cgit v1.2.2
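
Note on the recurring change above: sys_enact_mode() used to wrap its nested raw spinlocks in preempt_disable()/preempt_enable(); the patch replaces that with local_irq_save()/local_irq_restore(), so the scheduling-timer interrupt cannot fire on this CPU while global_lock and mode_lock are held, and it brackets the mode switch with TS_MODE_CHANGE_START/TS_MODE_CHANGE_END timestamps. A minimal sketch of the resulting critical-section shape on CPU 0, simplified from the hunks above (not the full function):

	unsigned long flags;

	mode_changed = false;
	local_irq_save(flags);          /* IRQs off: the hrtimer ISR cannot run here */
	raw_spin_lock(&global_lock);    /* outer lock: global (Level-C) reservation state */
	raw_spin_lock(&mode_lock);      /* inner lock: mode-change bookkeeping */

	/* ... check pending/ready, switch to requested_mode,
	       re-release reservations at new_mode_basetime ... */

	raw_spin_unlock(&mode_lock);    /* release in reverse order */
	raw_spin_unlock(&global_lock);
	local_irq_restore(flags);
	cpu_0_spin_flag = !cpu_0_spin_flag; /* other CPUs spin on this flag and
	                                       then pick up the new mode */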
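
Likewise, both call sites of get_lowest_prio_cpu() now pass LITMUS_NO_PRIORITY instead of 0 when no reservation backs the request, and the function bails out early for that sentinel so no CPU gets poked. In outline (a sketch; LITMUS_NO_PRIORITY is assumed to be the no-priority sentinel defined elsewhere in the LITMUS^RT headers):

	static int get_lowest_prio_cpu(lt_t priority)
	{
		int cpu, ret = NO_CPU;
		lt_t latest_deadline = 0;

		/* no backing reservation means there is no priority to
		   compare against, so report that no CPU needs to reschedule */
		if (priority == LITMUS_NO_PRIORITY)
			return ret;

		/* ... otherwise scan _lowest_prio_cpu.cpu_entries for the
		       latest-deadline (lowest-priority) CPU, as before ... */
	}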