/* litmus/jobs.c - common job control code
 */

#include <linux/sched.h>

#include <litmus/litmus.h>
#include <litmus/jobs.h>
#include <litmus/trace.h>
#include <litmus/sched_trace.h>
#include <litmus/sched_plugin.h>

#ifdef CONFIG_MERGE_TIMERS
#include <litmus/event_group.h>
#endif

#ifdef CONFIG_PLUGIN_COLOR
#include <litmus/color.h>
#endif

#ifdef CONFIG_PLUGIN_MC
#include <litmus/sched_mc.h>
#else
#define TRACE_MC_TASK(t, fmt, args...) TRACE_TASK(t, fmt, ##args)
#endif

static inline void setup_release(struct task_struct *t, struct rt_job *job,
				 lt_t release)
{
	/* prepare next release */
	job->release   = release;
	job->deadline  = release + get_rt_relative_deadline(t);
	job->exec_time = 0;

	/* update job sequence number */
	++job->job_no;
}

static inline void setup_kernel_release(struct task_struct *t, lt_t release)
{
	BUG_ON(!t);

#ifdef CONFIG_PLUGIN_COLOR
	/* chunked tasks get a chunk-sized budget for each kernel job */
	if (tsk_rt(t)->orig_cost)
		get_exec_cost(t) = color_chunk;
#endif

	/* Record lateness before we set up the next job's
	 * release and deadline. Lateness may be negative.
	 */
	t->rt_param.job_params.lateness =
		(long long)litmus_clock() -
		(long long)t->rt_param.job_params.deadline;

	t->rt.time_slice = 1;
	setup_release(t, &tsk_rt(t)->job_params, release);

	TRACE_MC_TASK(t, "kernel rel=%llu, dead=%llu\n",
		      get_release(t), get_deadline(t));
	sched_trace_server_release(-t->pid, get_rt_job(t),
				   tsk_rt(t)->job_params);
}

void setup_user_release(struct task_struct *t, lt_t release)
{
	setup_release(t, &tsk_rt(t)->user_job, release);

	TRACE_MC_TASK(t, "user rel=%llu, dead=%llu\n",
		      get_user_release(t), get_user_deadline(t));

#ifdef CONFIG_PLUGIN_MC
	if (CRIT_LEVEL_A != tsk_mc_crit(t))
		sched_trace_task_release(t);
#endif
}

void prepare_for_next_period(struct task_struct *t)
{
	lt_t rem, exec, orig;

#ifdef CONFIG_PLUGIN_COLOR
	if (tsk_rt(t)->orig_cost) {
		/* Task is chunked */
		tsk_rt(t)->last_exec_time += get_exec_time(t);
		exec = tsk_rt(t)->last_exec_time;
		orig = tsk_rt(t)->orig_cost;
		rem  = orig - exec;

		if (lt_before(exec, orig)) {
			/* Don't increment the period if there is user-space
			 * work to perform and we haven't exhausted our
			 * original budget.
			 */
			get_exec_time(t) = 0;

			/* The last chunk is a remainder */
			if (rem < get_exec_cost(t))
				get_exec_cost(t) = rem;
			return;
		}
	}
#endif
	setup_kernel_release(t, get_release(t) + get_rt_period(t));
}

void release_at(struct task_struct *t, lt_t start)
{
	BUG_ON(!t);

	TRACE_MC_TASK(t, "Releasing at %llu\n", start);

	setup_kernel_release(t, start);
	setup_user_release(t, start);

	BUG_ON(!is_released(t, start));
}

/*
 * User-space job has completed execution
 */
long complete_job(void)
{
	lt_t amount;
	lt_t now = litmus_clock();
	lt_t exec_time = tsk_rt(current)->job_params.exec_time;

	/* Task statistic summaries */
	tsk_rt(current)->tot_exec_time += exec_time;
	if (lt_before(tsk_rt(current)->max_exec_time, exec_time))
		tsk_rt(current)->max_exec_time = exec_time;

	if (is_tardy(current, now)) {
		TRACE_TASK(current, "is tardy, now: %llu, deadline: %llu\n",
			   now, get_deadline(current));
		amount = now - get_deadline(current);
		if (lt_after(amount, tsk_rt(current)->max_tardy))
			tsk_rt(current)->max_tardy = amount;
		tsk_rt(current)->total_tardy += amount;
		++tsk_rt(current)->missed;
	}

	TRACE_TASK(current, "user complete\n");

	/* Mark that we do not execute anymore */
	tsk_rt(current)->completed = 1;
	tsk_rt(current)->flags = RT_F_SLEEP;

	/* call schedule, this will return when a new job arrives
	 * it also takes care of preparing for the next release
	 */
	schedule();

	return 0;
}