/*
 * Definition of the scheduler plugin interface.
 *
 */
#ifndef _LINUX_RT_PARAM_H_
#define _LINUX_RT_PARAM_H_

#include <linux/list.h>

typedef unsigned long jiffie_t;

/* different types of clients */
typedef enum {
	RT_CLASS_HARD,
	RT_CLASS_SOFT,
	RT_CLASS_BEST_EFFORT
} task_class_t;

typedef struct rt_param {
	unsigned long	exec_cost;
	unsigned long	period;
	unsigned int	cpu;
	task_class_t	class;
} rt_param_t;

/* fixed point wrapper to force compiler
 * errors in case of misuse of a fixed point value
 */
typedef struct {
	long val;
} fp_t;

typedef struct {
	fp_t		weight;
	unsigned long	period;
	fp_t		value;
} service_level_t;

typedef struct {
	fp_t		estimate;
	fp_t		accumulated;
} predictor_state_t;

typedef struct {
	/* when will this task be released the next time? */
	jiffie_t	release;
	/* time instant the last job was released */
	jiffie_t	last_release;
	/* what is the current deadline? */
	jiffie_t	deadline;
	/* b-bit tie breaker for PFAIR, it is ignored in EDF */
	int		b_bit;
	/* group deadline tie breaker, it is ignored in EDF */
	jiffie_t	group_deadline;
	/* how long has this task executed so far?
	 * In case of capacity sharing a job completion cannot be
	 * detected by checking time_slice == 0 as the job may have
	 * executed while using another capacity. Use this counter
	 * to keep track of the time spent on a CPU by a job.
	 *
	 * In other words: The number of consumed quanta since the
	 * last job release.
	 */
	unsigned int	exec_time;

	/* Which job is this. This is used to let user space
	 * specify which job to wait for, which is important if jobs
	 * overrun. If we just call sys_sleep_next_period() then we
	 * will unintentionally miss jobs after an overrun.
	 *
	 * Increase this sequence number when a job is released.
	 */
	unsigned int	job_no;
} rt_times_t;
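/*
 * Illustrative sketch (not part of the original interface): the fp_t wrapper
 * above only helps if all conversions go through explicit helpers, so that
 * accidentally treating a fixed-point value as a plain long becomes a
 * compile-time error. The helper names and the 16-bit fractional part below
 * are assumptions made for this example; any real fixed-point helpers would
 * live elsewhere.
 */
#define EXAMPLE_FP_SHIFT 16

static inline fp_t example_long_to_fp(long x)
{
	/* shift the integer part into the assumed fixed-point format */
	fp_t f = { .val = x << EXAMPLE_FP_SHIFT };
	return f;
}

static inline long example_fp_to_long(fp_t f)
{
	/* drop the fractional bits again */
	return f.val >> EXAMPLE_FP_SHIFT;
}

/* example_fp_to_long(w) + 1	compiles: the conversion is explicit
 * w + 1			does not compile: w is a struct, not a long
 */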
/* RT task parameters for scheduling extensions
 * These parameters are inherited during clone and therefore must
 * be explicitly set up before the task set is launched.
 */
typedef struct task_rt_param {
	/* is the task sleeping? */
	unsigned int		flags:8;

	/* Real-time marker: 1 iff it is a LITMUS real-time task. */
	unsigned int		is_realtime:1;

	/* is a BE->RT or RT->BE transition pending? */
	unsigned int		transition_pending:1;

	/* is this task under control of litmus?
	 *
	 * this is necessary because otherwise signal delivery code
	 * may try to wake up a task that is already queued in plugin
	 * data structures.
	 */
	unsigned int		litmus_controlled:1;

	/* Did this task register any SRP controlled resource accesses?
	 * This, of course, should only ever be true under partitioning.
	 * However, this limitation is not currently enforced.
	 */
	unsigned int		subject_to_srp:1;

	/* if a BE->RT transition failed, then this field contains the error */
	unsigned long		transition_error;

	/* user controlled parameters */
	rt_param_t		basic_params;

	/* task representing the current "inherited" task
	 * priority, assigned by inherit_priority and
	 * return priority in the scheduler plugins.
	 * could point to self if PI does not result in
	 * an increased task priority.
	 */
	struct task_struct*	inh_task;

	/* Don't just dereference this pointer in kernel space!
	 * It might very well point to junk or nothing at all.
	 * NULL indicates that the task has not requested any non-preemptable
	 * section support.
	 * Not inherited upon fork.
	 */
	__user short*		np_flag;

	/* For the FMLP under PSN-EDF, it is required to make the task
	 * non-preemptive from kernel space. In order not to interfere with
	 * user space, this counter indicates the kernel space np setting.
	 * kernel_np > 0 => task is non-preemptive
	 */
	unsigned int		kernel_np;

	/* timing parameters */
	rt_times_t		times;

	/* This is currently only used by the PFAIR code
	 * and a prime candidate for cleanup.
	 */
	rt_times_t		backup;

	/* This field can be used by plugins to store where the task
	 * is currently scheduled. It is the responsibility of the
	 * plugin to avoid race conditions.
	 *
	 * Used by GSN-EDF.
	 */
	int			scheduled_on;

	/* This field can be used by plugins to store where the task
	 * is currently linked. It is the responsibility of the plugin
	 * to avoid race conditions.
	 *
	 * Used by GSN-EDF.
	 */
	int			linked_on;

	/* Adaptive support. Adaptive tasks will store service levels
	 * in this (dynamically allocated) structure.
	 */
	service_level_t*	service_level;
	unsigned int		no_service_levels;
	unsigned int		cur_service_level;

	/* Adaptive support. Store state for weight estimation. */
	predictor_state_t	predictor_state;

	/* Adaptive support. Optimizer fields. */
	struct list_head	opt_list;
	fp_t			opt_order;
	fp_t			opt_dw;
	fp_t			opt_nw;
	unsigned int		opt_level;
	jiffie_t		opt_change;

	/* Fields saved before BE->RT transition. */
	int			old_policy;
	int			old_prio;
} task_rt_param_t;

/* Possible RT flags */
#define RT_F_RUNNING		0x00000000
#define RT_F_SLEEP		0x00000001
#define RT_F_EXP_QUANTA		0x00000002
#define RT_F_NON_PREEMTABLE	0x00000004
#define RT_F_EXIT_SEM		0x00000008

#define is_realtime(t)		((t)->rt_param.is_realtime)
#define rt_transition_pending(t) \
	((t)->rt_param.transition_pending)

/* Realtime utility macros */
#define get_passed_quanta(t)	((t)->rt_param.times.exec_time)
#define inc_passed_quanta(t)	((t)->rt_param.times.exec_time += 1)
#define get_rt_flags(t)		((t)->rt_param.flags)
#define set_rt_flags(t,f)	(t)->rt_param.flags=(f)
#define get_exec_cost(t)	((t)->rt_param.basic_params.exec_cost)
#define get_rt_period(t)	((t)->rt_param.basic_params.period)
#define set_rt_period(t,p)	(t)->rt_param.basic_params.period=(p)
#define set_exec_cost(t,e)	(t)->rt_param.basic_params.exec_cost=(e)
#define get_partition(t)	(t)->rt_param.basic_params.cpu
#define get_deadline(t)		((t)->rt_param.times.deadline)
#define get_last_release(t)	((t)->rt_param.times.last_release)
#define get_class(t)		((t)->rt_param.basic_params.class)

#define has_active_job(t) \
	(time_before(get_last_release(t), jiffies) \
	 && time_before_eq(jiffies, get_deadline(t)))

#define get_est_weight(t)	((t)->rt_param.predictor_state.estimate)
#define get_sl(t, l) \
	((t)->rt_param.service_level[l])
#define get_cur_sl(t)		((t)->rt_param.cur_service_level)
#define get_max_sl(t)		((t)->rt_param.no_service_levels - 1)
#define get_opt_sl(t)		((t)->rt_param.opt_level)

#define is_subject_to_srp(t)	((t)->rt_param.subject_to_srp)
#define is_hrt(t) \
	((t)->rt_param.basic_params.class == RT_CLASS_HARD)
#define is_srt(t) \
	((t)->rt_param.basic_params.class == RT_CLASS_SOFT)
#define is_be(t) \
	((t)->rt_param.basic_params.class == RT_CLASS_BEST_EFFORT)

#define clear_rt_params(t) \
	memset(&(t)->rt_param, 0, sizeof(struct task_rt_param))

#define get_release(t)		((t)->rt_param.times.release)
#define set_release(t,r)	((t)->rt_param.times.release=(r))
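/*
 * Illustrative sketch (not part of the original interface): how an adaptive
 * plugin might use the service level table. The function walks the table
 * directly through the rt_param block so that it only depends on the types
 * defined above; it assumes that levels are stored in order of increasing
 * weight and that two fp_t values in the same format may be compared via
 * their raw .val members.
 */
static inline unsigned int example_highest_fitting_sl(
	const task_rt_param_t* p, fp_t max_weight)
{
	unsigned int l;

	/* non-adaptive tasks have no service level table */
	if (!p->service_level || !p->no_service_levels)
		return 0;

	/* start at the heaviest level and back off until the weight fits */
	for (l = p->no_service_levels - 1; l > 0; l--)
		if (p->service_level[l].weight.val <= max_weight.val)
			break;
	return l;
}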
/* honor the flag that is set when scheduling is in progress
 * This is some dirty hack in Linux that creates race conditions in our code
 * if we don't pay attention to it.
 */
#define is_running(t) \
	((t)->state == TASK_RUNNING || \
	 (t)->thread_info->preempt_count & PREEMPT_ACTIVE)

#define is_blocked(t)  (!is_running(t))
#define is_released(t) (time_before_eq((t)->rt_param.times.release, jiffies))
#define is_tardy(t)    (time_before_eq((t)->rt_param.times.deadline, jiffies))

#define task_slack(t)  ( (int) (t)->rt_param.times.deadline - (int) jiffies - \
			 (int) ((t)->rt_param.basic_params.exec_cost -        \
				(t)->rt_param.times.exec_time))

/* real-time comparison macros */
#define earlier_deadline(a, b) (time_before(\
	(a)->rt_param.times.deadline,\
	(b)->rt_param.times.deadline))
#define earlier_release(a, b) (time_before(\
	(a)->rt_param.times.release,\
	(b)->rt_param.times.release))
#define earlier_last_release(a, b) (time_before(\
	(a)->rt_param.times.last_release,\
	(b)->rt_param.times.last_release))

#define make_np(t) do { (t)->rt_param.kernel_np++; } while(0)
#define take_np(t) do { (t)->rt_param.kernel_np--; } while(0)

#define backup_times(t)  do { (t)->rt_param.backup = (t)->rt_param.times; \
			 } while(0)
#define restore_times(t) do { (t)->rt_param.times  = (t)->rt_param.backup; \
			 } while(0)

#define rt_list2task(p) list_entry(p, struct task_struct, rt_list)

#endif
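/*
 * Illustrative sketch (not part of the original interface): the intended
 * pairing of make_np()/take_np() around a kernel-side non-preemptive
 * section, as needed for the FMLP under PSN-EDF. The function below is
 * hypothetical and kept under #if 0, so it is never compiled.
 */
#if 0
static void example_np_section(struct task_struct* t)
{
	make_np(t);	/* kernel_np++: the plugin must not preempt t */

	/* ... critical section executed on behalf of t ... */

	take_np(t);	/* kernel_np--: t is preemptable again */
}
#endif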