From 1d88b6ae5ed253fbfd4568eb668e2ebd1ca01ac7 Mon Sep 17 00:00:00 2001 From: "Bjoern B. Brandenburg" Date: Sun, 7 Oct 2007 23:28:55 -0400 Subject: adaptive: massive work on the optimizer Not yet complete. --- include/linux/rt_param.h | 24 ++++++++--- include/linux/sched_adaptive.h | 93 +++++++++++++++++++++++++++++++++++++----- 2 files changed, 101 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rt_param.h b/include/linux/rt_param.h index 52737bb4dd..d14e2129b1 100644 --- a/include/linux/rt_param.h +++ b/include/linux/rt_param.h @@ -38,7 +38,6 @@ typedef struct { } service_level_t; typedef struct { - fp_t error; fp_t estimate; fp_t accumulated; } predictor_state_t; @@ -172,6 +171,15 @@ typedef struct task_rt_param { */ fp_t wt_y; fp_t wt_slope; + + /* Adaptive support. Optimizer fields. + */ + struct list_head opt_list; + fp_t opt_order; + fp_t opt_dw; + fp_t opt_nw; + unsigned int opt_level; + jiffie_t opt_change; } task_rt_param_t; /* Possible RT flags */ @@ -192,9 +200,17 @@ typedef struct task_rt_param { #define set_exec_cost(t,e) (t)->rt_param.basic_params.exec_cost=(e) #define get_partition(t) (t)->rt_param.basic_params.cpu #define get_deadline(t) ((t)->rt_param.times.deadline) +#define get_last_release(t) ((t)->rt_param.times.last_release) #define get_class(t) ((t)->rt_param.basic_params.class) +#define is_active(t) \ + (get_last_release(t) < jiffies && get_deadline(t) >= jiffies) + #define get_est_weight(t) ((t)->rt_param.predictor_state.estimate) +#define get_sl(t, l) \ + ((t)->rt_param.service_level[get_cur_sl(t)]) +#define get_cur_sl(t) ((t)->rt_param.cur_service_level) +#define get_max_sl(t) ((t)->rt_param.no_service_levels - 1) #define is_realtime(t) ((t)->rt_param.is_realtime) #define is_subject_to_srp(t) ((t)->rt_param.subject_to_srp) @@ -208,12 +224,8 @@ typedef struct task_rt_param { #define clear_rt_params(t) \ memset(&(t)->rt_param,0, sizeof(struct task_rt_param)) -#define get_last_release_time(t) 
((t)->rt_param.times.last_release) -#define set_last_release_time(t,r) ((t)->rt_param.times.last_release=(r)) - #define get_release(t) ((t)->rt_param.times.release) #define set_release(t,r) ((t)->rt_param.times.release=(r)) -#define get_last_release(t) ((t)->rt_param.times.last_release) /* honor the flag that is set when scheduling is in progress * This is some dirty hack in Linux that creates race conditions in our code @@ -253,4 +265,6 @@ memset(&(t)->rt_param,0, sizeof(struct task_rt_param)) } while(0); +#define rt_list2task(p) list_entry(p, struct task_struct, rt_list) + #endif diff --git a/include/linux/sched_adaptive.h b/include/linux/sched_adaptive.h index eac2f90652..0f56a57cd3 100644 --- a/include/linux/sched_adaptive.h +++ b/include/linux/sched_adaptive.h @@ -1,9 +1,15 @@ #ifndef __ADAPTIVE_H__ #define __ADAPTIVE_H__ -static inline unsigned long ideal_allocation(fp_t weight, unsigned long delta_t) +static inline fp_t ideal(fp_t weight, jiffie_t delta_t) { - return _floor(_mul(weight, FP(delta_t))); + return _mul(weight, FP(delta_t)); +} + +static inline long ideal_exec_time(struct task_struct* t) +{ + jiffie_t delta = jiffies - get_last_release(t); + return _round(ideal(get_est_weight(t), delta)); } /* this makes a whole bunch of linearity assumptions */ @@ -16,19 +22,84 @@ static inline fp_t weight_transfer(fp_t from_val, fp_t to_val, return _div(_mul(act_weight, rel_to), rel_from); } -static inline void update_estimate(predictor_state_t *state, fp_t actual_weight, - fp_t a, fp_t b, fp_t c) +static inline fp_t est_weight_at(struct task_struct* t, unsigned int level) { - fp_t err, new, delta_err; + if (t->rt_param.no_service_levels) + return weight_transfer(get_sl(t, get_cur_sl(t)).value, + get_sl(t, level).value, + t->rt_param.wt_slope, t->rt_param.wt_y, + get_est_weight(t)); + else + return get_est_weight(t); + +} + +static void update_estimate(predictor_state_t *state, fp_t actual_weight, + fp_t a, fp_t b) +{ + fp_t err, new; err = _sub(actual_weight, 
state->estimate); - state->accumulated = _add(state->accumulated, err); - delta_err = _sub(err, state->error); - new = _add(_add(_mul(a, err), - _mul(b, state->accumulated)), - _mul(c, delta_err)); + new = _add(_mul(a, err), + _mul(b, state->accumulated)); state->estimate = new; - state->error = err; + state->accumulated = _add(state->accumulated, err); +} + +static fp_t linear_metric(struct task_struct* t) +{ + fp_t v1, vcur, vmax, g1, gmax; + fp_t est_w; + unsigned int l = t->rt_param.no_service_levels; + unsigned int lcur; + + if (l <= 1) + return FP(0); + + lcur = t->rt_param.cur_service_level; + est_w = get_est_weight(t); + v1 = t->rt_param.service_level[0].value; + vmax = t->rt_param.service_level[l - 1].value; + vcur = t->rt_param.service_level[lcur].value; + + g1 = weight_transfer(vcur, v1, + t->rt_param.wt_slope, t->rt_param.wt_y, + est_w); + gmax = weight_transfer(vcur, vmax, + t->rt_param.wt_slope, t->rt_param.wt_y, + est_w); + return _div(_sub(vmax, v1), + _sub(gmax, g1)); +} + +static unsigned long reweighted_deadline(fp_t ow, fp_t nw, unsigned long alloc, + jiffie_t deadline, jiffie_t release) +{ + fp_t dl; + dl = _mul(FP(deadline - release), ow); + dl = _sub(dl, FP(alloc)); + dl = _div(dl, nw); + return _round(dl); +} + +static inline int is_under_allocated(struct task_struct* t) +{ + return ideal_exec_time(t) >= t->rt_param.times.exec_time; } +static inline jiffie_t equal_point(struct task_struct* t) +{ + return _round(_div( FP(t->rt_param.times.exec_time), + get_est_weight(t))) - + (jiffies - get_last_release(t)); +} + +static jiffie_t decrease_enactment_time(struct task_struct* t) +{ + if (is_active(t) && !is_under_allocated(t)) + return equal_point(t); + return 0; +} + + #endif -- cgit v1.2.2