diff options
| author | Bjoern B. Brandenburg <bbb@cs.unc.edu> | 2007-10-07 23:28:55 -0400 |
|---|---|---|
| committer | Bjoern B. Brandenburg <bbb@cs.unc.edu> | 2007-10-07 23:28:55 -0400 |
| commit | 1d88b6ae5ed253fbfd4568eb668e2ebd1ca01ac7 (patch) | |
| tree | 15764ea7fbf450962773210ed95d60982f784710 /include/linux | |
| parent | 3a591b6003054e1d4e15c8dd7deefe3c8090192e (diff) | |
adaptive: massive work on the optimizer
Not yet complete.
Diffstat (limited to 'include/linux')
| -rw-r--r-- | include/linux/rt_param.h | 24 | ||||
| -rw-r--r-- | include/linux/sched_adaptive.h | 93 |
2 files changed, 101 insertions, 16 deletions
diff --git a/include/linux/rt_param.h b/include/linux/rt_param.h index 52737bb4dd..d14e2129b1 100644 --- a/include/linux/rt_param.h +++ b/include/linux/rt_param.h | |||
| @@ -38,7 +38,6 @@ typedef struct { | |||
| 38 | } service_level_t; | 38 | } service_level_t; |
| 39 | 39 | ||
| 40 | typedef struct { | 40 | typedef struct { |
| 41 | fp_t error; | ||
| 42 | fp_t estimate; | 41 | fp_t estimate; |
| 43 | fp_t accumulated; | 42 | fp_t accumulated; |
| 44 | } predictor_state_t; | 43 | } predictor_state_t; |
| @@ -172,6 +171,15 @@ typedef struct task_rt_param { | |||
| 172 | */ | 171 | */ |
| 173 | fp_t wt_y; | 172 | fp_t wt_y; |
| 174 | fp_t wt_slope; | 173 | fp_t wt_slope; |
| 174 | |||
| 175 | /* Adaptive support. Optimizer fields. | ||
| 176 | */ | ||
| 177 | struct list_head opt_list; | ||
| 178 | fp_t opt_order; | ||
| 179 | fp_t opt_dw; | ||
| 180 | fp_t opt_nw; | ||
| 181 | unsigned int opt_level; | ||
| 182 | jiffie_t opt_change; | ||
| 175 | } task_rt_param_t; | 183 | } task_rt_param_t; |
| 176 | 184 | ||
| 177 | /* Possible RT flags */ | 185 | /* Possible RT flags */ |
| @@ -192,9 +200,17 @@ typedef struct task_rt_param { | |||
| 192 | #define set_exec_cost(t,e) (t)->rt_param.basic_params.exec_cost=(e) | 200 | #define set_exec_cost(t,e) (t)->rt_param.basic_params.exec_cost=(e) |
| 193 | #define get_partition(t) (t)->rt_param.basic_params.cpu | 201 | #define get_partition(t) (t)->rt_param.basic_params.cpu |
| 194 | #define get_deadline(t) ((t)->rt_param.times.deadline) | 202 | #define get_deadline(t) ((t)->rt_param.times.deadline) |
| 203 | #define get_last_release(t) ((t)->rt_param.times.last_release) | ||
| 195 | #define get_class(t) ((t)->rt_param.basic_params.class) | 204 | #define get_class(t) ((t)->rt_param.basic_params.class) |
| 196 | 205 | ||
| 206 | #define is_active(t) \ | ||
| 207 | (get_last_release(t) < jiffies && get_deadline(t) >= jiffies) | ||
| 208 | |||
| 197 | #define get_est_weight(t) ((t)->rt_param.predictor_state.estimate) | 209 | #define get_est_weight(t) ((t)->rt_param.predictor_state.estimate) |
| 210 | #define get_sl(t, l) \ | ||
| 211 | ((t)->rt_param.service_level[get_cur_sl(t)]) | ||
| 212 | #define get_cur_sl(t) ((t)->rt_param.cur_service_level) | ||
| 213 | #define get_max_sl(t) ((t)->rt_param.no_service_levels - 1) | ||
| 198 | 214 | ||
| 199 | #define is_realtime(t) ((t)->rt_param.is_realtime) | 215 | #define is_realtime(t) ((t)->rt_param.is_realtime) |
| 200 | #define is_subject_to_srp(t) ((t)->rt_param.subject_to_srp) | 216 | #define is_subject_to_srp(t) ((t)->rt_param.subject_to_srp) |
| @@ -208,12 +224,8 @@ typedef struct task_rt_param { | |||
| 208 | #define clear_rt_params(t) \ | 224 | #define clear_rt_params(t) \ |
| 209 | memset(&(t)->rt_param,0, sizeof(struct task_rt_param)) | 225 | memset(&(t)->rt_param,0, sizeof(struct task_rt_param)) |
| 210 | 226 | ||
| 211 | #define get_last_release_time(t) ((t)->rt_param.times.last_release) | ||
| 212 | #define set_last_release_time(t,r) ((t)->rt_param.times.last_release=(r)) | ||
| 213 | |||
| 214 | #define get_release(t) ((t)->rt_param.times.release) | 227 | #define get_release(t) ((t)->rt_param.times.release) |
| 215 | #define set_release(t,r) ((t)->rt_param.times.release=(r)) | 228 | #define set_release(t,r) ((t)->rt_param.times.release=(r)) |
| 216 | #define get_last_release(t) ((t)->rt_param.times.last_release) | ||
| 217 | 229 | ||
| 218 | /* honor the flag that is set when scheduling is in progress | 230 | /* honor the flag that is set when scheduling is in progress |
| 219 | * This is some dirty hack in Linux that creates race conditions in our code | 231 | * This is some dirty hack in Linux that creates race conditions in our code |
| @@ -253,4 +265,6 @@ memset(&(t)->rt_param,0, sizeof(struct task_rt_param)) | |||
| 253 | } while(0); | 265 | } while(0); |
| 254 | 266 | ||
| 255 | 267 | ||
| 268 | #define rt_list2task(p) list_entry(p, struct task_struct, rt_list) | ||
| 269 | |||
| 256 | #endif | 270 | #endif |
diff --git a/include/linux/sched_adaptive.h b/include/linux/sched_adaptive.h index eac2f90652..0f56a57cd3 100644 --- a/include/linux/sched_adaptive.h +++ b/include/linux/sched_adaptive.h | |||
| @@ -1,9 +1,15 @@ | |||
| 1 | #ifndef __ADAPTIVE_H__ | 1 | #ifndef __ADAPTIVE_H__ |
| 2 | #define __ADAPTIVE_H__ | 2 | #define __ADAPTIVE_H__ |
| 3 | 3 | ||
| 4 | static inline unsigned long ideal_allocation(fp_t weight, unsigned long delta_t) | 4 | static inline fp_t ideal(fp_t weight, jiffie_t delta_t) |
| 5 | { | 5 | { |
| 6 | return _floor(_mul(weight, FP(delta_t))); | 6 | return _mul(weight, FP(delta_t)); |
| 7 | } | ||
| 8 | |||
| 9 | static inline long ideal_exec_time(struct task_struct* t) | ||
| 10 | { | ||
| 11 | jiffie_t delta = jiffies - get_last_release(t); | ||
| 12 | return _round(ideal(get_est_weight(t), delta)); | ||
| 7 | } | 13 | } |
| 8 | 14 | ||
| 9 | /* this makes a whole bunch of linearity assumptions */ | 15 | /* this makes a whole bunch of linearity assumptions */ |
| @@ -16,19 +22,84 @@ static inline fp_t weight_transfer(fp_t from_val, fp_t to_val, | |||
| 16 | return _div(_mul(act_weight, rel_to), rel_from); | 22 | return _div(_mul(act_weight, rel_to), rel_from); |
| 17 | } | 23 | } |
| 18 | 24 | ||
| 19 | static inline void update_estimate(predictor_state_t *state, fp_t actual_weight, | 25 | static inline fp_t est_weight_at(struct task_struct* t, unsigned int level) |
| 20 | fp_t a, fp_t b, fp_t c) | ||
| 21 | { | 26 | { |
| 22 | fp_t err, new, delta_err; | 27 | if (t->rt_param.no_service_levels) |
| 28 | return weight_transfer(get_sl(t, get_cur_sl(t)).value, | ||
| 29 | get_sl(t, level).value, | ||
| 30 | t->rt_param.wt_slope, t->rt_param.wt_y, | ||
| 31 | get_est_weight(t)); | ||
| 32 | else | ||
| 33 | return get_est_weight(t); | ||
| 34 | |||
| 35 | } | ||
| 36 | |||
| 37 | static void update_estimate(predictor_state_t *state, fp_t actual_weight, | ||
| 38 | fp_t a, fp_t b) | ||
| 39 | { | ||
| 40 | fp_t err, new; | ||
| 23 | 41 | ||
| 24 | err = _sub(actual_weight, state->estimate); | 42 | err = _sub(actual_weight, state->estimate); |
| 25 | state->accumulated = _add(state->accumulated, err); | 43 | new = _add(_mul(a, err), |
| 26 | delta_err = _sub(err, state->error); | 44 | _mul(b, state->accumulated)); |
| 27 | new = _add(_add(_mul(a, err), | ||
| 28 | _mul(b, state->accumulated)), | ||
| 29 | _mul(c, delta_err)); | ||
| 30 | state->estimate = new; | 45 | state->estimate = new; |
| 31 | state->error = err; | 46 | state->accumulated = _add(state->accumulated, err); |
| 47 | } | ||
| 48 | |||
| 49 | static fp_t linear_metric(struct task_struct* t) | ||
| 50 | { | ||
| 51 | fp_t v1, vcur, vmax, g1, gmax; | ||
| 52 | fp_t est_w; | ||
| 53 | unsigned int l = t->rt_param.no_service_levels; | ||
| 54 | unsigned int lcur; | ||
| 55 | |||
| 56 | if (l <= 1) | ||
| 57 | return FP(0); | ||
| 58 | |||
| 59 | lcur = t->rt_param.cur_service_level; | ||
| 60 | est_w = get_est_weight(t); | ||
| 61 | v1 = t->rt_param.service_level[0].value; | ||
| 62 | vmax = t->rt_param.service_level[l - 1].value; | ||
| 63 | vcur = t->rt_param.service_level[lcur].value; | ||
| 64 | |||
| 65 | g1 = weight_transfer(vcur, v1, | ||
| 66 | t->rt_param.wt_slope, t->rt_param.wt_y, | ||
| 67 | est_w); | ||
| 68 | gmax = weight_transfer(vcur, vmax, | ||
| 69 | t->rt_param.wt_slope, t->rt_param.wt_y, | ||
| 70 | est_w); | ||
| 71 | return _div(_sub(vmax, v1), | ||
| 72 | _sub(gmax, g1)); | ||
| 73 | } | ||
| 74 | |||
| 75 | static unsigned long reweighted_deadline(fp_t ow, fp_t nw, unsigned long alloc, | ||
| 76 | jiffie_t deadline, jiffie_t release) | ||
| 77 | { | ||
| 78 | fp_t dl; | ||
| 79 | dl = _mul(FP(deadline - release), ow); | ||
| 80 | dl = _sub(dl, FP(alloc)); | ||
| 81 | dl = _div(dl, nw); | ||
| 82 | return _round(dl); | ||
| 83 | } | ||
| 84 | |||
| 85 | static inline int is_under_allocated(struct task_struct* t) | ||
| 86 | { | ||
| 87 | return ideal_exec_time(t) >= t->rt_param.times.exec_time; | ||
| 32 | } | 88 | } |
| 33 | 89 | ||
| 90 | static inline jiffie_t equal_point(struct task_struct* t) | ||
| 91 | { | ||
| 92 | return _round(_div( FP(t->rt_param.times.exec_time), | ||
| 93 | get_est_weight(t))) - | ||
| 94 | (jiffies - get_last_release(t)); | ||
| 95 | } | ||
| 96 | |||
| 97 | static jiffie_t decrease_enactment_time(struct task_struct* t) | ||
| 98 | { | ||
| 99 | if (is_active(t) && !is_under_allocated(t)) | ||
| 100 | return equal_point(t); | ||
| 101 | return 0; | ||
| 102 | } | ||
| 103 | |||
| 104 | |||
| 34 | #endif | 105 | #endif |
