 include/linux/sched_adaptive.h | 161 ----
 kernel/sched_adaptive.c        | 255 ++--
 2 files changed, 224 insertions(+), 192 deletions(-)
diff --git a/include/linux/sched_adaptive.h b/include/linux/sched_adaptive.h
deleted file mode 100644
index 81b803ad31..0000000000
--- a/include/linux/sched_adaptive.h
+++ /dev/null
@@ -1,161 +0,0 @@
-#ifndef __ADAPTIVE_H__
-#define __ADAPTIVE_H__
-
-/* All time dependent functions
- * rely on this field.
- * Update in the optimizer before use!
- */
-extern jiffie_t opt_time;
-
-
-static inline fp_t ideal(fp_t weight, jiffie_t delta_t)
-{
-	return _mul(weight, FP(delta_t));
-}
-
-static noinline long ideal_exec_time(struct task_struct* t)
-{
-	jiffie_t delta = opt_time - get_last_release(t);
-	return _round(ideal(get_est_weight(t), delta));
-}
-
-/* this makes a whole bunch of linearity assumptions */
-static noinline fp_t weight_transfer(struct task_struct* t,
-				     unsigned int from, unsigned int to,
-				     fp_t act_weight)
-{
-	fp_t rel_from, rel_to, ret;
-	rel_from = get_sl(t, from).weight;
-	rel_to   = get_sl(t, to).weight;
-	ret.val  = (act_weight.val * rel_to.val) / rel_from.val;
-	/*_div(_mul(act_weight, rel_to), rel_from);*/
-	TRACE("weight_transfer(%ld, %ld, %ld) => %ld to=%u from=%u\n",
-	      rel_from.val, rel_to.val, act_weight.val, ret.val, from, to);
-	return ret;
-}
-
-static noinline fp_t est_weight_at(struct task_struct* t, unsigned int level)
-{
-	if (t->rt_param.no_service_levels)
-		return weight_transfer(t, get_cur_sl(t), level,
-				       get_est_weight(t));
-	else
-		return get_est_weight(t);
-
-}
-
-static noinline void update_estimate(predictor_state_t *state, fp_t actual_weight,
-				     fp_t a, fp_t b)
-{
-	fp_t err, new;
-
-	TRACE("OLD ESTIMATE Weight" _FP_ " ActWt " _FP_ " A:" _FP_ ", B:" _FP_
-	      "\n", fp2str(state->estimate), fp2str(actual_weight), fp2str(a),
-	      fp2str(b));
-
-	err = _sub(actual_weight, state->estimate);
-	new = _add(_mul(a, err),
-		   _mul(b, state->accumulated));
-	state->estimate = new;
-	state->accumulated = _add(state->accumulated, err);
-	TRACE("ERROR " _FP_ ", NEW " _FP_ ", ACC" _FP_ "\n", fp2str(err),
-	      fp2str(new), fp2str(state->accumulated));
-}
-
-static noinline fp_t linear_metric(struct task_struct* t)
-{
-	fp_t v1, vmax, g1, gmax;
-	fp_t est_w;
-	unsigned int l = t->rt_param.no_service_levels;
-	unsigned int lcur;
-
-	if (l <= 1)
-		return FP(0);
-
-	lcur = get_cur_sl(t);;
-	est_w = get_est_weight(t);
-
-	TRACE_TASK(t, " linear_metric: lcur=%u l=%u est_w=" _FP_ "\n",
-		   lcur, l, est_w);
-/*	TRACE_TASK(t, " linear_metric: est_w.val=%ld\n", est_w.val); */
-
-
-	v1   = t->rt_param.service_level[0].value;
-	vmax = t->rt_param.service_level[l - 1].value;
-
-	TRACE_TASK(t, " linear_metric: v1=" _FP_ " vmax=" _FP_ "\n", v1, vmax);
-/*	TRACE_TASK(t, " linear_metric: v1=%ld vmax=%ld\n", v1.val, vmax.val);*/
-
-
-	g1   = weight_transfer(t, lcur, 0, est_w);
-	gmax = weight_transfer(t, lcur, l - 1, est_w);
-
-	TRACE_TASK(t, " linear_metric: g1=" _FP_ " gmax=" _FP_ "\n", g1, gmax);
-/*	TRACE_TASK(t, " linear_metric: g1=%ld gmax=%ld\n", g1, gmax); */
-
-
-	TRACE_BUG_ON(_eq(_sub(gmax, g1), FP(0)));
-	if (_eq(_sub(gmax, g1), FP(0)))
-		return FP(0);
-	return _div(_sub(vmax, v1),
-		    _sub(gmax, g1));
-}
-
-static noinline unsigned long reweighted_period(fp_t ow, fp_t nw, unsigned long alloc,
-						jiffie_t deadline, jiffie_t release)
-{
-	fp_t dl;
-	dl = _mul(FP(deadline - release), ow);
-	dl = _sub(dl, FP(alloc));
-	dl = _div(dl, nw);
-	return _round(dl);
-}
-
-static noinline int is_under_allocated(struct task_struct* t)
-{
-	return ideal_exec_time(t) >= t->rt_param.times.exec_time;
-}
-
-static noinline jiffie_t dec_equal_point_delay(struct task_struct* t)
-{
-	if (_lt(FP(0), get_est_weight(t)))
-		/* when t was released plus time needed to equalize
-		 * minus now
-		 */
-		return get_last_release(t) +
-			_round(_div( FP(t->rt_param.times.exec_time),
-				     get_est_weight(t))) -
-			opt_time;
-	else
-		/* if the weight is zero we just take the
-		 * deadline
-		 */
-		return t->rt_param.times.deadline;
-}
-
-static noinline jiffie_t inc_equal_point_delay(struct task_struct* t)
-{
-	if (_lt(FP(0), t->rt_param.opt_nw))
-		/* when t was released plus time needed to equalize
-		 * minus now
-		 */
-		return get_last_release(t) +
-			_round(_div( FP(t->rt_param.times.exec_time),
-				     t->rt_param.opt_nw)) -
-			opt_time;
-	else
-		/* if the weight is zero we just take the
-		 * deadline
-		 */
-		return t->rt_param.times.deadline;
-}
-
-static noinline jiffie_t decrease_delay(struct task_struct* t)
-{
-	if (has_active_job(t) && !is_under_allocated(t))
-		return dec_equal_point_delay(t);
-	return 0;
-}
-
-
-#endif
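The header is removed wholesale: as the second file of the diff shows, its helpers move into kernel/sched_adaptive.c, with the TRACE()/TRACE_TASK() calls converted to the new OPT_DBG()/OPT_DBG_T() macros. All of this optimizer math runs on the fp_t fixed-point type from linux/fpmath.h, since floating point is off-limits in kernel context. For reference, a rough user-space sketch of the handful of helpers the code leans on, assuming a 10-bit fractional format (the actual fpmath.h layout may differ):

	typedef struct { long val; } fp_t;	/* fixed-point scalar */

	#define FP_SHIFT 10			/* assumed fractional bits */
	#define FP(x)	((fp_t){ (long)(x) << FP_SHIFT })

	static inline fp_t _add(fp_t a, fp_t b) { return (fp_t){ a.val + b.val }; }
	static inline fp_t _sub(fp_t a, fp_t b) { return (fp_t){ a.val - b.val }; }
	static inline fp_t _mul(fp_t a, fp_t b) { return (fp_t){ (a.val * b.val) >> FP_SHIFT }; }
	static inline fp_t _div(fp_t a, fp_t b) { return (fp_t){ (a.val << FP_SHIFT) / b.val }; }
	static inline fp_t _frac(long a, long b) { return _div(FP(a), FP(b)); }
	static inline long _round(fp_t a) { return (a.val + (1 << (FP_SHIFT - 1))) >> FP_SHIFT; }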
diff --git a/kernel/sched_adaptive.c b/kernel/sched_adaptive.c
index 0d524cf258..0f00903fad 100644
--- a/kernel/sched_adaptive.c
+++ b/kernel/sched_adaptive.c
@@ -1,4 +1,5 @@
 
+
 /*
  * kernel/sched_adaptive.c
  *
@@ -26,7 +27,6 @@
 #include <linux/sched_trace.h>
 
 #include <linux/fpmath.h>
-#include <linux/sched_adaptive.h>
 
 /* Overview of GSN-EDF operations.
  *
@@ -143,6 +143,179 @@ jiffie_t opt_time;
  */
 static LIST_HEAD(adaptive_inc_list);
 
+/* comment out to disable optimizer debugging */
+#define ENABLE_OPTIMIZER_DEBUGGING
+
+#ifdef ENABLE_OPTIMIZER_DEBUGGING
+#define OPT_DBG TRACE
+#define OPT_DBG_T TRACE_TASK
+#else
+#define OPT_DBG
+#define OPT_DBG_T OPT_D
+#endif
+
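The OPT_DBG/OPT_DBG_T pair is a compile-time debug switch: with ENABLE_OPTIMIZER_DEBUGGING defined they alias the existing TRACE() and TRACE_TASK() helpers, and with it commented out they expand to nothing, so optimizer tracing costs nothing in production builds. A minimal stand-alone sketch of the same pattern, with a hypothetical printf() stand-in for TRACE() (not the kernel code):

	#include <stdio.h>

	#define ENABLE_OPTIMIZER_DEBUGGING

	#ifdef ENABLE_OPTIMIZER_DEBUGGING
	/* debugging on: forward to the real logging call */
	#define OPT_DBG(fmt, ...) printf("opt: " fmt, ##__VA_ARGS__)
	#else
	/* debugging off: call sites compile away entirely */
	#define OPT_DBG(fmt, ...) do { } while (0)
	#endif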
+/******************************************************************************/
+/*                              OPTIMIZER MATH                                */
+/******************************************************************************/
+
+/* All time dependent functions
+ * rely on opt_time.
+ * Update in the optimizer before use!
+ */
+
+static inline fp_t ideal(fp_t weight, jiffie_t delta_t)
+{
+	return _mul(weight, FP(delta_t));
+}
+
+static noinline long ideal_exec_time(struct task_struct* t)
+{
+	jiffie_t delta = opt_time - get_last_release(t);
+	return _round(ideal(get_est_weight(t), delta));
+}
+
+/* this makes a whole bunch of linearity assumptions */
+static noinline fp_t weight_transfer(struct task_struct* t,
+				     unsigned int from, unsigned int to,
+				     fp_t act_weight)
+{
+	fp_t rel_from, rel_to, ret;
+	rel_from = get_sl(t, from).weight;
+	rel_to   = get_sl(t, to).weight;
+	ret.val  = (act_weight.val * rel_to.val) / rel_from.val;
+	OPT_DBG("weight_transfer(%ld, %ld, %ld) => %ld to=%u from=%u\n",
+		rel_from.val, rel_to.val, act_weight.val, ret.val, from, to);
+
+	return ret;
+}
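weight_transfer() rescales a task's measured weight from one service level to another under the linearity assumption the comment names: a task consuming act_weight at a level of nominal weight W_from is predicted to consume a proportional amount at a level of nominal weight W_to,

	\hat{w}_{to} = act\_weight \cdot \frac{W_{to}}{W_{from}}

The open-coded .val arithmetic is equivalent to _div(_mul(act_weight, rel_to), rel_from), presumably keeping the intermediate product in a plain long to skip one rounding step.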
+
+static noinline fp_t est_weight_at(struct task_struct* t, unsigned int level)
+{
+	if (t->rt_param.no_service_levels)
+		return weight_transfer(t, get_cur_sl(t), level,
+				       get_est_weight(t));
+	else
+		return get_est_weight(t);
+
+}
+
+static noinline void update_estimate(predictor_state_t *state, fp_t actual_weight,
+				     fp_t a, fp_t b)
+{
+	fp_t err, new;
+
+	OPT_DBG("OLD ESTIMATE Weight" _FP_ " ActWt " _FP_ " A:" _FP_ ", B:" _FP_
+		"\n", fp2str(state->estimate), fp2str(actual_weight), fp2str(a),
+		fp2str(b));
+	err = _sub(actual_weight, state->estimate);
+	new = _add(_mul(a, err),
+		   _mul(b, state->accumulated));
+	state->estimate = new;
+	state->accumulated = _add(state->accumulated, err);
+	OPT_DBG("ERROR " _FP_ ", NEW " _FP_ ", ACC" _FP_ "\n", fp2str(err),
+		fp2str(new), fp2str(state->accumulated));
+
+}
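update_estimate() is the feedback core of the predictor: the next weight estimate is a gain a times the current estimation error plus a gain b times the accumulated error history (the call site in job_completion() passes the global gains fc_a and fc_b). For job k:

	e_k = w_k - \hat{w}_k
	\hat{w}_{k+1} = a \, e_k + b \sum_{j < k} e_j
	acc_{k+1} = acc_k + e_k

i.e. a proportional-integral style update on the measured per-job weight.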
+
+static noinline fp_t linear_metric(struct task_struct* t)
+{
+	fp_t v1, vmax, g1, gmax;
+	fp_t est_w;
+	unsigned int l = t->rt_param.no_service_levels;
+	unsigned int lcur;
+
+	if (l <= 1)
+		return FP(0);
+
+	lcur = get_cur_sl(t);;
+	est_w = get_est_weight(t);
+
+	OPT_DBG_T(t, " linear_metric: lcur=%u l=%u est_w=" _FP_ "\n",
+		  lcur, l, est_w);
+	OPT_DBG_T(t, " linear_metric: est_w.val=%ld\n", est_w.val);
+
+
+	v1   = t->rt_param.service_level[0].value;
+	vmax = t->rt_param.service_level[l - 1].value;
+
+	OPT_DBG_T(t, " linear_metric: v1=" _FP_ " vmax=" _FP_ "\n", v1, vmax);
+	OPT_DBG_T(t, " linear_metric: v1=%ld vmax=%ld\n", v1.val, vmax.val);
+
+
+	g1   = weight_transfer(t, lcur, 0, est_w);
+	gmax = weight_transfer(t, lcur, l - 1, est_w);
+
+	OPT_DBG_T(t, " linear_metric: g1=" _FP_ " gmax=" _FP_ "\n", g1, gmax);
+	OPT_DBG_T(t, " linear_metric: g1=%ld gmax=%ld\n", g1, gmax);
+
+
+	TRACE_BUG_ON(_eq(_sub(gmax, g1), FP(0)));
+	if (_eq(_sub(gmax, g1), FP(0)))
+		return FP(0);
+	return _div(_sub(vmax, v1),
+		    _sub(gmax, g1));
+}
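linear_metric() is the optimizer's ordering key: the value gained by moving a task from its lowest to its highest service level, divided by the extra estimated weight that move would cost,

	L(t) = \frac{v_{max} - v_1}{g_{max} - g_1}

where g_1 and g_max are the current weight estimate transferred to the lowest and highest levels. Tasks with at most one service level, or with no weight difference between the extreme levels, return L = 0.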
+
+static noinline unsigned long reweighted_period(fp_t ow, fp_t nw, unsigned long alloc,
+						jiffie_t deadline, jiffie_t release)
+{
+	fp_t dl;
+	dl = _mul(FP(deadline - release), ow);
+	dl = _sub(dl, FP(alloc));
+	if(_eq(nw, FP(0)))
+		return 0;
+	dl = _div(dl, nw);
+	return _round(dl);
+}
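reweighted_period() converts a weight change into a deadline: the ideal allocation remaining under the old weight ow, stretched by the new weight nw,

	d' = \frac{ow \cdot (deadline - release) - alloc}{nw}

The _eq(nw, FP(0)) test is new in this patch; it returns 0 rather than dividing by a zero new weight.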
+
+static noinline int is_under_allocated(struct task_struct* t)
+{
+	return ideal_exec_time(t) >= t->rt_param.times.exec_time;
+}
+
+static noinline jiffie_t dec_equal_point_delay(struct task_struct* t)
+{
+	if (_lt(FP(0), get_est_weight(t)))
+		/* when t was released plus time needed to equalize
+		 * minus now
+		 */
+		return get_last_release(t) +
+			_round(_div( FP(t->rt_param.times.exec_time),
+				     get_est_weight(t))) -
+			opt_time;
+	else
+		/* if the weight is zero we just take the
+		 * deadline
+		 */
+		return t->rt_param.times.deadline;
+}
+
+static noinline jiffie_t inc_equal_point_delay(struct task_struct* t)
+{
+	if (_lt(FP(0), t->rt_param.opt_nw))
+		/* when t was released plus time needed to equalize
+		 * minus now
+		 */
+		return get_last_release(t) +
+			_round(_div( FP(t->rt_param.times.exec_time),
+				     t->rt_param.opt_nw)) -
+			opt_time;
+	else
+		/* if the weight is zero we just take the
+		 * deadline
+		 */
+		return t->rt_param.times.deadline;
+}
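Both equal-point helpers answer the same question for different weights: at what time does the ideal allocation w * (T - release) catch up with the work the job has already received? Solving w * (T - r) = exec_time gives

	T = r + \frac{exec\_time}{w},  so  delay = r + \frac{exec\_time}{w} - t_{opt}

dec_equal_point_delay() evaluates this at the current weight estimate, inc_equal_point_delay() at the optimizer's proposed new weight opt_nw; a zero weight falls back to the job deadline.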
+
+static noinline jiffie_t decrease_delay(struct task_struct* t)
+{
+	if (has_active_job(t) && !is_under_allocated(t))
+		return dec_equal_point_delay(t);
+	return 0;
+}
+
+
+
 /******************************************************************************/
 /*                                SORT ORDERS                                 */
 /******************************************************************************/
@@ -187,11 +360,10 @@ static void set_service_level(struct task_struct* t, unsigned int level)
 	t->rt_param.basic_params.exec_cost = _round(_mul(new->weight,
 		FP(new->period)));
 
-	if (t->rt_param.signal_ready)
-		scheduler_signal(t, SIGUSR1);
+	scheduler_signal(t, SIGUSR1);
 
 	sched_trace_service_level_change(t);
-	TRACE_TASK(t, "service level %u activated\n", level);
+	OPT_DBG_T(t, "service level %u activated\n", level);
 }
 
 /* call this _before_ updating deadline and release of t */
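set_service_level() derives the new budget from the level's nominal parameters, exec_cost = round(weight * period); e.g. a nominal weight of 0.25 at a 100-jiffy period yields a 25-jiffy budget. The patch also drops the signal_ready test, so the task now always receives SIGUSR1 when its service level changes.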
@@ -205,16 +377,18 @@ static void update_weight_estimate(struct task_struct* t)
 	exec_time = t->rt_param.times.exec_time;
 	sl_period = get_sl(t, get_opt_sl(t)).period;
 
-	/*TRACE("ow=" _FP_ " nw=" _FP_ ", r-d " _FP_ ", deadline %d, release %d, exec_time=%ld sl_period=%lu\n",
-	  fp2str(ow), fp2str(nw),fp2str(FP(get_deadline(t)-get_last_release(t))), get_deadline(t), get_last_release(t), exec_time, sl_period);*/
+	OPT_DBG("ow=" _FP_ " nw=" _FP_
+		", deadline %d, release %d, exec_time=%ld sl_period=%lu\n",
+		fp2str(ow), fp2str(nw),
+		fp2str(FP(get_deadline(t) - get_last_release(t))),
+		get_deadline(t), get_last_release(t), exec_time, sl_period);
 
 	t->rt_param.predictor_state.estimate = nw;
-	TRACE_TASK(t, "AAAupdate_weight_estimate from " _FP_ " to "_FP_"\n", fp2str(ow), fp2str(nw));
+	OPT_DBG_T(t, "update_weight_estimate from " _FP_ " to "_FP_"\n", fp2str(ow), fp2str(nw));
+
 
-	/*_div( _max(_mul(nw, FP(sl_period)), FP(exec_time)),
-	  FP(get_deadline(t) - get_last_release(t)));
-	TRACE_TASK(t, " update_weight_estimate: " _FP_ " => " _FP_ "\n",
-	  fp2str(ow), fp2str(get_est_weight(t)));*/
+	OPT_DBG_T(t, " update_weight_estimate: " _FP_ " => " _FP_ "\n",
+		fp2str(ow), fp2str(get_est_weight(t)));
 }
 
 
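After the cleanup, update_weight_estimate() simply adopts the optimizer's proposed weight as the new estimate (estimate = nw) and traces the transition; the deleted comment block preserved an older scheme that instead computed max(nw * sl_period, exec_time) / (deadline - release).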
@@ -344,7 +518,7 @@ void adaptive_optimize(void)
 	unsigned int l;
 	jiffie_t enactment_time;
 
-	TRACE(":::::: running adaptive optimizer\n");
+	OPT_DBG(":::::: running adaptive optimizer\n");
 	opt_time = jiffies;
 
 	INIT_LIST_HEAD(&list);
@@ -364,7 +538,7 @@ void adaptive_optimize(void)
 	for_each_online_cpu(i)
 		M = _add(M, FP(1));
 	_M = M;
-	TRACE("opt: system capacity: " _FP_ "\n", fp2str(M));
+	OPT_DBG("opt: system capacity: " _FP_ "\n", fp2str(M));
 
 	/* 3) Compute L value for all tasks,
 	 *    and set tasks to service level 0,
@@ -378,15 +552,15 @@ void adaptive_optimize(void)
 			continue;
 		}
 		t->rt_param.opt_order = linear_metric(t);
-		TRACE_TASK(t, "est_w = " _FP_ " L = " _FP_ "\n",
+		OPT_DBG_T(t, "est_w = " _FP_ " L = " _FP_ "\n",
 			get_est_weight(t),
 			fp2str(t->rt_param.opt_order));
 		t->rt_param.opt_level = 0;
 		M = _sub(M, est_weight_at(t, 0));
 		estU = _add(estU, get_est_weight(t));
 	}
-	TRACE("opt: estimated utilization: " _FP_ "\n", fp2str(estU));
-	TRACE("opt: estimated capacity at all sl=0: " _FP_ "\n", fp2str(M));
+	OPT_DBG("opt: estimated utilization: " _FP_ "\n", fp2str(estU));
+	OPT_DBG("opt: estimated capacity at all sl=0: " _FP_ "\n", fp2str(M));
 
 
 	/* 4) sort list by decreasing linear metric */
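Steps 2 and 3 establish the bookkeeping the level-assignment loop consumes: M starts at one unit of capacity per online CPU, each task is tentatively parked at service level 0, its level-0 weight is deducted from M, and its current estimate is summed into estU. With m CPUs and tasks indexed by i:

	M = m - \sum_i \hat{w}_i(0),        estU = \sum_i \hat{w}_i

The remaining M is the slack the optimizer can spend promoting tasks to higher service levels in order of decreasing L(t).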
@@ -411,8 +585,9 @@ void adaptive_optimize(void)
 				break; /* proceed to next task */
 			}
 		}
-		TRACE_TASK(t, " will run at sl=%u, prior=%u dw=" _FP_ "\n",
+		OPT_DBG_T(t, " will run at sl=%u, prior=%u dw=" _FP_ "\n",
 			l, get_cur_sl(t), fp2str(t->rt_param.opt_dw));
+
 	}
 
 	/* 6) filter tasks that reweight */
@@ -467,9 +642,10 @@ void adaptive_optimize(void)
 				/* opt_dw is negative */
 				estU = _add(estU, t->rt_param.opt_dw);
 				list_add(p, &list);
-				TRACE_TASK(t, " weight decrease at %ld => estU="
-					_FP_ "\n", enactment_time,
-					fp2str(estU));
+
+				OPT_DBG_T(t, " weight decrease at %ld => estU="
+					_FP_ "\n", enactment_time,
+					fp2str(estU));
 
 			} else
 				/* stop decrease loop */
@@ -488,9 +664,11 @@ void adaptive_optimize(void)
 				t->rt_param.opt_change = enactment_time;
 				list_del(p);
 				list_add(p, &list);
-				TRACE_TASK(t, " weight increase at %ld => estU="
+
+				OPT_DBG_T(t, " weight increase at %ld => estU="
 					_FP_ "\n", enactment_time,
 					fp2str(estU));
+
 			} else
 				/* stop increase loop */
 				break;
@@ -516,7 +694,7 @@ void adaptive_optimize(void)
 	}
 
 	last_optimizer_run = jiffies;
-	TRACE(":::::: optimizer run complete\n");
+	OPT_DBG(":::::: optimizer run complete\n");
 }
 
 /* update_cpu_position - Move the cpu entry to the correct place to maintain
@@ -693,8 +871,10 @@ static void adaptive_job_arrival(struct task_struct* task)
 	if (edf_preemption_needed(&adaptive, last->linked)) {
 		/* preemption necessary */
 		task = __take_ready(&adaptive);
+
 		TRACE("job_arrival: task %d linked to %d\n",
 			task->pid, last->cpu);
+
 		if (last->linked)
 			requeue(last->linked);
 
@@ -763,7 +943,9 @@ static reschedule_check_t adaptive_scheduler_tick(void)
 
 	/* (1) run the optimizer if it did not trigger often enough */
 	if (time_before_eq(last_optimizer_run + optimizer_period, jiffies)) {
-		TRACE("adaptive: optimizing due to period threshold\n");
+
+		OPT_DBG("adaptive: optimizing due to period threshold\n");
+
 		adaptive_optimize();
 	}
 
@@ -788,13 +970,15 @@ static noinline void job_completion(struct task_struct *t)
 {
 	long delta;
 	fp_t actual_weight, old_estimate;
+	unsigned int lcurr = get_cur_sl(t);
+	fp_t v = t->rt_param.service_level[lcurr].value;
 	BUG_ON(!t);
 
 	sched_trace_job_completion(t);
 	delta = t->rt_param.times.exec_time -
 		t->rt_param.basic_params.exec_cost;
 
-	TRACE_TASK(t, "job %d completes, delta WCET = %d\n",
+	OPT_DBG_T(t, "job %d completes, delta WCET = %d\n",
 		t->rt_param.times.job_no, delta);
 
 	actual_weight = _frac(t->rt_param.times.exec_time,
@@ -804,13 +988,16 @@ static noinline void job_completion(struct task_struct *t)
 	update_estimate(&t->rt_param.predictor_state, actual_weight,
 		fc_a, fc_b);
 
-	TRACE_TASK(t, " weight estimation: error=" _FP_ " weight=" _FP_ " => " _FP_ "\n",
+	OPT_DBG_T(t, "Job %d completes. Current value " _FP_
+		", Weight estimation: error=" _FP_ " weight="
+		_FP_ " => " _FP_ "\n", t->rt_param.times.job_no, v,
 		_sub(get_est_weight(t), old_estimate),
 		old_estimate, get_est_weight(t));
-	if (_gt(_div(_abs(_sub(get_est_weight(t), old_estimate)),
-		get_est_weight(t)),
-		task_error_threshold)) {
-		TRACE("adaptive: optimizing due to task error threshold\n");
+
+	if ( (!_eq(get_est_weight(t), FP(0))) &&
+	     (_gt(_div(_abs(_sub(get_est_weight(t), old_estimate)),
+		get_est_weight(t)), task_error_threshold))) {
+		OPT_DBG("adaptive: optimizing due to task error threshold\n");
 		adaptive_optimize();
 	}
 
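The new guard makes the relative-error trigger safe against a zero estimate: the optimizer is invoked early only when

	\hat{w} \neq 0  and  \frac{|\hat{w} - \hat{w}_{old}|}{\hat{w}} > task\_error\_threshold

where \hat{w} is the just-updated estimate; otherwise the periodic trigger in adaptive_scheduler_tick() remains the path into adaptive_optimize().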
@@ -960,6 +1147,7 @@ static void adaptive_finish_switch(struct task_struct *prev)
 static long adaptive_prepare_task(struct task_struct * t)
 {
 	unsigned long flags;
+
 	TRACE("adaptive: prepare task %d\n", t->pid);
 
 	if (t->state == TASK_STOPPED) {
@@ -975,6 +1163,7 @@ static long adaptive_prepare_task(struct task_struct * t)
 		_frac(get_exec_cost(t), get_rt_period(t));
 
 	TRACE_TASK(t, "est_weight=" _FP_ "\n", get_est_weight(t));
+
 	if (get_rt_mode() == MODE_RT_RUN)
 		/* The action is already on.
 		 * Prepare immediate release
@@ -1005,7 +1194,9 @@ static void adaptive_wake_up_task(struct task_struct *task)
 	 * anyway. If it has zero budget and the next release is in the future
 	 * it has to go to the release queue.
 	 */
+
 	TRACE("adaptive: %d unsuspends\n", task->pid);
+
 	task->state = TASK_RUNNING;
 
 	if (is_tardy(task)) {
@@ -1032,7 +1223,9 @@ static void adaptive_task_blocks(struct task_struct *t)
 	queue_unlock_irqrestore(&adaptive_lock, flags);
 
 	BUG_ON(!is_realtime(t));
+
 	TRACE("task %d suspends\n", t->pid);
+
 	BUG_ON(t->rt_list.next != LIST_POISON1);
 	BUG_ON(t->rt_list.prev != LIST_POISON2);
 }