aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorBjoern B. Brandenburg <bbb@cs.unc.edu>2007-10-09 04:03:39 -0400
committerBjoern B. Brandenburg <bbb@cs.unc.edu>2007-10-09 04:03:39 -0400
commitbd97e173b396ee952f55e6ef01798095cc7e5c84 (patch)
treebf382d3a4204dc2991e3beae7f90b21c6dc133fc /kernel
parentceadf901822f226fc4bb03b8880efd37ad83dc79 (diff)
adaptive: increase+decrease goodness
Complete reweighting rules.
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched_adaptive.c181
1 file changed, 151 insertions, 30 deletions
diff --git a/kernel/sched_adaptive.c b/kernel/sched_adaptive.c
index eab569b04e..de58c0b414 100644
--- a/kernel/sched_adaptive.c
+++ b/kernel/sched_adaptive.c
@@ -11,7 +11,7 @@
11 * what is going on here. 11 * what is going on here.
12 * 12 *
13 * Block et al., "Feedback-Controlled Adaptive Multiprocessor Real-Time 13 * Block et al., "Feedback-Controlled Adaptive Multiprocessor Real-Time
14 * Systems", 2008. 14 * Systems", submitted to RTAS 2008.
15 */ 15 */
16 16
17#include <linux/percpu.h> 17#include <linux/percpu.h>
@@ -93,11 +93,8 @@
93 * __take_ready). 93 * __take_ready).
94 */ 94 */
95 95
96/* TODO: 96static void unlink(struct task_struct* t);
97 * - export weight error on task completion 97static void adaptive_job_arrival(struct task_struct* task);
98 * -
99 */
100
101 98
102/* cpu_entry_t - maintain the linked and scheduled state 99/* cpu_entry_t - maintain the linked and scheduled state
103 */ 100 */
@@ -140,6 +137,10 @@ static fp_t task_error_threshold;
140/* optimizer time snapshot */ 137/* optimizer time snapshot */
141jiffie_t opt_time; 138jiffie_t opt_time;
142 139
140/* Delayed weight increase notification list.
141 * This list gets clobbered on each optimizer run.
142 */
143static LIST_HEAD(adaptive_inc_list);
143 144
144/******************************************************************************/ 145/******************************************************************************/
145/* SORT ORDERS */ 146/* SORT ORDERS */
@@ -184,22 +185,137 @@ static void set_service_level(struct task_struct* t, unsigned int level)
184 t->rt_param.basic_params.period = new->period; 185 t->rt_param.basic_params.period = new->period;
185 t->rt_param.basic_params.exec_cost = _round(_mul(new->weight, 186 t->rt_param.basic_params.exec_cost = _round(_mul(new->weight,
186 FP(new->period))); 187 FP(new->period)));
188
189 /* TODO: post signal! */
190
187 sched_trace_service_level_change(t); 191 sched_trace_service_level_change(t);
188 TRACE_TASK(t, "service level %u activated\n", level); 192 TRACE_TASK(t, "service level %u activated\n", level);
189} 193}
190 194
191/* t is eligible for reweighting now */ 195/* call this _before_ updating deadline and release of t */
192static void enact_weight_change(struct task_struct* t) 196static void update_weight_estimate(struct task_struct* t)
193{ 197{
194 198 fp_t nw;
199 jiffie_t sl_period, exec_time;
200
201 nw = t->rt_param.opt_nw;
202 exec_time = t->rt_param.times.exec_time;
203 sl_period = get_sl(t, get_opt_sl(t)).period;
204
205 t->rt_param.predictor_state.estimate =
206 _div( _max(_mul(nw, FP(sl_period)), FP(exec_time)),
207 FP(get_deadline(t) - get_last_release(t)));
195} 208}
196 209
197/* t has to wait some, it must be halted */ 210
198static void prepare_delayed_weight_change(struct task_struct* t) 211static void decrease_weight(struct task_struct* t)
199{ 212{
213 fp_t ow, nw;
214 jiffie_t last, period, delay;
215
216 ow = get_sl(t, get_cur_sl(t)).weight;
217 nw = get_sl(t, get_opt_sl(t)).weight;
218 last = t->rt_param.times.last_release;
219 period = reweighted_period(ow, nw, t->rt_param.times.exec_time,
220 t->rt_param.times.deadline, last);
221
222 /* necessary delay has already been computed by optimizer */
223 delay = t->rt_param.opt_change;
200 224
225 update_weight_estimate(t);
226
227 if (!delay)
228 t->rt_param.times.last_release = opt_time;
229 t->rt_param.times.release = opt_time + delay;
230 t->rt_param.times.deadline = opt_time + delay + period;
231
232 set_service_level(t, get_opt_sl(t));
233
234 /* take out of queue/link structure */
235 unlink(t);
236 /* present as a new job */
237 adaptive_job_arrival(t);
238}
239
240
241static void increase_weight(struct task_struct* t)
242{
243 fp_t ow, nw;
244 jiffie_t last, period, delay;
245
246 ow = get_sl(t, get_cur_sl(t)).weight;
247 nw = get_sl(t, get_opt_sl(t)).weight;
248 last = t->rt_param.times.last_release;
249 period = reweighted_period(ow, nw, t->rt_param.times.exec_time,
250 t->rt_param.times.deadline, last);
251
252 if (t->rt_param.opt_change == 0) {
253 /* can be enacted now */
254 if (is_under_allocated(t) ||
255 time_before(opt_time + period, get_deadline(t)))
256 /* do it now */
257 delay = 0;
258 else {
259 if (is_under_allocated(t)) {
260 t->rt_param.opt_change += opt_time;
261 /* The next job release will notice that opt !=
262 * sl and initiate a weight change.
263 */
264 return;
265 } else
266 /* nope, wait for equal point */
267 delay = inc_equal_point_delay(t);
268 }
269
270 update_weight_estimate(t);
271
272 if (!delay)
273 t->rt_param.times.last_release = opt_time;
274 t->rt_param.times.release = opt_time + delay;
275 t->rt_param.times.deadline = opt_time + delay + period;
276
277 set_service_level(t, get_opt_sl(t));
278
279 /* take out of queue/link structure */
280 unlink(t);
281 /* present as a new job */
282 adaptive_job_arrival(t);
283
284 } else {
285 /* must wait until capacity is released */
286 t->rt_param.opt_change += opt_time;
287 list_insert(&t->rt_param.opt_list, &adaptive_inc_list,
288 by_enactment_time);
289 }
201} 290}
202 291
292static void delayed_increase_weight(void)
293{
294 struct list_head *p, *extra;
295 struct task_struct* t;
296
297 opt_time = jiffies;
298 list_for_each_safe(p, extra, &adaptive_inc_list) {
299 t = list_entry(p, struct task_struct, rt_param.opt_list);
300 if (time_before_eq(t->rt_param.opt_change, opt_time)) {
301 list_del(p);
302 /* prevent recursion */
303 t->rt_param.opt_change = 0;
304 /* this takes care of everything */
305 increase_weight(t);
306 } else
307 /* list is sorted */
308 break;
309 }
310}
311
312static void change_weight(struct task_struct* t)
313{
314 if (get_cur_sl(t) < get_opt_sl(t))
315 increase_weight(t);
316 else
317 decrease_weight(t);
318}
203 319
204/******************************************************************************/ 320/******************************************************************************/
205/* OPTIMIZER */ 321/* OPTIMIZER */
@@ -293,7 +409,7 @@ void adaptive_optimize(void)
293 list_del(p); 409 list_del(p);
294 if (t->rt_param.opt_level < get_cur_sl(t)) { 410 if (t->rt_param.opt_level < get_cur_sl(t)) {
295 list_add(p, &dec); 411 list_add(p, &dec);
296 t->rt_param.opt_change = decrease_enactment_time(t); 412 t->rt_param.opt_change = decrease_delay(t);
297 } else if (t->rt_param.opt_level > get_cur_sl(t)) { 413 } else if (t->rt_param.opt_level > get_cur_sl(t)) {
298 list_add(p, &inc); 414 list_add(p, &inc);
299 t->rt_param.opt_change = 0; 415 t->rt_param.opt_change = 0;
@@ -378,18 +494,11 @@ void adaptive_optimize(void)
378 * delayed. Also convert change fields to actual timestamps to be 494
379 * nice to the scheduler_tick(). 495 * nice to the scheduler_tick().
380 */ 496 */
381 list_for_each_safe(p, extra, &list) { 497 INIT_LIST_HEAD(&adaptive_inc_list);
382 498 list_for_each_safe(p, extra, &list) {
383 t = list_entry(p, struct task_struct, rt_param.opt_list); 499 t = list_entry(p, struct task_struct, rt_param.opt_list);
384 list_del(p); 500 list_del(p);
385 if (t->rt_param.opt_change == 0) 501 change_weight(t);
386 /* enact change now */
387 enact_weight_change(t);
388 else {
389 /* fixup: make it a real timestamp */
390 t->rt_param.opt_change += opt_time;
391 prepare_delayed_weight_change(t);
392 }
393 } 502 }
394 503
395 last_optimizer_run = jiffies; 504 last_optimizer_run = jiffies;
@@ -471,7 +580,7 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
471/* unlink - Make sure a task is not linked any longer to an entry 580/* unlink - Make sure a task is not linked any longer to an entry
472 * where it was linked before. Must hold adaptive_lock. 581 * where it was linked before. Must hold adaptive_lock.
473 */ 582 */
474static noinline void unlink(struct task_struct* t) 583static void unlink(struct task_struct* t)
475{ 584{
476 cpu_entry_t *entry; 585 cpu_entry_t *entry;
477 586
@@ -555,7 +664,7 @@ static noinline void requeue(struct task_struct* task)
555} 664}
556 665
557/* adaptive_job_arrival: task is either resumed or released */ 666/* adaptive_job_arrival: task is either resumed or released */
558static noinline void adaptive_job_arrival(struct task_struct* task) 667static void adaptive_job_arrival(struct task_struct* task)
559{ 668{
560 cpu_entry_t* last; 669 cpu_entry_t* last;
561 670
@@ -594,6 +703,19 @@ static noinline void adaptive_release_jobs(void)
594 set_rt_flags(queued, RT_F_RUNNING); 703 set_rt_flags(queued, RT_F_RUNNING);
595 queued->rt_param.times.last_release = 704 queued->rt_param.times.last_release =
596 queued->rt_param.times.release; 705 queued->rt_param.times.release;
706
707 /* check for delayed weight increase */
708 if (get_opt_sl(queued) != get_cur_sl(queued) &&
709 time_before_eq(queued->rt_param.opt_change, jiffies)) {
710 opt_time = jiffies;
711 set_service_level(queued, get_opt_sl(queued));
712 queued->rt_param.times.deadline =
713 get_last_release(queued) +
714 get_rt_period(queued);
715 queued->rt_param.predictor_state.estimate =
716 queued->rt_param.opt_nw;
717 }
718
597 sched_trace_job_release(queued); 719 sched_trace_job_release(queued);
598 adaptive_job_arrival(queued); 720 adaptive_job_arrival(queued);
599 } 721 }
@@ -629,14 +751,12 @@ static reschedule_check_t adaptive_scheduler_tick(void)
629 if (time_before_eq(last_optimizer_run + optimizer_period, jiffies)) { 751 if (time_before_eq(last_optimizer_run + optimizer_period, jiffies)) {
630 TRACE("adaptive: optimizing due to period threshold\n"); 752 TRACE("adaptive: optimizing due to period threshold\n");
631 adaptive_optimize(); 753 adaptive_optimize();
632 } else if (is_realtime(t) &&
633 get_cur_sl(t) != get_opt_sl(t) &&
634 time_before_eq(t->rt_param.opt_change, jiffies)) {
635 opt_time = jiffies;
636 enact_weight_change(t);
637 } 754 }
638 755
639 /* (2) try to release pending jobs */ 756 /* (2) enact delayed weight increases */
757 delayed_increase_weight();
758
759 /* (3) try to release pending jobs */
640 adaptive_release_jobs(); 760 adaptive_release_jobs();
641 761
642 /* we don't need to check linked != scheduled since 762 /* we don't need to check linked != scheduled since
@@ -665,6 +785,7 @@ static noinline void job_completion(struct task_struct *t)
665 785
666 actual_weight = _frac(t->rt_param.times.exec_time, 786 actual_weight = _frac(t->rt_param.times.exec_time,
667 t->rt_param.basic_params.period); 787 t->rt_param.basic_params.period);
788 sched_trace_weight_error(t, actual_weight);
668 old_estimate = get_est_weight(t); 789 old_estimate = get_est_weight(t);
669 update_estimate(&t->rt_param.predictor_state, actual_weight, 790 update_estimate(&t->rt_param.predictor_state, actual_weight,
670 fc_a, fc_b); 791 fc_a, fc_b);