author     Bjoern B. Brandenburg <bbb@cs.unc.edu>     2007-10-07 02:05:22 -0400
committer  Bjoern B. Brandenburg <bbb@cs.unc.edu>     2007-10-07 02:05:22 -0400
commit     dec97cac61f768324aab0df12146f34ae59e3db0 (patch)
tree       363f27f33c3df3296ca3ea207eb0574f941009d5 /kernel
parent     c642a99878f107a81278506619502e1764dd431a (diff)
adaptive: introduce optimizer hooks
Predict each task's weight at job completion, and introduce a periodic optimizer run.
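Note: the patch calls update_estimate() with three feedback-control gains (fc_a, fc_b, fc_c) but does not include its definition. Purely as an illustrative sketch of what such a feedback-controlled weight estimator could look like — assuming a PI-style update, the struct layout below, and fp_t helpers _add()/_sub()/_mul() by analogy with the _frac()/_gt()/FP() calls that do appear in the patch:

/* Hypothetical sketch with the same signature as the update_estimate()
 * call in this diff. The field names and the _sub()/_add()/_mul()
 * helpers are assumptions; only the 'error' field, _frac(), _gt(), and
 * FP() are actually visible in the patch.
 */
struct predictor_state {
	fp_t estimate;	/* predicted task weight (utilization) */
	fp_t accum;	/* accumulated error: the integral term */
	fp_t error;	/* last observed error; job_completion()
			 * compares this against task_error_threshold */
};

static void update_estimate(struct predictor_state *s, fp_t actual,
			    fp_t a, fp_t b, fp_t c)
{
	/* proportional term: deviation of the observed weight */
	s->error = _sub(actual, s->estimate);
	/* integral term: long-term bias of the prediction */
	s->accum = _add(s->accum, s->error);
	/* PI correction scaled by the gains */
	s->estimate = _add(s->estimate,
			   _mul(c, _add(_mul(a, s->error),
					_mul(b, s->accum))));
}

With all three gains initialized to FP(1), as in init_adaptive_plugin() below, an estimator of this shape would track the observed weight aggressively; smaller gains would smooth out transient overruns.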
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/sched_adaptive.c   34
1 file changed, 33 insertions(+), 1 deletion(-)
diff --git a/kernel/sched_adaptive.c b/kernel/sched_adaptive.c
index 6756bef0bf..f17807d31d 100644
--- a/kernel/sched_adaptive.c
+++ b/kernel/sched_adaptive.c
@@ -116,6 +116,13 @@ static LIST_HEAD(adaptive_cpu_queue);
 
 static rt_domain_t adaptive;
 
+/* feedback control parameters */
+static fp_t fc_a, fc_b, fc_c;
+
+/* optimizer trigger */
+static jiffie_t last_optimizer_run;
+static jiffie_t optimizer_period;
+static fp_t task_error_threshold;
 
 static void set_service_level(struct task_struct* t, unsigned int level)
 {
@@ -132,6 +139,10 @@ static void set_service_level(struct task_struct* t, unsigned int level)
 	TRACE_TASK(t, "service level %u activated\n", level);
 }
 
+void adaptive_optimize(void)
+{
+	last_optimizer_run = jiffies;
+}
 
 /* update_cpu_position - Move the cpu entry to the correct place to maintain
  * order in the cpu queue. Caller must hold adaptive lock.
@@ -362,7 +373,11 @@ static reschedule_check_t adaptive_scheduler_tick(void)
 	if (get_rt_mode() == MODE_RT_RUN && smp_processor_id() == 0) {
 		queue_lock_irqsave(&adaptive_lock, flags);
 
-		/* (1) try to release pending jobs */
+		/* (1) run the optimizer if it did not trigger often enough */
+		if (time_before_eq(last_optimizer_run + optimizer_period, jiffies))
+			adaptive_optimize();
+
+		/* (2) try to release pending jobs */
 		adaptive_release_jobs();
 
 		/* we don't need to check linked != scheduled since
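The periodic trigger above relies on the kernel's wrap-safe jiffy comparisons from <linux/jiffies.h>. Stripped of the type-checking found in later kernels, they reduce to signed subtraction:

/* Essence of the wrap-safe comparison; the real macros in
 * <linux/jiffies.h> add typechecking around this arithmetic. */
#define time_after_eq(a, b)	((long)(a) - (long)(b) >= 0)
#define time_before_eq(a, b)	time_after_eq(b, a)

The check therefore fires once at least optimizer_period jiffies have elapsed since last_optimizer_run, even across a jiffy-counter wrap, making the scheduler tick on CPU 0 a fallback trigger for workloads whose per-task estimation error never crosses task_error_threshold.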
@@ -379,12 +394,20 @@ static reschedule_check_t adaptive_scheduler_tick(void)
 static noinline void job_completion(struct task_struct *t)
 {
 	long delta;
+	fp_t actual_weight;
 	BUG_ON(!t);
 
 	sched_trace_job_completion(t);
 	delta = t->rt_param.times.exec_time -
 		t->rt_param.basic_params.exec_cost;
 
+	actual_weight = _frac(t->rt_param.times.exec_time,
+			      t->rt_param.basic_params.period);
+	update_estimate(&t->rt_param.predictor_state, actual_weight,
+			fc_a, fc_b, fc_c);
+	if (_gt(t->rt_param.predictor_state.error, task_error_threshold))
+		adaptive_optimize();
+
 	TRACE_TASK(t, "job %d completes, delta WCET = %d\n",
 		   t->rt_param.times.job_no, delta);
 
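The weight computation above is fixed-point arithmetic throughout. Below is a minimal sketch of an fp_t representation consistent with the _frac(), _gt(), and FP() calls in this patch, assuming a shifted-long layout; the actual definitions live elsewhere in the tree and are not part of this diff:

/* Assumed representation; FP_SHIFT and the struct layout are
 * illustrative, not taken from this commit. */
#define FP_SHIFT 10			/* ~3 decimal digits of fraction */
typedef struct { long val; } fp_t;	/* value scaled by 2^FP_SHIFT */

#define FP(x)	((fp_t){ (long)(x) << FP_SHIFT })

static inline fp_t _frac(long num, long den)
{
	/* e.g. _frac(6, 10) => 0.6; job_completion() uses this to turn
	 * exec_time/period into the actual weight of the finished job */
	return (fp_t){ (num << FP_SHIFT) / den };
}

static inline int _gt(fp_t a, fp_t b)
{
	return a.val > b.val;
}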
@@ -754,6 +777,8 @@ static int adaptive_mode_change(int new_mode)
 		list_add(&entry->list, &adaptive_cpu_queue);
 	}
 
+	adaptive_optimize();
+
 	queue_unlock_irqrestore(&adaptive_lock, flags);
 
 }
@@ -793,6 +818,13 @@ sched_plugin_t *__init init_adaptive_plugin(void)
 	int cpu;
 	cpu_entry_t *entry;
 
+	fc_a = FP(1);
+	fc_b = FP(1);
+	fc_c = FP(1);
+
+	optimizer_period = 250;
+	task_error_threshold = _frac(1, 2);
+
 	if (!s_plugin.ready_to_use)
 	{
 		/* initialize CPU state */
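Two defaults in this last hunk are worth noting. task_error_threshold = _frac(1, 2) means a completed job must be mispredicted by more than 0.5 of a processor's weight before it forces an immediate optimizer run; smaller errors are left to the periodic trigger. And optimizer_period = 250 is a raw jiffy count, so the wall-clock interval depends on CONFIG_HZ: 250 ms at HZ=1000, a full second at HZ=250. If an HZ-independent interval were intended, the standard msecs_to_jiffies() helper would pin it to milliseconds; this is an observation about the design choice, not part of the commit:

	/* illustrative alternative, not in this patch: express the
	 * optimizer period in wall-clock time rather than raw jiffies */
	optimizer_period = msecs_to_jiffies(250);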