author		Bjoern B. Brandenburg <bbb@cs.unc.edu>	2007-10-05 20:17:53 -0400
committer	Bjoern B. Brandenburg <bbb@cs.unc.edu>	2007-10-05 20:17:53 -0400
commit		a75df6573126387eca0e10bd30ce99c11f6a61b1
tree		4b982b3800b1b630e303cf1936c9534633de63d7 /kernel
parent		7ed0f0bcab2a0bb6bfac3f19fc6495cd4bc4f9ff
Add some infrastructure for reweighting.
Track the per-task allocation error and prepare for service level changes.
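
Whenever a job completes, the difference between its actual execution
time and its budgeted exec_cost is now added to a per-task accumulated
error, and set_service_level() can switch a task's period and exec_cost
to another entry of its service level table. A reweighting policy built
on top of this might look roughly like the following sketch; the helper
check_service_level() and the ERROR_THRESHOLD tunable are hypothetical
and not part of this patch, and the sketch assumes that higher indices
denote lower (cheaper) service levels:

	/* Hypothetical policy hook, not part of this patch: demote a
	 * task by one service level once its accumulated allocation
	 * error exceeds an assumed threshold. Uses only fields and
	 * helpers visible in the diff below.
	 */
	#define ERROR_THRESHOLD 1000000 /* assumed tunable */

	static void check_service_level(struct task_struct* t)
	{
		unsigned int cur = t->rt_param.cur_service_level;

		if (t->rt_param.stats.accumulated_error > ERROR_THRESHOLD &&
		    cur + 1 < t->rt_param.no_service_levels)
			set_service_level(t, cur + 1);
	}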
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched_adaptive.c	34
1 file changed, 26 insertions(+), 8 deletions(-)
diff --git a/kernel/sched_adaptive.c b/kernel/sched_adaptive.c
index 520a290be6..16f249f03e 100644
--- a/kernel/sched_adaptive.c
+++ b/kernel/sched_adaptive.c
@@ -3,7 +3,7 @@
  *
  * Implementation of Aaron's adaptive global EDF scheduling algorithm.
  *
- * This scheduler is based on the GSN-EDF scheduler for simplicity reasons.
+ * This scheduler is based on the GSN-EDF scheduler.
  */
 
 #include <linux/percpu.h>
@@ -114,6 +114,21 @@ static LIST_HEAD(adaptive_cpu_queue);
 static rt_domain_t adaptive;
 
 
+static void set_service_level(struct task_struct* t, unsigned int level)
+{
+	BUG_ON(!t);
+	BUG_ON(t->rt_param.no_service_levels <= level);
+
+	t->rt_param.cur_service_level = level;
+	t->rt_param.basic_params.period =
+		t->rt_param.service_level[level].period;
+	t->rt_param.basic_params.exec_cost =
+		t->rt_param.service_level[level].exec_cost;
+	sched_trace_service_level_change(t);
+	TRACE_TASK(t, "service level %u activated\n", level);
+}
+
+
 /* update_cpu_position - Move the cpu entry to the correct place to maintain
  * order in the cpu queue. Caller must hold adaptive lock.
  */
@@ -153,9 +168,8 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
 	BUG_ON(linked && !is_realtime(linked));
 
 	/* Currently linked task is set to be unlinked. */
-	if (entry->linked) {
+	if (entry->linked)
 		entry->linked->rt_param.linked_on = NO_CPU;
-	}
 
 	/* Link new task to CPU. */
 	if (linked) {
@@ -360,14 +374,18 @@ static reschedule_check_t adaptive_scheduler_tick(void)
 /* caller holds adaptive_lock */
 static noinline void job_completion(struct task_struct *t)
 {
+	long delta;
 	BUG_ON(!t);
 
 	sched_trace_job_completion(t);
+	delta = t->rt_param.times.exec_time -
+		t->rt_param.basic_params.exec_cost;
+	t->rt_param.stats.accumulated_error += delta;
+
 
-	TRACE_TASK(t, "job %d completes, delta WCET = %d\n",
-		t->rt_param.times.job_no,
-		t->rt_param.times.exec_time -
-		t->rt_param.basic_params.exec_cost);
+	TRACE_TASK(t, "job %d completes, delta WCET = %d, acc. error = %ld\n",
+		t->rt_param.times.job_no, delta,
+		t->rt_param.stats.accumulated_error);
 
 	/* set flags */
 	set_rt_flags(t, RT_F_SLEEP);
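
To make the new bookkeeping concrete (illustrative numbers, not from
this patch): with exec_cost = 10000us, a job that runs for 12500us
yields delta = 2500 and increases accumulated_error by that amount,
while a job that finishes after 9000us contributes delta = -1000. The
accumulated error thus tracks the signed long-run deviation between
budgeted and consumed processor time, which a reweighting policy can
later use to pick a better-fitting service level.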