author    Bjoern B. Brandenburg <bbb@cs.unc.edu>    2007-10-08 20:24:20 -0400
committer Bjoern B. Brandenburg <bbb@cs.unc.edu>    2007-10-08 20:24:20 -0400
commit    729c2d3b077bf90d1c4e32dcb047e25cf5d9f684 (patch)
tree      496c57a3b51e287cf29d7381aced70a43ddc5a90 /kernel
parent    0b57fb8bf75f9ab8e8378238b71f3c983afc5fa5 (diff)
adaptive: debugging
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/litmus.c          44
-rw-r--r--  kernel/sched_adaptive.c  52
2 files changed, 69 insertions, 27 deletions
diff --git a/kernel/litmus.c b/kernel/litmus.c
index 131fc5039e..e837850998 100644
--- a/kernel/litmus.c
+++ b/kernel/litmus.c
@@ -218,12 +218,6 @@ asmlinkage long sys_set_service_levels(pid_t pid,
 	target->rt_param.service_level = NULL;
 	target->rt_param.no_service_levels = 0;
 
-	/* count == 0 means tear down service levels*/
-	if (count == 0) {
-		retval = 0;
-		goto out;
-	}
-
 	if (wt_y && wt_slope) {
 		if (copy_from_user(&target->rt_param.wt_y, wt_y,
 				   sizeof(fp_t))) {
@@ -238,7 +232,7 @@ asmlinkage long sys_set_service_levels(pid_t pid,
 		target->rt_param.wt_slope = FP(1);
 		goto out;
 	}
-	if (_leq(target->rt_param.wt_y, FP(0)) ||
+	if (_lt(target->rt_param.wt_y, FP(0)) ||
 	    _leq(target->rt_param.wt_slope, FP(0))) {
 		retval = -EINVAL;
 		target->rt_param.wt_y = FP(0);
@@ -250,6 +244,15 @@ asmlinkage long sys_set_service_levels(pid_t pid,
 		target->rt_param.wt_slope = FP(1);
 	}
 
+	TRACE_TASK(target, "set slope=" _FP_ ", y=" _FP_ "\n",
+		   target->rt_param.wt_slope, target->rt_param.wt_y);
+
+	/* count == 0 means tear down service levels*/
+	if (count == 0) {
+		retval = 0;
+		goto out;
+	}
+
 	klevels = kmalloc(sizeof(service_level_t) * count, GFP_KERNEL);
 	if (!klevels) {
 		retval = -ENOMEM;
@@ -262,12 +265,18 @@ asmlinkage long sys_set_service_levels(pid_t pid,
 			kfree(klevels);
 			goto out;
 		}
-		if (level.period <= 0)
+		if (level.period <= 0) {
+			TRACE("service level %d period <= 0\n", i);
 			goto out;
-		if (_leq(level.weight, last_weight))
+		}
+		if (_leq(level.weight, last_weight)) {
+			TRACE("service level %d weight non-increase\n", i);
 			goto out;
-		if (_leq(level.value, last_value))
+		}
+		if (_leq(level.value, last_value)) {
+			TRACE("service level %d value non-increase\n", i);
 			goto out;
+		}
 		last_value = level.value;
 		last_weight = level.weight;
 		klevels[i] = level;
@@ -679,6 +688,7 @@ void exit_litmus(struct task_struct *dead_tsk)
 		curr_sched_plugin->tear_down(dead_tsk);
 }
 
+
 void list_qsort(struct list_head* list, list_cmp_t less_than)
 {
 	struct list_head lt;
@@ -694,6 +704,7 @@ void list_qsort(struct list_head* list, list_cmp_t less_than)
 	INIT_LIST_HEAD(&geq);
 
 	pivot = list->next;
+	list_del(pivot);
 	list_for_each_safe(pos, extra, list) {
 		list_del(pos);
 		if (less_than(pos, pivot)) {
@@ -705,17 +716,14 @@ void list_qsort(struct list_head* list, list_cmp_t less_than)
 		}
 	}
 	if (n_lt < n_geq) {
-		if (n_lt > 1)
-			list_qsort(&lt, less_than);
-		if (n_geq > 1)
-			list_qsort(&geq, less_than);
+		list_qsort(&lt, less_than);
+		list_qsort(&geq, less_than);
 	} else {
-		if (n_geq > 1)
-			list_qsort(&geq, less_than);
-		if (n_lt > 1)
-			list_qsort(&lt, less_than);
+		list_qsort(&geq, less_than);
+		list_qsort(&lt, less_than);
 	}
 	list_splice(&geq, list);
+	list_add(pivot, list);
 	list_splice(&lt, list);
 }
 
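Note: the list_qsort() hunks above change the pivot handling. The pivot is now unlinked before the partitioning pass and re-linked between the two sorted halves, which removes the need for the n > 1 recursion guards (previously the pivot itself was partitioned into geq, so geq was never empty and unguarded recursion would not have terminated). The following is a minimal userspace sketch of the resulting algorithm, assuming simplified reimplementations of the <linux/list.h> primitives and a hypothetical item/less demo type; it is not the kernel code itself.

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define list_for_each_safe(pos, n, head) \
	for ((pos) = (head)->next, (n) = (pos)->next; (pos) != (head); \
	     (pos) = (n), (n) = (pos)->next)

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

/* insert e right after head */
static void list_add(struct list_head *e, struct list_head *head)
{
	e->next = head->next;
	e->prev = head;
	head->next->prev = e;
	head->next = e;
}

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

/* move all entries of 'list' to the front of 'head' */
static void list_splice(struct list_head *list, struct list_head *head)
{
	if (list_empty(list))
		return;
	list->next->prev = head;
	list->prev->next = head->next;
	head->next->prev = list->prev;
	head->next = list->next;
}

typedef int (*list_cmp_t)(struct list_head *a, struct list_head *b);

void list_qsort(struct list_head *list, list_cmp_t less_than)
{
	struct list_head lt, geq, *pivot, *pos, *extra;

	if (list_empty(list))
		return;
	INIT_LIST_HEAD(&lt);
	INIT_LIST_HEAD(&geq);

	pivot = list->next;
	list_del(pivot); /* the fix: take the pivot out before partitioning */

	list_for_each_safe(pos, extra, list) {
		list_del(pos);
		if (less_than(pos, pivot))
			list_add(pos, &lt);
		else
			list_add(pos, &geq);
	}

	/* empty and singleton sublists return immediately, so no guards */
	list_qsort(&lt, less_than);
	list_qsort(&geq, less_than);

	/* rebuild as lt ++ pivot ++ geq; each splice/add prepends to list */
	list_splice(&geq, list);
	list_add(pivot, list);
	list_splice(&lt, list);
}

/* demo: sort three integers (link is the first member, so the cast is safe) */
struct item { struct list_head link; int v; };

static int less(struct list_head *a, struct list_head *b)
{
	return ((struct item *)a)->v < ((struct item *)b)->v;
}

int main(void)
{
	struct item items[] = { {{0,0},3}, {{0,0},1}, {{0,0},2} };
	struct list_head head, *p;

	INIT_LIST_HEAD(&head);
	for (int i = 0; i < 3; i++)
		list_add(&items[i].link, &head);
	list_qsort(&head, less);
	for (p = head.next; p != &head; p = p->next)
		printf("%d ", ((struct item *)p)->v);
	printf("\n"); /* prints: 1 2 3 */
	return 0;
}

Re-inserting the pivot with list_add() after splicing geq and before splicing lt is what places it back in sorted position. The kernel version also keeps the n_lt/n_geq counts and recurses on the two halves in size-dependent order; that bookkeeping is omitted here since it no longer affects termination after the fix.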
diff --git a/kernel/sched_adaptive.c b/kernel/sched_adaptive.c
index a91e607105..24095e02d8 100644
--- a/kernel/sched_adaptive.c
+++ b/kernel/sched_adaptive.c
@@ -93,6 +93,11 @@
  * __take_ready).
  */
 
+/* TODO:
+ * - export weight error on task completion
+ * -
+ */
+
 
 /* cpu_entry_t - maintain the linked and scheduled state
  */
@@ -208,12 +213,12 @@ void adaptive_optimize(void)
 	struct list_head *p, *extra;
 	cpu_entry_t *cpu;
 	struct task_struct* t;
-	fp_t M = FP(0), w0, wl, new_M, estU = FP(0), _M;
+	fp_t M = FP(0), w0, wl, tmp, estU = FP(0), _M;
 	int i;
 	unsigned int l;
 	jiffie_t enactment_time;
 
-	TRACE("running adaptive optimizer\n");
+	TRACE(":::::: running adaptive optimizer\n");
 	opt_time = jiffies;
 
 	INIT_LIST_HEAD(&list);
@@ -233,6 +238,7 @@ void adaptive_optimize(void)
 	for_each_online_cpu(i)
 		M = _add(M, FP(1));
 	_M = M;
+	TRACE("opt: system capacity: " _FP_ "\n", fp2str(M));
 
 	/* 3) Compute L value for all tasks,
 	 * and set tasks to service level 0,
@@ -241,11 +247,17 @@
 	 */
 	list_for_each(p, &list) {
 		t = list_entry(p, struct task_struct, rt_param.opt_list);
 		t->rt_param.opt_order = linear_metric(t);
+		TRACE_TASK(t, "est_w = " _FP_ " L = " _FP_ "\n",
+			   get_est_weight(t),
+			   fp2str(t->rt_param.opt_order));
 		t->rt_param.opt_level = 0;
 		M = _sub(M, est_weight_at(t, 0));
 		estU = _add(estU, get_est_weight(t));
 	}
+	TRACE("opt: estimated utilization: " _FP_ "\n", fp2str(estU));
+	TRACE("opt: estimated capacity at all sl=0: " _FP_ "\n", fp2str(M));
+
 
 	/* 4) sort list by decreasing linear metric */
 	list_qsort(&list, by_linear_metric);
@@ -258,13 +270,18 @@ void adaptive_optimize(void)
 		while (l > 1) {
 			l--;
 			wl = est_weight_at(t, l);
-			new_M = _sub(M, _sub(wl, w0));
-			if (_leq(FP(0), new_M)) {
+			tmp = _sub(M, _sub(wl, w0));
+			if (_leq(FP(0), tmp)) {
 				/* this level fits in */
-				M = new_M;
+				M = tmp;
 				t->rt_param.opt_level = l;
-				t->rt_param.opt_dw = _sub(get_est_weight(t), wl);
+				t->rt_param.opt_dw = _sub(wl,
+							  get_est_weight(t));
 				t->rt_param.opt_nw = wl;
+				TRACE_TASK(t, " will run at sl=%u, "
+					   "prior=%ud dw=" _FP_ "\n",
+					   l, get_cur_sl(t),
+					   fp2str(t->rt_param.opt_dw));
 				break; /* proceed to next task */
 			}
 		}
@@ -301,6 +318,7 @@ void adaptive_optimize(void)
 	/* Very ugly jump, but we need to force enactment_time = 0
 	 * during the first iteration.
 	 */
+	M = _M;
 	enactment_time = 0;
 	goto first_iteration;
 
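For orientation, here is a rough userspace sketch of the greedy service-level assignment that the adaptive_optimize() hunks above instrument: reserve every task's level-0 weight against the system capacity M (one unit per online CPU), then, in decreasing linear-metric order, upgrade each task to the highest level whose weight increase over level 0 still fits. The task layout and helper names are hypothetical stand-ins, and plain doubles replace the kernel's fp_t fixed-point arithmetic.

#include <stdio.h>

#define NUM_LEVELS 3

struct task {
	const char *name;
	/* estimated weight at each service level, increasing with the level */
	double weight[NUM_LEVELS];
	int level; /* chosen service level */
};

/* Assign service levels greedily; 'tasks' is assumed pre-sorted by
 * decreasing linear metric. Returns the remaining capacity. */
static double assign_levels(struct task *tasks, int n, double M)
{
	/* every task is first reserved its level-0 weight */
	for (int i = 0; i < n; i++) {
		tasks[i].level = 0;
		M -= tasks[i].weight[0];
	}
	/* in priority order, upgrade to the highest level that fits */
	for (int i = 0; i < n; i++) {
		double w0 = tasks[i].weight[0];
		for (int l = NUM_LEVELS - 1; l > 0; l--) {
			double tmp = M - (tasks[i].weight[l] - w0);
			if (tmp >= 0.0) {
				/* this level fits in */
				M = tmp;
				tasks[i].level = l;
				break; /* proceed to next task */
			}
		}
	}
	return M;
}

int main(void)
{
	struct task tasks[] = {
		{ "hi-value", { 0.3, 0.5, 0.9 }, 0 },
		{ "mid",      { 0.2, 0.4, 0.6 }, 0 },
		{ "lo-value", { 0.1, 0.2, 0.3 }, 0 },
	};
	double slack = assign_levels(tasks, 3, 1.0 /* one CPU */);

	for (int i = 0; i < 3; i++)
		printf("%s -> level %d\n", tasks[i].name, tasks[i].level);
	/* prints: hi-value -> level 1, mid -> level 1, lo-value -> level 0 */
	printf("remaining capacity: %.2f\n", slack);
	return 0;
}

The kernel version additionally records opt_dw (the signed weight change) and opt_nw per task; the enactment loops in the hunks that follow then apply weight decreases immediately and defer each increase until estU plus that increase fits back under M.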
@@ -321,6 +339,10 @@ void adaptive_optimize(void)
 				/* opt_dw is negative */
 				estU = _add(estU, t->rt_param.opt_dw);
 				list_add(p, &list);
+				TRACE_TASK(t, " weight decrease at %ld => estU="
+					   _FP_ "\n", enactment_time,
+					   fp2str(estU));
+
 			} else
 				/* stop decrease loop */
 				break;
@@ -331,12 +353,16 @@ void adaptive_optimize(void)
 			p = inc.next;
 			t = list_entry(p, struct task_struct,
 				       rt_param.opt_list);
-			if (_leq(_add(estU, t->rt_param.opt_dw), M)) {
+			tmp = _add(estU, t->rt_param.opt_dw);
+			if (_leq(tmp, M)) {
 				/* it fits */
-				estU = _add(estU, t->rt_param.opt_dw);
+				estU = tmp;
 				t->rt_param.opt_change = enactment_time;
 				list_del(p);
 				list_add(p, &list);
+				TRACE_TASK(t, " weight increase at %ld => estU="
+					   _FP_ "\n", enactment_time,
+					   fp2str(estU));
 			} else
 				/* stop increase loop */
 				break;
@@ -369,6 +395,7 @@ void adaptive_optimize(void)
 	}
 
 	last_optimizer_run = jiffies;
+
 }
 
 /* update_cpu_position - Move the cpu entry to the correct place to maintain
@@ -800,6 +827,13 @@ static long adaptive_prepare_task(struct task_struct * t)
 
 	t->rt_param.scheduled_on = NO_CPU;
 	t->rt_param.linked_on = NO_CPU;
+	if (t->rt_param.no_service_levels) {
+		t->rt_param.predictor_state.estimate =
+			get_sl(t, 0).weight;
+	} else
+		t->rt_param.predictor_state.estimate =
+			_frac(get_exec_cost(t), get_rt_period(t));
+
 	if (get_rt_mode() == MODE_RT_RUN)
 		/* The action is already on.
 		 * Prepare immediate release
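The adaptive_prepare_task() hunk seeds the feedback predictor: when service levels are defined, the initial estimate is the level-0 weight; otherwise it falls back to the task's static utilization e/p via _frac(). A small sketch of that rule follows, with an illustrative fixed-point type standing in for the kernel's fp_t (FP_SHIFT, the struct fields, and initial_estimate() itself are assumptions made for this example):

#include <stdint.h>
#include <stdio.h>

#define FP_SHIFT 10 /* assumed fixed-point precision */

typedef struct { int64_t val; } fp_t;

/* a/b as a fixed-point fraction (illustrative, not the kernel's _frac) */
static fp_t _frac(int64_t a, int64_t b)
{
	fp_t r = { (a << FP_SHIFT) / b };
	return r;
}

/* hypothetical stand-ins for the rt_param fields used in the diff */
struct task_params {
	unsigned int no_service_levels;
	fp_t sl0_weight;          /* get_sl(t, 0).weight */
	int64_t exec_cost, period;
};

static fp_t initial_estimate(const struct task_params *t)
{
	/* with service levels, start the predictor at the level-0 weight;
	 * otherwise use the static utilization e/p */
	if (t->no_service_levels)
		return t->sl0_weight;
	return _frac(t->exec_cost, t->period);
}

int main(void)
{
	struct task_params t = { 0, { 0 }, 25, 100 };
	fp_t e = initial_estimate(&t);

	printf("estimate = %.3f\n", (double)e.val / (1 << FP_SHIFT));
	/* prints: estimate = 0.250 */
	return 0;
}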