author    Bjoern B. Brandenburg <bbb@cs.unc.edu>  2007-10-19 17:04:28 -0400
committer Bjoern B. Brandenburg <bbb@cs.unc.edu>  2007-10-19 17:04:28 -0400
commit    33fad37e4f1b023ddfea00c066b529dc670de684 (patch)
tree      269412dc9299ca2ace3bead6aa65db57e2cf2ed8
parent    b5a6bb4e6f094fe3e080d95ca5f69d640dbd7cef (diff)
seems to be working like this
-rw-r--r--  include/linux/sched_trace.h |  10
-rw-r--r--  kernel/rt_domain.c          |   4
-rw-r--r--  kernel/sched_adaptive.c     | 137
-rw-r--r--  kernel/sched_trace.c        |  11
4 files changed, 144 insertions(+), 18 deletions(-)
diff --git a/include/linux/sched_trace.h b/include/linux/sched_trace.h
index 88d19e2903..308cc7dd8d 100644
--- a/include/linux/sched_trace.h
+++ b/include/linux/sched_trace.h
@@ -75,6 +75,7 @@ typedef struct {
         u16 period;
         u16 wcet;
         int tardiness;
+        unsigned int job_no;
 } completion_record_t;
 
 typedef struct {
@@ -93,7 +94,10 @@ typedef struct {
 typedef struct {
         trace_header_t header;
         task_info_t task;
+        unsigned int from:16;
+        unsigned int to:16;
         service_level_t new_level;
+        service_level_t old_level;
 } service_level_change_record_t;
 
 typedef struct {
@@ -134,7 +138,9 @@ void sched_trace_server_completion(int id, unsigned int budget,
 void sched_trace_server_scheduled(int id, task_class_t class,
                                   unsigned int budget, jiffie_t deadline);
 
-void sched_trace_service_level_change(struct task_struct* t);
+void sched_trace_service_level_change(struct task_struct* t,
+                                      unsigned int from,
+                                      unsigned int to);
 
 void sched_trace_weight_error(struct task_struct* t, fp_t actual);
 
@@ -155,7 +161,7 @@ void sched_trace_weight_error(struct task_struct* t, fp_t actual);
 #define sched_trace_server_completion(id, budget, deadline, class)
 #define sched_trace_server_scheduled(id, class, budget, deadline)
 
-#define sched_trace_service_level_change(t)
+#define sched_trace_service_level_change(t, a, b)
 
 #define sched_trace_weight_error(x, y)
 
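The widened service_level_change_record_t can be decoded by any trace consumer that mirrors the new layout: the 16-bit from/to fields identify the level indices, and old_level/new_level carry full copies of both service-level descriptors. A minimal userspace reader sketch follows; the trace_header_t, task_info_t, and service_level_t stand-ins are illustrative placeholders, not the authoritative definitions from this header, and trace.bin is a hypothetical dump file:

    /* Hedged sketch of a trace consumer for the widened record.
     * The three stand-in structs approximate, but do not reproduce,
     * the real definitions in include/linux/sched_trace.h. */
    #include <stdio.h>
    #include <stdint.h>

    typedef struct { uint8_t raw[16]; } trace_header_t;  /* stand-in */
    typedef struct { uint8_t raw[16]; } task_info_t;     /* stand-in */
    typedef struct { uint8_t raw[16]; } service_level_t; /* stand-in */

    typedef struct {
            trace_header_t header;
            task_info_t task;
            unsigned int from:16;   /* previous service level index */
            unsigned int to:16;     /* newly activated level index  */
            service_level_t new_level;
            service_level_t old_level;
    } service_level_change_record_t;

    int main(void)
    {
            service_level_change_record_t rec;
            FILE *f = fopen("trace.bin", "rb"); /* hypothetical dump */

            if (!f)
                    return 1;
            while (fread(&rec, sizeof(rec), 1, f) == 1)
                    printf("service level change: %u -> %u\n",
                           rec.from, rec.to);
            fclose(f);
            return 0;
    }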
diff --git a/kernel/rt_domain.c b/kernel/rt_domain.c
index 53c5346411..4875c53076 100644
--- a/kernel/rt_domain.c
+++ b/kernel/rt_domain.c
@@ -98,6 +98,10 @@ struct task_struct* __take_ready_rq(rt_domain_t* rt, runqueue_t* rq, int cpu)
  */
 void __add_release(rt_domain_t* rt, struct task_struct *task)
 {
+        TRACE("rt: adding %s/%d (%u, %u) rel=%d to release queue\n",
+              task->comm, task->pid, get_exec_cost(task), get_rt_period(task),
+              get_release(task));
+
         list_insert(&task->rt_list, &rt->release_queue, release_order);
 }
 
diff --git a/kernel/sched_adaptive.c b/kernel/sched_adaptive.c
index 0f00903fad..1dd2b7763c 100644
--- a/kernel/sched_adaptive.c
+++ b/kernel/sched_adaptive.c
@@ -25,6 +25,7 @@
 #include <linux/sched_plugin.h>
 #include <linux/edf_common.h>
 #include <linux/sched_trace.h>
+#include <asm/uaccess.h>
 
 #include <linux/fpmath.h>
 
@@ -132,9 +133,14 @@ static fp_t fc_a, fc_b;
 
 /* optimizer trigger */
 static jiffie_t last_optimizer_run;
+static jiffie_t optimizer_min_invocation_sep;
 static jiffie_t optimizer_period;
 static fp_t task_error_threshold;
 
+static fp_t system_capacity;
+/* total actual weight of the task system */
+static fp_t total_weight;
+
 /* optimizer time snapshot */
 jiffie_t opt_time;
 
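The fp_t values introduced here come from linux/fpmath.h, whose helpers appear throughout this diff: FP(n) lifts an integer into fixed point, _frac(a, b) constructs a/b, and _add/_sub/_mul/_div/_abs combine values while _eq/_gt compare them. As a rough mental model only (the actual representation and shift width live in fpmath.h and are not reproduced here), the helpers behave like this:

    /* Illustrative fixed-point model of the fpmath.h helpers.
     * FP_SHIFT is an assumed example value, not the kernel's. */
    typedef struct { long val; } fp_t;
    #define FP_SHIFT 16
    #define FP(x)       ((fp_t){ (long)(x) << FP_SHIFT })
    #define _frac(a, b) ((fp_t){ ((long)(a) << FP_SHIFT) / (b) })

    static inline fp_t _add(fp_t a, fp_t b) { return (fp_t){ a.val + b.val }; }
    static inline fp_t _sub(fp_t a, fp_t b) { return (fp_t){ a.val - b.val }; }
    static inline fp_t _abs(fp_t a) { return (fp_t){ a.val < 0 ? -a.val : a.val }; }
    static inline fp_t _mul(fp_t a, fp_t b)
    {
            /* multiply, then drop the extra fractional bits */
            return (fp_t){ (a.val * b.val) >> FP_SHIFT };
    }
    static inline fp_t _div(fp_t a, fp_t b)
    {
            /* pre-shift the dividend to keep fractional precision */
            return (fp_t){ (a.val << FP_SHIFT) / b.val };
    }
    static inline int _eq(fp_t a, fp_t b) { return a.val == b.val; }
    static inline int _gt(fp_t a, fp_t b) { return a.val > b.val; }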
@@ -256,8 +262,10 @@ static noinline fp_t linear_metric(struct task_struct* t)
                        _sub(gmax, g1));
 }
 
-static noinline unsigned long reweighted_period(fp_t ow, fp_t nw, unsigned long alloc,
-                                        jiffie_t deadline, jiffie_t release)
+static noinline unsigned long reweighted_period(fp_t ow, fp_t nw,
+                                                unsigned long alloc,
+                                                jiffie_t deadline,
+                                                jiffie_t release)
 {
         fp_t dl;
         dl = _mul(FP(deadline - release), ow);
@@ -351,9 +359,11 @@ static int by_enactment_time(struct list_head* a, struct list_head* b)
 static void set_service_level(struct task_struct* t, unsigned int level)
 {
         service_level_t *new;
+        unsigned int old;
         BUG_ON(!t);
         BUG_ON(t->rt_param.no_service_levels <= level);
 
+        old = t->rt_param.cur_service_level;
         t->rt_param.cur_service_level = level;
         new = t->rt_param.service_level + level;
         t->rt_param.basic_params.period = new->period;
@@ -362,7 +372,7 @@ static void set_service_level(struct task_struct* t, unsigned int level)
 
         scheduler_signal(t, SIGUSR1);
 
-        sched_trace_service_level_change(t);
+        sched_trace_service_level_change(t, old, level);
         OPT_DBG_T(t, "service level %u activated\n", level);
 }
 
@@ -384,7 +394,8 @@ static void update_weight_estimate(struct task_struct* t)
                   get_deadline(t), get_last_release(t), exec_time, sl_period);
 
         t->rt_param.predictor_state.estimate = nw;
-        OPT_DBG_T(t, "update_weight_estimate from " _FP_ " to "_FP_"\n", fp2str(ow), fp2str(nw));
+        OPT_DBG_T(t, "update_weight_estimate from " _FP_ " to "_FP_"\n",
+                  fp2str(ow), fp2str(nw));
 
 
         OPT_DBG_T(t, " update_weight_estimate: " _FP_ " => " _FP_ "\n",
@@ -499,6 +510,10 @@ static void change_weight(struct task_struct* t)
                 increase_weight(t);
         else
                 decrease_weight(t);
+        OPT_DBG_T(t, "after change_weight: last_rel:%d rel:%d dl:%d\n",
+                  get_last_release(t),
+                  get_release(t),
+                  get_deadline(t));
 }
 
 /******************************************************************************/
@@ -518,6 +533,10 @@ void adaptive_optimize(void)
         unsigned int l;
         jiffie_t enactment_time;
 
+        if (time_before(jiffies,
+                        last_optimizer_run + optimizer_min_invocation_sep))
+                return;
+
         OPT_DBG(":::::: running adaptive optimizer\n");
         opt_time = jiffies;
 
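The new guard rate-limits the optimizer with the kernel's wrap-safe jiffies comparison: time_before(a, b) from linux/jiffies.h is true when a is chronologically earlier than b, even across counter wraparound. The pattern in isolation (a sketch; here last_run is assumed to be refreshed by the work itself, as the optimizer does with last_optimizer_run):

    #include <linux/jiffies.h>

    static unsigned long last_run; /* jiffies stamp of the last full run */
    static unsigned long min_sep;  /* minimum separation, in jiffies */

    static void rate_limited_work(void)
    {
            /* Bail out if fewer than min_sep jiffies have elapsed;
             * time_before() handles jiffies overflow correctly. */
            if (time_before(jiffies, last_run + min_sep))
                    return;
            last_run = jiffies;
            /* ...expensive work goes here... */
    }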
@@ -535,8 +554,7 @@ void adaptive_optimize(void)
         }
 
         /* 2) determine current system capacity */
-        for_each_online_cpu(i)
-                M = _add(M, FP(1));
+        M = system_capacity;
         _M = M;
         OPT_DBG("opt: system capacity: " _FP_ "\n", fp2str(M));
 
@@ -863,6 +881,12 @@ static void adaptive_job_arrival(struct task_struct* task)
         BUG_ON(list_empty(&adaptive_cpu_queue));
         BUG_ON(!task);
 
+        TRACE_TASK(task, "job_arrival: last_rel=%d rel=%d dl=%d now=%d\n",
+                   get_last_release(task), get_release(task),
+                   get_deadline(task),
+                   jiffies);
+
+
         /* first queue arriving job */
         requeue(task);
 
@@ -892,6 +916,8 @@ static noinline void adaptive_release_jobs(void)
         list_for_each_safe(pos, save, &adaptive.release_queue) {
                 queued = list_entry(pos, struct task_struct, rt_list);
                 if (likely(is_released(queued))) {
+                        TRACE_TASK(queued, "released rel=%d now=%d\n",
+                                   get_release(queued), jiffies);
                         /* this one is ready to go*/
                         list_del(pos);
                         set_rt_flags(queued, RT_F_RUNNING);
@@ -972,8 +998,18 @@ static noinline void job_completion(struct task_struct *t)
         fp_t actual_weight, old_estimate;
         unsigned int lcurr = get_cur_sl(t);
         fp_t v = t->rt_param.service_level[lcurr].value;
+
+        int non_zero_weight;
+        fp_t error_percentage;
+        int exceeds_threshold;
+
         BUG_ON(!t);
 
+        TRACE_TASK(t, " completion, last_rel=%d rel=%d dl=%d now=%d "
+                   "period=%d\n",
+                   get_last_release(t), get_release(t), get_deadline(t),
+                   jiffies, get_rt_period(t));
+
         sched_trace_job_completion(t);
         delta = t->rt_param.times.exec_time -
                 t->rt_param.basic_params.exec_cost;
@@ -993,18 +1029,45 @@ static noinline void job_completion(struct task_struct *t)
                 _FP_ " => " _FP_ "\n",t->rt_param.times.job_no, v,
                 _sub(get_est_weight(t), old_estimate),
                 old_estimate, get_est_weight(t));
 
-        if ( (!_eq(get_est_weight(t),FP(0))) &&
-             (_gt(_div(_abs(_sub(get_est_weight(t), old_estimate)),
-                       get_est_weight(t)), task_error_threshold))) {
+        /* Now we have determined the task error.
+         * Next we release the next job.
+         * Then we optimize. It's easier for the optimizer to deal
+         * with just-released jobs.
+         */
+
+        /* prepare for next period */
+        edf_prepare_for_next_period(t);
+
+        TRACE_TASK(t, " prepped, last_rel=%d rel=%d dl=%d now=%d\n",
+                   get_last_release(t), get_release(t), get_deadline(t),
+                   jiffies);
+
+        if (is_released(t)) {
+                /* set flags */
+                /* prevent fake completions */
+                set_rt_flags(t, RT_F_RUNNING);
+                t->rt_param.times.last_release =
+                        t->rt_param.times.release;
+        }
+
+
+        non_zero_weight = !_eq(get_est_weight(t),FP(0));
+        if (non_zero_weight)
+                error_percentage = _div(_abs(_sub(get_est_weight(t),
+                                                  old_estimate)),
+                                        get_est_weight(t));
+        else
+                error_percentage = FP(0);
+        exceeds_threshold = _gt(error_percentage, task_error_threshold);
+
+
+        if (exceeds_threshold) {
                 OPT_DBG("adaptive: optimizing due to task error threshold\n");
                 adaptive_optimize();
         }
 
-        /* set flags */
-        set_rt_flags(t, RT_F_SLEEP);
-        /* prepare for next period */
-        edf_prepare_for_next_period(t);
+
         /* unlink */
         unlink(t);
         /* requeue
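The unwieldy compound condition is now computed stepwise: the relative error |w_new - w_old| / w_new is taken only for a non-zero estimate (so the division is never attempted on zero) and compared against task_error_threshold, which init_adaptive_plugin() below sets to _frac(1, 2); the optimizer therefore fires when the weight estimate moved by more than half its new value. Restated with plain doubles instead of fp_t:

    #include <math.h>

    /* Plain-double restatement of the fixed-point threshold test above. */
    static int exceeds_error_threshold(double new_est, double old_est,
                                       double threshold /* 0.5 by default */)
    {
            double error;

            if (new_est == 0.0)
                    return 0; /* zero estimate: treat the error as zero */
            error = fabs(new_est - old_est) / new_est;
            return error > threshold;
    }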
@@ -1250,12 +1313,27 @@ static int adaptive_mode_change(int new_mode)
         unsigned long flags;
         int cpu;
         cpu_entry_t *entry;
+        struct task_struct* t;
+        struct list_head* pos;
 
         if (new_mode == MODE_RT_RUN) {
                 queue_lock_irqsave(&adaptive_lock, flags);
 
+                system_capacity = FP(0);
+                for_each_online_cpu(cpu)
+                        system_capacity = _add(system_capacity, FP(1));
+
                 __rerelease_all(&adaptive, edf_release_at);
 
+                total_weight = FP(0);
+                list_for_each(pos, &adaptive.release_queue) {
+                        t = list_entry(pos, struct task_struct, rt_list);
+                        total_weight = _add(total_weight, get_est_weight(t));
+                }
+                TRACE("adaptive: total weight: " _FP_
+                      " (at mode change)\n", total_weight);
+
+
                 /* get old cruft out of the way in case we reenter real-time
                  * mode for a second time
                  */
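Counting one FP(1) per online CPU reproduces the loop deleted from adaptive_optimize(), now executed once per mode change under adaptive_lock instead of on every optimizer pass. A more direct equivalent, assuming nothing beyond the standard num_online_cpus() helper:

    /* Equivalent one-liner using the standard CPU-mask helper: */
    system_capacity = FP(num_online_cpus());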
@@ -1279,6 +1357,35 @@ static int adaptive_mode_change(int new_mode)
 }
 
 
+typedef enum {
+        ADAPTIVE_SET_MIN_OPT_SEP = 1
+} adaptive_cmds_t;
+
+
+static int adaptive_setup(int cmd, void __user *up)
+{
+        unsigned int error = -EINVAL;
+        unsigned int val;
+
+        if (copy_from_user(&val, up, sizeof(unsigned int))) {
+                error = -EFAULT;
+                goto out;
+        }
+
+        switch (cmd) {
+        case ADAPTIVE_SET_MIN_OPT_SEP:
+                optimizer_min_invocation_sep = val;
+                TRACE("adaptive: min opt sep set to %d\n",
+                      optimizer_min_invocation_sep);
+                return 0;
+                break;
+        }
+
+out:
+        return error;
+}
+
+
 /* Plugin object */
 static sched_plugin_t s_plugin __cacheline_aligned_in_smp = {
         .ready_to_use = 0
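The new adaptive_setup() hook is reached through the plugin's scheduler_setup slot, wired up in the plugin object below. A hedged sketch of the dispatch path; the sys_scheduler_setup name and the curr_sched_plugin pointer are assumptions inferred from the plugin structure shown here, not verified against the rest of this tree:

    /* Assumed dispatcher: forwards a (cmd, parameter) pair from the
     * setup system call to the active plugin's scheduler_setup hook. */
    asmlinkage long sys_scheduler_setup(int cmd, void __user *parameter)
    {
            if (curr_sched_plugin->scheduler_setup)
                    return curr_sched_plugin->scheduler_setup(cmd, parameter);
            return -EINVAL;
    }

From userspace, ADAPTIVE_SET_MIN_OPT_SEP would then carry a pointer to an unsigned int holding the desired minimum optimizer separation in jiffies (50 by default, per init_adaptive_plugin() below).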
@@ -1300,6 +1407,7 @@ static sched_plugin_t s_plugin __cacheline_aligned_in_smp = {
         .mode_change = adaptive_mode_change, \
         .wake_up_task = adaptive_wake_up_task, \
         .task_blocks = adaptive_task_blocks, \
+        .scheduler_setup = adaptive_setup \
 }
 
 
@@ -1313,6 +1421,7 @@ sched_plugin_t *__init init_adaptive_plugin(void)
         fc_b = _frac( 303, 1000);
 
         optimizer_period = 1000;
+        optimizer_min_invocation_sep = 50;
         task_error_threshold = _frac(1, 2);
 
         if (!s_plugin.ready_to_use)
diff --git a/kernel/sched_trace.c b/kernel/sched_trace.c
index d41c6e6b74..4cfe0c4e63 100644
--- a/kernel/sched_trace.c
+++ b/kernel/sched_trace.c
@@ -635,6 +635,7 @@ void sched_trace_job_completion(struct task_struct *t)
         tinfo(rec.task, t);
         rtinfo(rec, t);
         rec.tardiness = jiffies - t->rt_param.times.deadline;
+        rec.job_no = t->rt_param.times.job_no;
         TRACE_TASK(t, "AAATardiness : %d\n", rec.tardiness);
         put_trace(rec);
 }
@@ -724,13 +725,19 @@ void sched_trace_capacity_alloc_srv(pid_t srv, u32 srv_dl, task_class_t cls,
         put_trace(rec);
 }
 
-void sched_trace_service_level_change(struct task_struct *t)
+void sched_trace_service_level_change(struct task_struct *t,
+                                      unsigned int from,
+                                      unsigned int to)
 {
         service_level_change_record_t rec;
         header(rec, ST_SERVICE_LEVEL_CHANGE);
         tinfo(rec.task, t);
+        rec.to = to;
+        rec.from = from;
         rec.new_level =
-                t->rt_param.service_level[t->rt_param.cur_service_level];
+                t->rt_param.service_level[to];
+        rec.old_level =
+                t->rt_param.service_level[from];
         put_trace(rec);
 }
 