diff options
| author | Glenn Elliott <gelliott@cs.unc.edu> | 2012-08-20 17:28:55 -0400 |
|---|---|---|
| committer | Glenn Elliott <gelliott@cs.unc.edu> | 2012-08-27 15:07:17 -0400 |
| commit | 077aaecac31331b65442275843932314049a2ceb (patch) | |
| tree | 6b24a59521a0f5e3853b7667669b54211b03b272 | |
| parent | 9a19f35c9c287cb8abd5bcf276ae8d1a3e876907 (diff) | |
EDF priority tie-breaks.wip-robust-tie-break
Instead of tie-breaking by PID (which is a static
priority tie-break), we can tie-break by other
job-level-unique parameters. This is desirable
because static priority tie-breaks cause tasks
with greater PID values to experience the most
tardiness; job-level tie-breaks spread tardiness
more equally across tasks.
There are four tie-break methods:
1) Lateness. If two jobs, J_{1,i} and J_{2,j} of
tasks T_1 and T_2, respectively, have equal
deadlines, we favor the job of the task that
had the worst lateness for jobs J_{1,i-1} and
J_{2,j-1}.
Note: Unlike tardiness, lateness may be less than
zero. This occurs when a job finishes before its
deadline.
2) Normalized Lateness. The same as #1, except
lateness is first normalized by each task's
relative deadline. This prevents tasks with short
relative deadlines and small execution requirements
from always losing tie-breaks.
3) Hash. The job tuple (PID, Job#) is used to
generate a hash. Hash values are then compared.
A job has ~50% chance of winning a tie-break
with respect to another job.
Note: Empirical testing shows that some jobs
can have +/- ~1.5% advantage in tie-breaks.
Linux's built-in hash function is not a
perfectly uniform hash.
4) PIDs. PID-based tie-break used in prior
versions of Litmus.
Conflicts:
litmus/edf_common.c
| -rw-r--r-- | include/litmus/fpmath.h | 145 | ||||
| -rw-r--r-- | include/litmus/litmus.h | 2 | ||||
| -rw-r--r-- | include/litmus/rt_param.h | 6 | ||||
| -rw-r--r-- | litmus/Kconfig | 46 | ||||
| -rw-r--r-- | litmus/edf_common.c | 110 | ||||
| -rw-r--r-- | litmus/jobs.c | 8 |
6 files changed, 297 insertions, 20 deletions
diff --git a/include/litmus/fpmath.h b/include/litmus/fpmath.h new file mode 100644 index 000000000000..04d4bcaeae96 --- /dev/null +++ b/include/litmus/fpmath.h | |||
| @@ -0,0 +1,145 @@ | |||
| 1 | #ifndef __FP_MATH_H__ | ||
| 2 | #define __FP_MATH_H__ | ||
| 3 | |||
| 4 | #ifndef __KERNEL__ | ||
| 5 | #include <stdint.h> | ||
| 6 | #define abs(x) (((x) < 0) ? -(x) : x) | ||
| 7 | #endif | ||
| 8 | |||
| 9 | // Use 64-bit because we want to track things at the nanosecond scale. | ||
| 10 | // This can lead to very large numbers. | ||
| 11 | typedef int64_t fpbuf_t; | ||
| 12 | typedef struct | ||
| 13 | { | ||
| 14 | fpbuf_t val; | ||
| 15 | } fp_t; | ||
| 16 | |||
| 17 | #define FP_SHIFT 10 | ||
| 18 | #define ROUND_BIT (FP_SHIFT - 1) | ||
| 19 | |||
| 20 | #define _fp(x) ((fp_t) {x}) | ||
| 21 | |||
| 22 | #ifdef __KERNEL__ | ||
| 23 | static const fp_t LITMUS_FP_ZERO = {.val = 0}; | ||
| 24 | static const fp_t LITMUS_FP_ONE = {.val = (1 << FP_SHIFT)}; | ||
| 25 | #endif | ||
| 26 | |||
| 27 | static inline fp_t FP(fpbuf_t x) | ||
| 28 | { | ||
| 29 | return _fp(((fpbuf_t) x) << FP_SHIFT); | ||
| 30 | } | ||
| 31 | |||
| 32 | /* divide two integers to obtain a fixed point value */ | ||
| 33 | static inline fp_t _frac(fpbuf_t a, fpbuf_t b) | ||
| 34 | { | ||
| 35 | return _fp(FP(a).val / (b)); | ||
| 36 | } | ||
| 37 | |||
| 38 | static inline fpbuf_t _point(fp_t x) | ||
| 39 | { | ||
| 40 | return (x.val % (1 << FP_SHIFT)); | ||
| 41 | |||
| 42 | } | ||
| 43 | |||
| 44 | #define fp2str(x) x.val | ||
| 45 | /*(x.val >> FP_SHIFT), (x.val % (1 << FP_SHIFT)) */ | ||
| 46 | #define _FP_ "%ld/1024" | ||
| 47 | |||
| 48 | static inline fpbuf_t _floor(fp_t x) | ||
| 49 | { | ||
| 50 | return x.val >> FP_SHIFT; | ||
| 51 | } | ||
| 52 | |||
| 53 | /* FIXME: negative rounding */ | ||
| 54 | static inline fpbuf_t _round(fp_t x) | ||
| 55 | { | ||
| 56 | return _floor(x) + ((x.val >> ROUND_BIT) & 1); | ||
| 57 | } | ||
| 58 | |||
| 59 | /* multiply two fixed point values */ | ||
| 60 | static inline fp_t _mul(fp_t a, fp_t b) | ||
| 61 | { | ||
| 62 | return _fp((a.val * b.val) >> FP_SHIFT); | ||
| 63 | } | ||
| 64 | |||
| 65 | static inline fp_t _div(fp_t a, fp_t b) | ||
| 66 | { | ||
| 67 | #if !defined(__KERNEL__) && !defined(unlikely) | ||
| 68 | #define unlikely(x) (x) | ||
| 69 | #define DO_UNDEF_UNLIKELY | ||
| 70 | #endif | ||
| 71 | /* try not to overflow */ | ||
| 72 | if (unlikely( a.val > (2l << ((sizeof(fpbuf_t)*8) - FP_SHIFT)) )) | ||
| 73 | return _fp((a.val / b.val) << FP_SHIFT); | ||
| 74 | else | ||
| 75 | return _fp((a.val << FP_SHIFT) / b.val); | ||
| 76 | #ifdef DO_UNDEF_UNLIKELY | ||
| 77 | #undef unlikely | ||
| 78 | #undef DO_UNDEF_UNLIKELY | ||
| 79 | #endif | ||
| 80 | } | ||
| 81 | |||
| 82 | static inline fp_t _add(fp_t a, fp_t b) | ||
| 83 | { | ||
| 84 | return _fp(a.val + b.val); | ||
| 85 | } | ||
| 86 | |||
| 87 | static inline fp_t _sub(fp_t a, fp_t b) | ||
| 88 | { | ||
| 89 | return _fp(a.val - b.val); | ||
| 90 | } | ||
| 91 | |||
| 92 | static inline fp_t _neg(fp_t x) | ||
| 93 | { | ||
| 94 | return _fp(-x.val); | ||
| 95 | } | ||
| 96 | |||
| 97 | static inline fp_t _abs(fp_t x) | ||
| 98 | { | ||
| 99 | return _fp(abs(x.val)); | ||
| 100 | } | ||
| 101 | |||
| 102 | /* works the same as casting float/double to integer */ | ||
| 103 | static inline fpbuf_t _fp_to_integer(fp_t x) | ||
| 104 | { | ||
| 105 | return _floor(_abs(x)) * ((x.val > 0) ? 1 : -1); | ||
| 106 | } | ||
| 107 | |||
| 108 | static inline fp_t _integer_to_fp(fpbuf_t x) | ||
| 109 | { | ||
| 110 | return _frac(x,1); | ||
| 111 | } | ||
| 112 | |||
| 113 | static inline int _leq(fp_t a, fp_t b) | ||
| 114 | { | ||
| 115 | return a.val <= b.val; | ||
| 116 | } | ||
| 117 | |||
| 118 | static inline int _geq(fp_t a, fp_t b) | ||
| 119 | { | ||
| 120 | return a.val >= b.val; | ||
| 121 | } | ||
| 122 | |||
| 123 | static inline int _lt(fp_t a, fp_t b) | ||
| 124 | { | ||
| 125 | return a.val < b.val; | ||
| 126 | } | ||
| 127 | |||
| 128 | static inline int _gt(fp_t a, fp_t b) | ||
| 129 | { | ||
| 130 | return a.val > b.val; | ||
| 131 | } | ||
| 132 | |||
| 133 | static inline int _eq(fp_t a, fp_t b) | ||
| 134 | { | ||
| 135 | return a.val == b.val; | ||
| 136 | } | ||
| 137 | |||
| 138 | static inline fp_t _max(fp_t a, fp_t b) | ||
| 139 | { | ||
| 140 | if (a.val < b.val) | ||
| 141 | return b; | ||
| 142 | else | ||
| 143 | return a; | ||
| 144 | } | ||
| 145 | #endif | ||
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h index 338245abd6ed..807b7888695a 100644 --- a/include/litmus/litmus.h +++ b/include/litmus/litmus.h | |||
| @@ -63,7 +63,7 @@ void litmus_exit_task(struct task_struct *tsk); | |||
| 63 | #define get_exec_time(t) (tsk_rt(t)->job_params.exec_time) | 63 | #define get_exec_time(t) (tsk_rt(t)->job_params.exec_time) |
| 64 | #define get_deadline(t) (tsk_rt(t)->job_params.deadline) | 64 | #define get_deadline(t) (tsk_rt(t)->job_params.deadline) |
| 65 | #define get_release(t) (tsk_rt(t)->job_params.release) | 65 | #define get_release(t) (tsk_rt(t)->job_params.release) |
| 66 | 66 | #define get_lateness(t) (tsk_rt(t)->job_params.lateness) | |
| 67 | 67 | ||
| 68 | #define is_hrt(t) \ | 68 | #define is_hrt(t) \ |
| 69 | (tsk_rt(t)->task_params.cls == RT_CLASS_HARD) | 69 | (tsk_rt(t)->task_params.cls == RT_CLASS_HARD) |
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h index 89ac0dda7d3d..fac939dbd33a 100644 --- a/include/litmus/rt_param.h +++ b/include/litmus/rt_param.h | |||
| @@ -110,6 +110,12 @@ struct rt_job { | |||
| 110 | /* How much service has this job received so far? */ | 110 | /* How much service has this job received so far? */ |
| 111 | lt_t exec_time; | 111 | lt_t exec_time; |
| 112 | 112 | ||
| 113 | /* By how much did the prior job miss its deadline? | ||
| 114 | * Value differs from tardiness in that lateness may | ||
| 115 | * be negative (when job finishes before its deadline). | ||
| 116 | */ | ||
| 117 | long long lateness; | ||
| 118 | |||
| 113 | /* Which job is this. This is used to let user space | 119 | /* Which job is this. This is used to let user space |
| 114 | * specify which job to wait for, which is important if jobs | 120 | * specify which job to wait for, which is important if jobs |
| 115 | * overrun. If we just call sys_sleep_next_period() then we | 121 | * overrun. If we just call sys_sleep_next_period() then we |
diff --git a/litmus/Kconfig b/litmus/Kconfig index 68459d4dca41..48ff3e3c657c 100644 --- a/litmus/Kconfig +++ b/litmus/Kconfig | |||
| @@ -79,6 +79,52 @@ config SCHED_CPU_AFFINITY | |||
| 79 | 79 | ||
| 80 | Say Yes if unsure. | 80 | Say Yes if unsure. |
| 81 | 81 | ||
| 82 | choice | ||
| 83 | prompt "EDF Tie-Break Behavior" | ||
| 84 | default EDF_TIE_BREAK_LATENESS_NORM | ||
| 85 | help | ||
| 86 | Allows the configuration of tie-breaking behavior when the deadlines | ||
| 87 | of two EDF-scheduled tasks are equal. | ||
| 88 | |||
| 89 | config EDF_TIE_BREAK_LATENESS | ||
| 90 | bool "Lateness-based Tie Break" | ||
| 91 | help | ||
| 92 | Break ties between two jobs, A and B, based upon the lateness of their | ||
| 93 | prior jobs. The job with the greatest lateness has priority. Note that | ||
| 94 | lateness has a negative value if the prior job finished before its | ||
| 95 | deadline. | ||
| 96 | |||
| 97 | config EDF_TIE_BREAK_LATENESS_NORM | ||
| 98 | bool "Normalized Lateness-based Tie Break" | ||
| 99 | help | ||
| 100 | Break ties between two jobs, A and B, based upon the lateness, normalized | ||
| 101 | by relative deadline, of their prior jobs. The job with the greatest | ||
| 102 | normalized lateness has priority. Note that lateness has a negative value | ||
| 103 | if the prior job finished before its deadline. | ||
| 104 | |||
| 105 | Normalized lateness tie-breaks are likely desirable over non-normalized | ||
| 106 | tie-breaks if the execution times and/or relative deadlines of tasks in a | ||
| 107 | task set vary greatly. | ||
| 108 | |||
| 109 | config EDF_TIE_BREAK_HASH | ||
| 110 | bool "Hash-based Tie Breaks" | ||
| 111 | help | ||
| 112 | Break ties between two jobs, A and B, with equal deadlines by using a | ||
| 113 | uniform hash; i.e.: hash(A.pid, A.job_num) < hash(B.pid, B.job_num). Job | ||
| 114 | A has a ~50% chance of winning a given tie-break. | ||
| 115 | |||
| 116 | config EDF_PID_TIE_BREAK | ||
| 117 | bool "PID-based Tie Breaks" | ||
| 118 | help | ||
| 119 | Break ties based upon OS-assigned process IDs. Use this option if | ||
| 120 | required by algorithm's real-time analysis or per-task response-time | ||
| 121 | jitter must be minimized in overload conditions. | ||
| 122 | |||
| 123 | NOTES: | ||
| 124 | * This tie-breaking method was default in Litmus 2012.2 and before. | ||
| 125 | |||
| 126 | endchoice | ||
| 127 | |||
| 82 | endmenu | 128 | endmenu |
| 83 | 129 | ||
| 84 | menu "Tracing" | 130 | menu "Tracing" |
diff --git a/litmus/edf_common.c b/litmus/edf_common.c index 668737f0fbf9..52205df3ea8b 100644 --- a/litmus/edf_common.c +++ b/litmus/edf_common.c | |||
| @@ -14,6 +14,32 @@ | |||
| 14 | 14 | ||
| 15 | #include <litmus/edf_common.h> | 15 | #include <litmus/edf_common.h> |
| 16 | 16 | ||
| 17 | #ifdef CONFIG_EDF_TIE_BREAK_LATENESS_NORM | ||
| 18 | #include <litmus/fpmath.h> | ||
| 19 | #endif | ||
| 20 | |||
| 21 | #ifdef CONFIG_EDF_TIE_BREAK_HASH | ||
| 22 | #include <linux/hash.h> | ||
| 23 | static inline long edf_hash(struct task_struct *t) | ||
| 24 | { | ||
| 25 | /* pid is 32 bits, so normally we would shove that into the | ||
| 26 | * upper 32-bits and put the job number in the bottom | ||
| 27 | * and hash the 64-bit number with hash_64(). Sadly, | ||
| 28 | * in testing, hash_64() doesn't distribute keys where the | ||
| 29 | * upper bits are close together (as would be the case with | ||
| 30 | * pids) and job numbers are equal (as would be the case with | ||
| 31 | * synchronous task sets with all relative deadlines equal). | ||
| 32 | * | ||
| 33 | * A 2006 Linux patch proposed the following solution | ||
| 34 | * (but for some reason it wasn't accepted...). | ||
| 35 | * | ||
| 36 | * At least this workaround works for 32-bit systems as well. | ||
| 37 | */ | ||
| 38 | return hash_32(hash_32((u32)tsk_rt(t)->job_params.job_no, 32) ^ t->pid, 32); | ||
| 39 | } | ||
| 40 | #endif | ||
| 41 | |||
| 42 | |||
| 17 | /* edf_higher_prio - returns true if first has a higher EDF priority | 43 | /* edf_higher_prio - returns true if first has a higher EDF priority |
| 18 | * than second. Deadline ties are broken by PID. | 44 | * than second. Deadline ties are broken by PID. |
| 19 | * | 45 | * |
| @@ -63,32 +89,78 @@ int edf_higher_prio(struct task_struct* first, | |||
| 63 | 89 | ||
| 64 | #endif | 90 | #endif |
| 65 | 91 | ||
| 66 | /* Determine the task with earliest deadline, with | 92 | if (earlier_deadline(first_task, second_task)) { |
| 67 | * tie-break logic. | ||
| 68 | */ | ||
| 69 | if (unlikely(!is_realtime(second_task))) { | ||
| 70 | return 1; | ||
| 71 | } | ||
| 72 | else if (earlier_deadline(first_task, second_task)) { | ||
| 73 | /* Is the deadline of the first task earlier? | ||
| 74 | * Then it has higher priority. | ||
| 75 | */ | ||
| 76 | return 1; | 93 | return 1; |
| 77 | } | 94 | } |
| 78 | else if (get_deadline(first_task) == get_deadline(second_task)) { | 95 | else if (get_deadline(first_task) == get_deadline(second_task)) { |
| 79 | /* Need to tie break */ | 96 | /* Need to tie break. All methods must set pid_break to 0/1 if |
| 80 | 97 | * first_task does not have priority over second_task. | |
| 81 | /* Tie break by pid */ | 98 | */ |
| 82 | if (first_task->pid < second_task->pid) { | 99 | int pid_break; |
| 100 | |||
| 101 | |||
| 102 | #if defined(CONFIG_EDF_TIE_BREAK_LATENESS) | ||
| 103 | /* Tie break by lateness. Jobs with greater lateness get | ||
| 104 | * priority. This should spread tardiness across all tasks, | ||
| 105 | * especially in task sets where all tasks have the same | ||
| 106 | * period and relative deadlines. | ||
| 107 | */ | ||
| 108 | if (get_lateness(first_task) > get_lateness(second_task)) { | ||
| 83 | return 1; | 109 | return 1; |
| 84 | } | 110 | } |
| 85 | else if (first_task->pid == second_task->pid) { | 111 | pid_break = (get_lateness(first_task) == get_lateness(second_task)); |
| 86 | /* If the PIDs are the same then the task with the | 112 | |
| 87 | * inherited priority wins. | 113 | |
| 88 | */ | 114 | #elif defined(CONFIG_EDF_TIE_BREAK_LATENESS_NORM) |
| 89 | if (!second_task->rt_param.inh_task) { | 115 | /* Tie break by lateness, normalized by relative deadline. Jobs with |
| 116 | * greater normalized lateness get priority. | ||
| 117 | * | ||
| 118 | * Note: Considered using the algebraically equivalent | ||
| 119 | * lateness(first)*relative_deadline(second) > | ||
| 120 | lateness(second)*relative_deadline(first) | ||
| 121 | * to avoid fixed-point math, but values are prone to overflow if inputs | ||
| 122 | * are on the order of several seconds, even in 64-bit. | ||
| 123 | */ | ||
| 124 | fp_t fnorm = _frac(get_lateness(first_task), | ||
| 125 | get_rt_relative_deadline(first_task)); | ||
| 126 | fp_t snorm = _frac(get_lateness(second_task), | ||
| 127 | get_rt_relative_deadline(second_task)); | ||
| 128 | if (_gt(fnorm, snorm)) { | ||
| 129 | return 1; | ||
| 130 | } | ||
| 131 | pid_break = _eq(fnorm, snorm); | ||
| 132 | |||
| 133 | |||
| 134 | #elif defined(CONFIG_EDF_TIE_BREAK_HASH) | ||
| 135 | /* Tie break by comparing hashes of the (pid, job#) tuple. There should be | ||
| 136 | * a 50% chance that first_task has a higher priority than second_task. | ||
| 137 | */ | ||
| 138 | long fhash = edf_hash(first_task); | ||
| 139 | long shash = edf_hash(second_task); | ||
| 140 | if (fhash < shash) { | ||
| 141 | return 1; | ||
| 142 | } | ||
| 143 | pid_break = (fhash == shash); | ||
| 144 | #else | ||
| 145 | |||
| 146 | |||
| 147 | /* CONFIG_EDF_PID_TIE_BREAK */ | ||
| 148 | pid_break = 1; // fall through to tie-break by pid; | ||
| 149 | #endif | ||
| 150 | |||
| 151 | /* Tie break by pid */ | ||
| 152 | if(pid_break) { | ||
| 153 | if (first_task->pid < second_task->pid) { | ||
| 90 | return 1; | 154 | return 1; |
| 91 | } | 155 | } |
| 156 | else if (first_task->pid == second_task->pid) { | ||
| 157 | /* If the PIDs are the same then the task with the | ||
| 158 | * inherited priority wins. | ||
| 159 | */ | ||
| 160 | if (!second_task->rt_param.inh_task) { | ||
| 161 | return 1; | ||
| 162 | } | ||
| 163 | } | ||
| 92 | } | 164 | } |
| 93 | } | 165 | } |
| 94 | return 0; /* fall-through. prio(second_task) > prio(first_task) */ | 166 | return 0; /* fall-through. prio(second_task) > prio(first_task) */ |
diff --git a/litmus/jobs.c b/litmus/jobs.c index bc8246572e54..fb093c03d53d 100644 --- a/litmus/jobs.c +++ b/litmus/jobs.c | |||
| @@ -23,6 +23,14 @@ static inline void setup_release(struct task_struct *t, lt_t release) | |||
| 23 | void prepare_for_next_period(struct task_struct *t) | 23 | void prepare_for_next_period(struct task_struct *t) |
| 24 | { | 24 | { |
| 25 | BUG_ON(!t); | 25 | BUG_ON(!t); |
| 26 | |||
| 27 | /* Record lateness before we set up the next job's | ||
| 28 | * release and deadline. Lateness may be negative. | ||
| 29 | */ | ||
| 30 | t->rt_param.job_params.lateness = | ||
| 31 | (long long)litmus_clock() - | ||
| 32 | (long long)t->rt_param.job_params.deadline; | ||
| 33 | |||
| 26 | setup_release(t, get_release(t) + get_rt_period(t)); | 34 | setup_release(t, get_release(t) + get_rt_period(t)); |
| 27 | } | 35 | } |
| 28 | 36 | ||
