From 077aaecac31331b65442275843932314049a2ceb Mon Sep 17 00:00:00 2001
From: Glenn Elliott
Date: Mon, 20 Aug 2012 17:28:55 -0400
Subject: EDF priority tie-breaks.

Instead of tie-breaking by PID (which is a static priority tie-break),
we can tie-break by other job-level-unique parameters. This is
desirable because it spreads tardiness more evenly across tasks:
static priority tie-breaks cause tasks with greater PID values to
experience the most tardiness.

There are four tie-break methods:

1) Lateness. If two jobs, J_{1,i} and J_{2,j} of tasks T_1 and T_2,
respectively, have equal deadlines, we favor the job of the task whose
prior job (J_{1,i-1} or J_{2,j-1}) had the greater lateness.

Note: Unlike tardiness, lateness may be less than zero. This occurs
when a job finishes before its deadline.

2) Normalized Lateness. The same as #1, except lateness is first
normalized by each task's relative deadline. This prevents tasks with
short relative deadlines and small execution requirements from always
losing tie-breaks.

3) Hash. The job tuple (PID, Job#) is used to generate a hash. Hash
values are then compared. A job has a ~50% chance of winning a
tie-break with respect to another job.

Note: Empirical testing shows that some jobs can have a +/- ~1.5%
advantage in tie-breaks. Linux's built-in hash function is not a
perfectly uniform hash.

4) PIDs. The PID-based tie-break used in prior versions of Litmus.

Conflicts:
	litmus/edf_common.c
---
 include/litmus/fpmath.h   | 145 ++++++++++++++++++++++++++++++++++++++++++++++
 include/litmus/litmus.h   |   2 +-
 include/litmus/rt_param.h |   6 ++
 litmus/Kconfig            |  46 +++++++++++++++
 litmus/edf_common.c       | 110 +++++++++++++++++++++++++++++------
 litmus/jobs.c             |   8 +++
 6 files changed, 297 insertions(+), 20 deletions(-)
 create mode 100644 include/litmus/fpmath.h

diff --git a/include/litmus/fpmath.h b/include/litmus/fpmath.h
new file mode 100644
index 000000000000..04d4bcaeae96
--- /dev/null
+++ b/include/litmus/fpmath.h
@@ -0,0 +1,145 @@
+#ifndef __FP_MATH_H__
+#define __FP_MATH_H__
+
+#ifndef __KERNEL__
+#include <stdint.h>
+#define abs(x) (((x) < 0) ? -(x) : x)
+#endif
+
+// Use 64-bit because we want to track things at the nanosecond scale.
+// This can lead to very large numbers.
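+//
+// For example, with FP_SHIFT = 10 below, 1.0 is stored as 1 << 10 = 1024,
+// so FP(3).val == 3072 and _frac(1, 3).val == 1024/3 == 341 (~0.333).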
+typedef int64_t fpbuf_t;
+typedef struct
+{
+	fpbuf_t val;
+} fp_t;
+
+#define FP_SHIFT 10
+#define ROUND_BIT (FP_SHIFT - 1)
+
+#define _fp(x) ((fp_t) {x})
+
+#ifdef __KERNEL__
+static const fp_t LITMUS_FP_ZERO = {.val = 0};
+static const fp_t LITMUS_FP_ONE = {.val = (1 << FP_SHIFT)};
+#endif
+
+static inline fp_t FP(fpbuf_t x)
+{
+	return _fp(((fpbuf_t) x) << FP_SHIFT);
+}
+
+/* divide two integers to obtain a fixed point value */
+static inline fp_t _frac(fpbuf_t a, fpbuf_t b)
+{
+	return _fp(FP(a).val / (b));
+}
+
+static inline fpbuf_t _point(fp_t x)
+{
+	return (x.val % (1 << FP_SHIFT));
+}
+
+#define fp2str(x) x.val
+/*(x.val >> FP_SHIFT), (x.val % (1 << FP_SHIFT)) */
+#define _FP_ "%ld/1024"
+
+static inline fpbuf_t _floor(fp_t x)
+{
+	return x.val >> FP_SHIFT;
+}
+
+/* FIXME: negative rounding */
+static inline fpbuf_t _round(fp_t x)
+{
+	return _floor(x) + ((x.val >> ROUND_BIT) & 1);
+}
+
+/* multiply two fixed point values */
+static inline fp_t _mul(fp_t a, fp_t b)
+{
+	return _fp((a.val * b.val) >> FP_SHIFT);
+}
+
+static inline fp_t _div(fp_t a, fp_t b)
+{
+#if !defined(__KERNEL__) && !defined(unlikely)
+#define unlikely(x) (x)
+#define DO_UNDEF_UNLIKELY
+#endif
+	/* try not to overflow */
+	if (unlikely( a.val > (2l << ((sizeof(fpbuf_t)*8) - FP_SHIFT)) ))
+		return _fp((a.val / b.val) << FP_SHIFT);
+	else
+		return _fp((a.val << FP_SHIFT) / b.val);
+#ifdef DO_UNDEF_UNLIKELY
+#undef unlikely
+#undef DO_UNDEF_UNLIKELY
+#endif
+}
+
+static inline fp_t _add(fp_t a, fp_t b)
+{
+	return _fp(a.val + b.val);
+}
+
+static inline fp_t _sub(fp_t a, fp_t b)
+{
+	return _fp(a.val - b.val);
+}
+
+static inline fp_t _neg(fp_t x)
+{
+	return _fp(-x.val);
+}
+
+static inline fp_t _abs(fp_t x)
+{
+	return _fp(abs(x.val));
+}
+
+/* works the same as casting float/double to integer */
+static inline fpbuf_t _fp_to_integer(fp_t x)
+{
+	return _floor(_abs(x)) * ((x.val > 0) ? 1 : -1);
+}
+
+static inline fp_t _integer_to_fp(fpbuf_t x)
+{
+	return _frac(x,1);
+}
+
+static inline int _leq(fp_t a, fp_t b)
+{
+	return a.val <= b.val;
+}
+
+static inline int _geq(fp_t a, fp_t b)
+{
+	return a.val >= b.val;
+}
+
+static inline int _lt(fp_t a, fp_t b)
+{
+	return a.val < b.val;
+}
+
+static inline int _gt(fp_t a, fp_t b)
+{
+	return a.val > b.val;
+}
+
+static inline int _eq(fp_t a, fp_t b)
+{
+	return a.val == b.val;
+}
+
+static inline fp_t _max(fp_t a, fp_t b)
+{
+	if (a.val < b.val)
+		return b;
+	else
+		return a;
+}
+#endif

diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 338245abd6ed..807b7888695a 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -63,7 +63,7 @@ void litmus_exit_task(struct task_struct *tsk);
 #define get_exec_time(t)	(tsk_rt(t)->job_params.exec_time)
 #define get_deadline(t)		(tsk_rt(t)->job_params.deadline)
 #define get_release(t)		(tsk_rt(t)->job_params.release)
-
+#define get_lateness(t)		(tsk_rt(t)->job_params.lateness)
 
 #define is_hrt(t)     		\
 	(tsk_rt(t)->task_params.cls == RT_CLASS_HARD)

diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 89ac0dda7d3d..fac939dbd33a 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -110,6 +110,12 @@ struct rt_job {
 	/* How much service has this job received so far? */
 	lt_t exec_time;
 
+	/* By how much did the prior job miss its deadline?
+	 * This differs from tardiness in that lateness may
+	 * be negative (when a job finishes before its deadline).
+	 */
+	long long lateness;
+
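+	/* Example: a job that completes 2ms after its deadline has a
+	 * lateness of +2ms; one that completes 2ms early has a lateness
+	 * of -2ms, whereas its tardiness would be zero.
+	 */
+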
 	/* Which job is this. This is used to let user space
 	 * specify which job to wait for, which is important if jobs
 	 * overrun. If we just call sys_sleep_next_period() then we

diff --git a/litmus/Kconfig b/litmus/Kconfig
index 68459d4dca41..48ff3e3c657c 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -79,6 +79,52 @@ config SCHED_CPU_AFFINITY
 
 	  Say Yes if unsure.
 
+choice
+	prompt "EDF Tie-Break Behavior"
+	default EDF_TIE_BREAK_LATENESS_NORM
+	help
+	  Allows the configuration of tie-breaking behavior when the deadlines
+	  of two EDF-scheduled tasks are equal.
+
+	config EDF_TIE_BREAK_LATENESS
+	bool "Lateness-based Tie Break"
+	help
+	  Break ties between two jobs, A and B, based upon the lateness of their
+	  prior jobs. The job with the greater lateness has priority. Note that
+	  lateness has a negative value if the prior job finished before its
+	  deadline.
+
+	config EDF_TIE_BREAK_LATENESS_NORM
+	bool "Normalized Lateness-based Tie Break"
+	help
+	  Break ties between two jobs, A and B, based upon the lateness of their
+	  prior jobs, normalized by each task's relative deadline. The job with
+	  the greater normalized lateness has priority. Note that lateness has
+	  a negative value if the prior job finished before its deadline.
+
+	  Normalized lateness tie-breaks are likely desirable over non-normalized
+	  tie-breaks if the execution times and/or relative deadlines of tasks in
+	  a task set vary greatly.
+
+	config EDF_TIE_BREAK_HASH
+	bool "Hash-based Tie Breaks"
+	help
+	  Break ties between two jobs, A and B, with equal deadlines by using a
+	  uniform hash; i.e.: hash(A.pid, A.job_num) < hash(B.pid, B.job_num).
+	  Job A has a ~50% chance of winning a given tie-break.
+
+	config EDF_PID_TIE_BREAK
+	bool "PID-based Tie Breaks"
+	help
+	  Break ties based upon OS-assigned process IDs. Use this option if it
+	  is required by an algorithm's real-time analysis, or if per-task
+	  response-time jitter must be minimized in overload conditions.
+
+	  NOTES:
+	    * This tie-breaking method was the default in Litmus 2012.2 and
+	      before.
+
+endchoice
+
 endmenu
 
 menu "Tracing"

diff --git a/litmus/edf_common.c b/litmus/edf_common.c
index 668737f0fbf9..52205df3ea8b 100644
--- a/litmus/edf_common.c
+++ b/litmus/edf_common.c
@@ -14,6 +14,32 @@
 
 #include <litmus/edf_common.h>
 
+#ifdef CONFIG_EDF_TIE_BREAK_LATENESS_NORM
+#include <litmus/fpmath.h>
+#endif
+
+#ifdef CONFIG_EDF_TIE_BREAK_HASH
+#include <linux/hash.h>
+static inline long edf_hash(struct task_struct *t)
+{
+	/* pid is 32 bits, so normally we would shove that into the
+	 * upper 32 bits and put the job number in the bottom
+	 * and hash the 64-bit number with hash_64(). Sadly,
+	 * in testing, hash_64() doesn't distribute keys where the
+	 * upper bits are close together (as would be the case with
+	 * pids) and job numbers are equal (as would be the case with
+	 * synchronous task sets with all relative deadlines equal).
+	 *
+	 * A 2006 Linux patch proposed the following solution
+	 * (but for some reason it wasn't accepted...).
+	 *
+	 * At least this workaround works for 32-bit systems as well.
+	 */
+	return hash_32(hash_32((u32)tsk_rt(t)->job_params.job_no, 32) ^ t->pid, 32);
+}
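+
+/* For example (hypothetical values): two jobs with equal deadlines and
+ * tuples (pid=1000, job_no=42) and (pid=1001, job_no=42) hash to
+ * effectively unrelated values, so each should win the tie-break
+ * roughly 50% of the time over a long run of jobs.
+ */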
+#endif
+
+
 /* edf_higher_prio - returns true if first has a higher EDF priority
  * than second. Deadline ties are broken by PID.
  *
@@ -63,32 +89,78 @@ int edf_higher_prio(struct task_struct* first,
 
 #endif
 
-	/* Determine the task with earliest deadline, with
-	 * tie-break logic.
-	 */
-	if (unlikely(!is_realtime(second_task))) {
-		return 1;
-	}
-	else if (earlier_deadline(first_task, second_task)) {
-		/* Is the deadline of the first task earlier?
-		 * Then it has higher priority.
-		 */
+	if (earlier_deadline(first_task, second_task)) {
 		return 1;
 	}
 	else if (get_deadline(first_task) == get_deadline(second_task)) {
-		/* Need to tie break */
-
-		/* Tie break by pid */
-		if (first_task->pid < second_task->pid) {
+		/* Need to tie break. Each method below either returns 1
+		 * (first_task wins outright) or sets pid_break: 1 if the
+		 * tie is still unresolved and the PID tie-break should
+		 * decide, 0 if second_task wins.
+		 */
+		int pid_break;
+
+
+#if defined(CONFIG_EDF_TIE_BREAK_LATENESS)
+		/* Tie break by lateness. Jobs with greater lateness get
+		 * priority. This should spread tardiness across all tasks,
+		 * especially in task sets where all tasks have the same
+		 * period and relative deadlines.
+		 */
+		if (get_lateness(first_task) > get_lateness(second_task)) {
 			return 1;
 		}
-		else if (first_task->pid == second_task->pid) {
-			/* If the PIDs are the same then the task with the
-			 * inherited priority wins.
-			 */
-			if (!second_task->rt_param.inh_task) {
+		pid_break = (get_lateness(first_task) == get_lateness(second_task));
+
+
+#elif defined(CONFIG_EDF_TIE_BREAK_LATENESS_NORM)
+		/* Tie break by lateness, normalized by relative deadline.
+		 * Jobs with greater normalized lateness get priority.
+		 *
+		 * Note: Considered using the algebraically equivalent
+		 *	lateness(first)*relative_deadline(second) >
+		 *	lateness(second)*relative_deadline(first)
+		 * to avoid fixed-point math, but values are prone to overflow
+		 * if inputs are on the order of several seconds, even in 64-bit.
+		 */
+		fp_t fnorm = _frac(get_lateness(first_task),
+				   get_rt_relative_deadline(first_task));
+		fp_t snorm = _frac(get_lateness(second_task),
+				   get_rt_relative_deadline(second_task));
+		if (_gt(fnorm, snorm)) {
+			return 1;
+		}
+		pid_break = _eq(fnorm, snorm);
+
+
+#elif defined(CONFIG_EDF_TIE_BREAK_HASH)
+		/* Tie break by comparing hashes of the (pid, job#) tuple.
+		 * There should be a 50% chance that first_task has a higher
+		 * priority than second_task.
+		 */
+		long fhash = edf_hash(first_task);
+		long shash = edf_hash(second_task);
+		if (fhash < shash) {
+			return 1;
+		}
+		pid_break = (fhash == shash);
+#else
+
+
+		/* CONFIG_EDF_PID_TIE_BREAK */
+		pid_break = 1; /* fall through to tie-break by pid */
+#endif
+
+		/* Tie break by pid */
+		if (pid_break) {
+			if (first_task->pid < second_task->pid) {
 				return 1;
 			}
+			else if (first_task->pid == second_task->pid) {
+				/* If the PIDs are the same then the task with
+				 * the inherited priority wins.
+				 */
+				if (!second_task->rt_param.inh_task) {
+					return 1;
+				}
+			}
 		}
 	}
 	return 0; /* fall-through. prio(second_task) > prio(first_task) */

diff --git a/litmus/jobs.c b/litmus/jobs.c
index bc8246572e54..fb093c03d53d 100644
--- a/litmus/jobs.c
+++ b/litmus/jobs.c
@@ -23,6 +23,14 @@ static inline void setup_release(struct task_struct *t, lt_t release)
 void prepare_for_next_period(struct task_struct *t)
 {
 	BUG_ON(!t);
+
+	/* Record lateness before we set up the next job's
+	 * release and deadline. Lateness may be negative.
+	 */
+	t->rt_param.job_params.lateness =
+		(long long)litmus_clock() -
+		(long long)t->rt_param.job_params.deadline;
+
 	setup_release(t, get_release(t) + get_rt_period(t));
 }
-- 
cgit v1.2.2
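As a quick sanity check of the normalized-lateness arithmetic outside the
kernel, the sketch below exercises the !__KERNEL__ path of
include/litmus/fpmath.h from userspace. It is not part of the patch; the
file name, build line, and input values are made up for illustration
(build with: gcc -Iinclude/litmus -o fpcheck fpcheck.c).

/* fpcheck.c -- userspace sketch of the CONFIG_EDF_TIE_BREAK_LATENESS_NORM
 * comparison; values are hypothetical, in nanoseconds.
 */
#include <stdio.h>
#include <stdint.h>
#include "fpmath.h"

int main(void)
{
	int64_t lateness_a = 2000000;	/* task A's prior job: 2ms late */
	int64_t lateness_b = -500000;	/* task B's prior job: 0.5ms early */
	int64_t rel_dl_a = 10000000;	/* A's relative deadline: 10ms */
	int64_t rel_dl_b = 5000000;	/* B's relative deadline: 5ms */

	/* Same computation as the tie-break branch in edf_higher_prio(). */
	fp_t fnorm = _frac(lateness_a, rel_dl_a);	/* ~0.2 */
	fp_t snorm = _frac(lateness_b, rel_dl_b);	/* ~-0.1 */

	printf("fnorm = %lld/1024, snorm = %lld/1024\n",
	       (long long)fnorm.val, (long long)snorm.val);

	if (_gt(fnorm, snorm))
		printf("A wins the deadline tie-break\n");
	else
		printf("A does not win outright (tie, or B wins)\n");
	return 0;
}

With FP_SHIFT = 10, this prints fnorm = 204/1024 and snorm = -102/1024
(about 0.2 and -0.1), so task A's job wins the tie-break.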