From e6f51fb826ce98d436f445aae4eb9e9dba1f30e8 Mon Sep 17 00:00:00 2001 From: Glenn Elliott Date: Mon, 20 Aug 2012 17:28:55 -0400 Subject: EDF priority tie-breaks. Instead of tie-breaking by PID (which is a static priority tie-break), we can tie-break by other job-level-unique parameters. This is desirable because it spreads tardiness more equally among tasks: static priority tie-breaks cause tasks with greater PID values to experience the most tardiness. There are four tie-break methods: 1) Lateness. If two jobs, J_{1,i} and J_{2,j} of tasks T_1 and T_2, respectively, have equal deadlines, we favor the job of the task that had the worst lateness for jobs J_{1,i-1} and J_{2,j-1}. Note: Unlike tardiness, lateness may be less than zero. This occurs when a job finishes before its deadline. 2) Normalized Lateness. The same as #1, except lateness is first normalized by each task's relative deadline. This prevents tasks with short relative deadlines and small execution requirements from always losing tie-breaks. 3) Hash. The job tuple (PID, Job#) is used to generate a hash. Hash values are then compared. A job has ~50% chance of winning a tie-break with respect to another job. Note: Empirical testing shows that some jobs can have +/- ~1.5% advantage in tie-breaks. Linux's built-in hash function is not a totally uniform hash. 4) PIDs. PID-based tie-break used in prior versions of Litmus. --- include/litmus/fpmath.h | 145 ++++++++++++++++++++++++++++++++++++++++ include/litmus/litmus.h | 2 +- include/litmus/rt_param.h | 6 ++ 3 files changed, 152 insertions(+), 1 deletion(-) create mode 100644 include/litmus/fpmath.h (limited to 'include/litmus') diff --git a/include/litmus/fpmath.h b/include/litmus/fpmath.h new file mode 100644 index 000000000000..04d4bcaeae96 --- /dev/null +++ b/include/litmus/fpmath.h @@ -0,0 +1,145 @@ +#ifndef __FP_MATH_H__ +#define __FP_MATH_H__ + +#ifndef __KERNEL__ +#include <stdint.h> +#define abs(x) (((x) < 0) ? 
-(x) : x) +#endif + +// Use 64-bit because we want to track things at the nanosecond scale. +// This can lead to very large numbers. +typedef int64_t fpbuf_t; +typedef struct +{ + fpbuf_t val; +} fp_t; + +#define FP_SHIFT 10 +#define ROUND_BIT (FP_SHIFT - 1) + +#define _fp(x) ((fp_t) {x}) + +#ifdef __KERNEL__ +static const fp_t LITMUS_FP_ZERO = {.val = 0}; +static const fp_t LITMUS_FP_ONE = {.val = (1 << FP_SHIFT)}; +#endif + +static inline fp_t FP(fpbuf_t x) +{ + return _fp(((fpbuf_t) x) << FP_SHIFT); +} + +/* divide two integers to obtain a fixed point value */ +static inline fp_t _frac(fpbuf_t a, fpbuf_t b) +{ + return _fp(FP(a).val / (b)); +} + +static inline fpbuf_t _point(fp_t x) +{ + return (x.val % (1 << FP_SHIFT)); + +} + +#define fp2str(x) x.val +/*(x.val >> FP_SHIFT), (x.val % (1 << FP_SHIFT)) */ +#define _FP_ "%ld/1024" + +static inline fpbuf_t _floor(fp_t x) +{ + return x.val >> FP_SHIFT; +} + +/* FIXME: negative rounding */ +static inline fpbuf_t _round(fp_t x) +{ + return _floor(x) + ((x.val >> ROUND_BIT) & 1); +} + +/* multiply two fixed point values */ +static inline fp_t _mul(fp_t a, fp_t b) +{ + return _fp((a.val * b.val) >> FP_SHIFT); +} + +static inline fp_t _div(fp_t a, fp_t b) +{ +#if !defined(__KERNEL__) && !defined(unlikely) +#define unlikely(x) (x) +#define DO_UNDEF_UNLIKELY +#endif + /* try not to overflow */ + if (unlikely( a.val > (2l << ((sizeof(fpbuf_t)*8) - FP_SHIFT)) )) + return _fp((a.val / b.val) << FP_SHIFT); + else + return _fp((a.val << FP_SHIFT) / b.val); +#ifdef DO_UNDEF_UNLIKELY +#undef unlikely +#undef DO_UNDEF_UNLIKELY +#endif +} + +static inline fp_t _add(fp_t a, fp_t b) +{ + return _fp(a.val + b.val); +} + +static inline fp_t _sub(fp_t a, fp_t b) +{ + return _fp(a.val - b.val); +} + +static inline fp_t _neg(fp_t x) +{ + return _fp(-x.val); +} + +static inline fp_t _abs(fp_t x) +{ + return _fp(abs(x.val)); +} + +/* works the same as casting float/double to integer */ +static inline fpbuf_t _fp_to_integer(fp_t x) +{ + 
return _floor(_abs(x)) * ((x.val > 0) ? 1 : -1); +} + +static inline fp_t _integer_to_fp(fpbuf_t x) +{ + return _frac(x,1); +} + +static inline int _leq(fp_t a, fp_t b) +{ + return a.val <= b.val; +} + +static inline int _geq(fp_t a, fp_t b) +{ + return a.val >= b.val; +} + +static inline int _lt(fp_t a, fp_t b) +{ + return a.val < b.val; +} + +static inline int _gt(fp_t a, fp_t b) +{ + return a.val > b.val; +} + +static inline int _eq(fp_t a, fp_t b) +{ + return a.val == b.val; +} + +static inline fp_t _max(fp_t a, fp_t b) +{ + if (a.val < b.val) + return b; + else + return a; +} +#endif diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h index 338245abd6ed..807b7888695a 100644 --- a/include/litmus/litmus.h +++ b/include/litmus/litmus.h @@ -63,7 +63,7 @@ void litmus_exit_task(struct task_struct *tsk); #define get_exec_time(t) (tsk_rt(t)->job_params.exec_time) #define get_deadline(t) (tsk_rt(t)->job_params.deadline) #define get_release(t) (tsk_rt(t)->job_params.release) - +#define get_lateness(t) (tsk_rt(t)->job_params.lateness) #define is_hrt(t) \ (tsk_rt(t)->task_params.cls == RT_CLASS_HARD) diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h index 89ac0dda7d3d..fac939dbd33a 100644 --- a/include/litmus/rt_param.h +++ b/include/litmus/rt_param.h @@ -110,6 +110,12 @@ struct rt_job { /* How much service has this job received so far? */ lt_t exec_time; + /* By how much did the prior job miss its deadline? * Value differs from tardiness in that lateness may * be negative (when a job finishes before its deadline). */ + long long lateness; + /* Which job is this. This is used to let user space * specify which job to wait for, which is important if jobs * overrun. If we just call sys_sleep_next_period() then we -- cgit v1.2.2