-rw-r--r--  include/litmus/fpmath.h  145
-rw-r--r--  litmus/Kconfig            46
-rw-r--r--  litmus/edf_common.c       78
3 files changed, 265 insertions(+), 4 deletions(-)
diff --git a/include/litmus/fpmath.h b/include/litmus/fpmath.h
new file mode 100644
index 000000000000..04d4bcaeae96
--- /dev/null
+++ b/include/litmus/fpmath.h
@@ -0,0 +1,145 @@
+#ifndef __FP_MATH_H__
+#define __FP_MATH_H__
+
+#ifndef __KERNEL__
+#include <stdint.h>
+#define abs(x) (((x) < 0) ? -(x) : (x))
+#endif
+
+// Use 64-bit because we want to track things at the nanosecond scale.
+// This can lead to very large numbers.
+typedef int64_t fpbuf_t;
+typedef struct
+{
+	fpbuf_t val;
+} fp_t;
+
+#define FP_SHIFT 10
+#define ROUND_BIT (FP_SHIFT - 1)
+
+#define _fp(x) ((fp_t) {x})
+
+#ifdef __KERNEL__
+static const fp_t LITMUS_FP_ZERO = {.val = 0};
+static const fp_t LITMUS_FP_ONE = {.val = (1 << FP_SHIFT)};
+#endif
+
+static inline fp_t FP(fpbuf_t x)
+{
+	return _fp(((fpbuf_t) x) << FP_SHIFT);
+}
+
+/* divide two integers to obtain a fixed point value */
+static inline fp_t _frac(fpbuf_t a, fpbuf_t b)
+{
+	return _fp(FP(a).val / (b));
+}
+
+static inline fpbuf_t _point(fp_t x)
+{
+	return (x.val % (1 << FP_SHIFT));
+}
+
+#define fp2str(x) x.val
+/* (x.val >> FP_SHIFT), (x.val % (1 << FP_SHIFT)) */
+#define _FP_ "%ld/1024"
+
+static inline fpbuf_t _floor(fp_t x)
+{
+	return x.val >> FP_SHIFT;
+}
+
+/* FIXME: negative rounding */
+static inline fpbuf_t _round(fp_t x)
+{
+	return _floor(x) + ((x.val >> ROUND_BIT) & 1);
+}
+
+/* multiply two fixed point values */
+static inline fp_t _mul(fp_t a, fp_t b)
+{
+	return _fp((a.val * b.val) >> FP_SHIFT);
+}
+
+static inline fp_t _div(fp_t a, fp_t b)
+{
+#if !defined(__KERNEL__) && !defined(unlikely)
+#define unlikely(x) (x)
+#define DO_UNDEF_UNLIKELY
+#endif
+	/* Divide first if the left shift below could overflow; this
+	 * trades precision for safety. a.val << FP_SHIFT stays within
+	 * a signed 64-bit range when |a.val| <= 2^(62 - FP_SHIFT).
+	 */
+	if (unlikely(a.val > (((fpbuf_t) 1) << (62 - FP_SHIFT)) ||
+		     a.val < -(((fpbuf_t) 1) << (62 - FP_SHIFT))))
+		return _fp((a.val / b.val) << FP_SHIFT);
+	else
+		return _fp((a.val << FP_SHIFT) / b.val);
+#ifdef DO_UNDEF_UNLIKELY
+#undef unlikely
+#undef DO_UNDEF_UNLIKELY
+#endif
+}
+
+static inline fp_t _add(fp_t a, fp_t b)
+{
+	return _fp(a.val + b.val);
+}
+
+static inline fp_t _sub(fp_t a, fp_t b)
+{
+	return _fp(a.val - b.val);
+}
+
+static inline fp_t _neg(fp_t x)
+{
+	return _fp(-x.val);
+}
+
+static inline fp_t _abs(fp_t x)
+{
+	return _fp(abs(x.val));
+}
+
+/* works the same as casting float/double to integer */
+static inline fpbuf_t _fp_to_integer(fp_t x)
+{
+	return _floor(_abs(x)) * ((x.val > 0) ? 1 : -1);
+}
+
+static inline fp_t _integer_to_fp(fpbuf_t x)
+{
+	return _frac(x, 1);
+}
+
+static inline int _leq(fp_t a, fp_t b)
+{
+	return a.val <= b.val;
+}
+
+static inline int _geq(fp_t a, fp_t b)
+{
+	return a.val >= b.val;
+}
+
+static inline int _lt(fp_t a, fp_t b)
+{
+	return a.val < b.val;
+}
+
+static inline int _gt(fp_t a, fp_t b)
+{
+	return a.val > b.val;
+}
+
+static inline int _eq(fp_t a, fp_t b)
+{
+	return a.val == b.val;
+}
+
+static inline fp_t _max(fp_t a, fp_t b)
+{
+	if (a.val < b.val)
+		return b;
+	else
+		return a;
+}
+#endif
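
For readers who want to play with the header: below is a minimal userspace sketch (not part of the patch) exercising the API above. It builds through the header's !__KERNEL__ branch; the include path and the chosen values are assumptions for illustration only.

    #include <stdio.h>
    #include "fpmath.h"	/* the header above, built outside the kernel */

    int main(void)
    {
    	fp_t a = _frac(3, 2);	/* 1.5  -> 1536/1024 */
    	fp_t b = _frac(1, 4);	/* 0.25 ->  256/1024 */

    	/* multiply and divide keep the Q10 fixed-point format */
    	printf("a*b = %lld/1024\n", (long long) fp2str(_mul(a, b))); /* 384  */
    	printf("a/b = %lld/1024\n", (long long) fp2str(_div(a, b))); /* 6144 */

    	/* _floor truncates; _round rounds to nearest (half up) */
    	printf("floor(a) = %lld, round(a) = %lld\n",
    	       (long long) _floor(a), (long long) _round(a)); /* 1, 2 */
    	return 0;
    }
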
diff --git a/litmus/Kconfig b/litmus/Kconfig
index 68459d4dca41..48ff3e3c657c 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -79,6 +79,52 @@ config SCHED_CPU_AFFINITY
 
 	  Say Yes if unsure.
 
+choice
+	prompt "EDF Tie-Break Behavior"
+	default EDF_TIE_BREAK_LATENESS_NORM
+	help
+	  Allows the configuration of tie-breaking behavior when the deadlines
+	  of two EDF-scheduled tasks are equal.
+
+	config EDF_TIE_BREAK_LATENESS
+	bool "Lateness-based Tie Break"
+	help
+	  Break ties between two jobs, A and B, based upon the lateness of
+	  their prior jobs. The job with the greatest lateness has priority.
+	  Note that lateness has a negative value if the prior job finished
+	  before its deadline.
+
+	config EDF_TIE_BREAK_LATENESS_NORM
+	bool "Normalized Lateness-based Tie Break"
+	help
+	  Break ties between two jobs, A and B, based upon the lateness,
+	  normalized by relative deadline, of their prior jobs. The job with
+	  the greatest normalized lateness has priority. Note that lateness
+	  has a negative value if the prior job finished before its deadline.
+
+	  Normalized lateness tie-breaks are likely desirable over
+	  non-normalized tie-breaks if the execution times and/or relative
+	  deadlines of tasks in a task set vary greatly.
+
+	config EDF_TIE_BREAK_HASH
+	bool "Hash-based Tie Breaks"
+	help
+	  Break ties between two jobs, A and B, with equal deadlines by using
+	  a uniform hash; i.e.: hash(A.pid, A.job_num) < hash(B.pid, B.job_num).
+	  Job A has a ~50% chance of winning a given tie-break.
+
+	config EDF_PID_TIE_BREAK
+	bool "PID-based Tie Breaks"
+	help
+	  Break ties based upon OS-assigned process IDs. Use this option if
+	  the algorithm's real-time analysis requires it, or if per-task
+	  response-time jitter must be minimized in overload conditions.
+
+	  NOTES:
+	  * This tie-breaking method was the default in Litmus 2012.2 and
+	    before.
+
+endchoice
+
 endmenu
 
 menu "Tracing"
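
The practical difference between the plain and normalized lateness policies is easiest to see with numbers. The following userspace sketch (not part of the patch; the task parameters are invented) evaluates one tie using the fpmath.h helpers introduced by this commit:

    #include <stdio.h>
    #include "fpmath.h"	/* from this commit, built via its !__KERNEL__ branch */

    int main(void)
    {
    	/* Hypothetical predecessors, values in nanoseconds:
    	 * task A: 2 ms late with a  10 ms relative deadline,
    	 * task B: 3 ms late with a 100 ms relative deadline. */
    	fp_t anorm = _frac(2000000, 10000000);	/* ~0.20 */
    	fp_t bnorm = _frac(3000000, 100000000);	/* ~0.03 */

    	/* EDF_TIE_BREAK_LATENESS favors B (3 ms > 2 ms), but
    	 * EDF_TIE_BREAK_LATENESS_NORM favors A, whose predecessor
    	 * overran a far larger fraction of its relative deadline. */
    	printf("A wins the normalized tie-break: %s\n",
    	       _gt(anorm, bnorm) ? "yes" : "no");
    	return 0;
    }
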
diff --git a/litmus/edf_common.c b/litmus/edf_common.c
index 54285aaa0405..d71a6921dbc1 100644
--- a/litmus/edf_common.c
+++ b/litmus/edf_common.c
@@ -14,6 +14,32 @@
 
 #include <litmus/edf_common.h>
 
+#ifdef CONFIG_EDF_TIE_BREAK_LATENESS_NORM
+#include <litmus/fpmath.h>
+#endif
+
+#ifdef CONFIG_EDF_TIE_BREAK_HASH
+#include <linux/hash.h>
+static inline long edf_hash(struct task_struct *t)
+{
+	/* pid is 32 bits, so normally we would shove that into the
+	 * upper 32 bits, put the job number in the bottom, and hash
+	 * the 64-bit number with hash_64(). Sadly, in testing,
+	 * hash_64() doesn't distribute keys where the upper bits are
+	 * close together (as would be the case with pids) and the job
+	 * numbers are equal (as would be the case with synchronous
+	 * task sets with all relative deadlines equal).
+	 *
+	 * A 2006 Linux patch proposed the following solution
+	 * (but for some reason it wasn't accepted...).
+	 *
+	 * At least this workaround works for 32-bit systems as well.
+	 */
+	return hash_32(hash_32((u32)tsk_rt(t)->job_params.job_no, 32) ^ t->pid, 32);
+}
+#endif
+
+
 /* edf_higher_prio - returns true if first has a higher EDF priority
  * than second. Deadline ties are broken by PID.
  *
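
The ~50% claim behind the edf_hash() helper in the hunk above can be spot-checked from userspace. The sketch below is illustrative only: it re-creates hash_32() under the assumption of the classic multiplicative hash from <linux/hash.h> of that era (constant 0x9e370001), which may differ from the kernel actually targeted.

    #include <stdio.h>
    #include <stdint.h>

    /* assumed 2012-era <linux/hash.h> multiplicative hash */
    #define GOLDEN_RATIO_PRIME_32 0x9e370001U

    static uint32_t hash_32(uint32_t val, unsigned int bits)
    {
    	return (val * GOLDEN_RATIO_PRIME_32) >> (32 - bits);
    }

    static uint32_t edf_hash(uint32_t pid, uint32_t job_no)
    {
    	/* mirrors the kernel-side edf_hash() above */
    	return hash_32(hash_32(job_no, 32) ^ pid, 32);
    }

    int main(void)
    {
    	/* two tasks with adjacent pids releasing jobs in lockstep,
    	 * as in a synchronous task set with equal periods */
    	uint32_t job, wins = 0, n = 100000;

    	for (job = 1; job <= n; job++)
    		wins += edf_hash(1000, job) < edf_hash(1001, job);

    	printf("task A won %u of %u hash tie-breaks (~%.1f%%)\n",
    	       wins, n, 100.0 * wins / n);
    	return 0;
    }
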
@@ -76,8 +102,13 @@ int edf_higher_prio(struct task_struct* first,
 			return 1;
 		}
 		else if (get_deadline(first_task) == get_deadline(second_task)) {
-		/* Need to tie break */
-
+		/* Need to tie break. Each tie-break method below must set
+		 * pid_break to a non-zero value if its own criterion also
+		 * ties, so that the final PID-based tie-break decides. */
+		int pid_break;
+
+
+#if defined(CONFIG_EDF_TIE_BREAK_LATENESS)
 		/* Tie break by lateness. Jobs with greater lateness get
 		 * priority. This should spread tardiness across all tasks,
 		 * especially in task sets where all tasks have the same
@@ -86,8 +117,47 @@ int edf_higher_prio(struct task_struct* first,
 		if (get_lateness(first_task) > get_lateness(second_task)) {
 			return 1;
 		}
-		else if(get_lateness(first_task) == get_lateness(second_task)) {
-			/* Tie break by pid */
+		pid_break = (get_lateness(first_task) == get_lateness(second_task));
+
+
+#elif defined(CONFIG_EDF_TIE_BREAK_LATENESS_NORM)
+		/* Tie break by lateness, normalized by relative deadline.
+		 * Jobs with greater normalized lateness get priority.
+		 *
+		 * Note: Considered using the algebraically equivalent
+		 *	lateness(first)*relative_deadline(second) >
+		 *	lateness(second)*relative_deadline(first)
+		 * to avoid fixed-point math, but values are prone to
+		 * overflow if inputs are on the order of several seconds,
+		 * even in 64-bit: two values of ~5 s (5*10^9 ns) multiply
+		 * to ~2.5*10^19, which exceeds INT64_MAX (~9.2*10^18).
+		 */
+		fp_t fnorm = _frac(get_lateness(first_task),
+				   get_rt_relative_deadline(first_task));
+		fp_t snorm = _frac(get_lateness(second_task),
+				   get_rt_relative_deadline(second_task));
+		if (_gt(fnorm, snorm)) {
+			return 1;
+		}
+		pid_break = _eq(fnorm, snorm);
+
+
+#elif defined(CONFIG_EDF_TIE_BREAK_HASH)
+		/* Tie break by comparing hashes of the (pid, job#) tuple.
+		 * There should be a 50% chance that first_task has a
+		 * higher priority than second_task. */
+		long fhash = edf_hash(first_task);
+		long shash = edf_hash(second_task);
+		if (fhash < shash) {
+			return 1;
+		}
+		pid_break = (fhash == shash);
+#else
+
+
+		/* CONFIG_EDF_PID_TIE_BREAK */
+		pid_break = 1; /* fall through to the tie-break by PID below */
+#endif
+		/* Tie break by pid */
+		if (pid_break) {
 			if (first_task->pid < second_task->pid) {
 				return 1;
 			}