Diffstat (limited to 'litmus/edf_common.c')
 -rw-r--r--  litmus/edf_common.c | 227
 1 file changed, 201 insertions(+), 26 deletions(-)
diff --git a/litmus/edf_common.c b/litmus/edf_common.c
index 9b44dc2d8d1e..39ce1816ee04 100644
--- a/litmus/edf_common.c
+++ b/litmus/edf_common.c
@@ -12,40 +12,85 @@
 #include <litmus/sched_plugin.h>
 #include <litmus/sched_trace.h>
 
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+#include <litmus/locking.h>
+#endif
+
 #include <litmus/edf_common.h>
 
+#ifdef CONFIG_EDF_TIE_BREAK_LATENESS_NORM
+#include <litmus/fpmath.h>
+#endif
+
+#ifdef CONFIG_EDF_TIE_BREAK_HASH
+#include <linux/hash.h>
+static inline long edf_hash(struct task_struct *t)
+{
+	/* pid is 32 bits, so normally we would shove that into the
+	 * upper 32 bits and put the job number in the bottom
+	 * and hash the 64-bit number with hash_64(). Sadly,
+	 * in testing, hash_64() doesn't distribute keys where the
+	 * upper bits are close together (as would be the case with
+	 * pids) and job numbers are equal (as would be the case with
+	 * synchronous task sets with all relative deadlines equal).
+	 *
+	 * A 2006 Linux patch proposed the following solution
+	 * (but for some reason it wasn't accepted...).
+	 *
+	 * At least this workaround works for 32-bit systems as well.
+	 */
+	return hash_32(hash_32((u32)tsk_rt(t)->job_params.job_no, 32) ^ t->pid, 32);
+}
+#endif
+
+
 /* edf_higher_prio - returns true if first has a higher EDF priority
  * than second. Deadline ties are broken by PID.
  *
  * both first and second may be NULL
  */
-int edf_higher_prio(struct task_struct* first,
-		    struct task_struct* second)
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+int __edf_higher_prio(
+	struct task_struct* first, comparison_mode_t first_mode,
+	struct task_struct* second, comparison_mode_t second_mode)
+#else
+int edf_higher_prio(struct task_struct* first, struct task_struct* second)
+#endif
 {
 	struct task_struct *first_task = first;
 	struct task_struct *second_task = second;
 
 	/* There is no point in comparing a task to itself. */
 	if (first && first == second) {
-		TRACE_TASK(first,
-			   "WARNING: pointless edf priority comparison.\n");
+		TRACE_CUR("WARNING: pointless edf priority comparison: %s/%d\n", first->comm, first->pid);
+		WARN_ON(1);
 		return 0;
 	}
 
 
 	/* check for NULL tasks */
-	if (!first || !second)
+	if (!first || !second) {
 		return first && !second;
+	}
 
 #ifdef CONFIG_LITMUS_LOCKING
-
-	/* Check for inherited priorities. Change task
+	/* Check for EFFECTIVE priorities. Change task
 	 * used for comparison in such a case.
 	 */
-	if (unlikely(first->rt_param.inh_task))
+	if (unlikely(first->rt_param.inh_task)
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+		&& (first_mode == EFFECTIVE)
+#endif
+		) {
 		first_task = first->rt_param.inh_task;
-	if (unlikely(second->rt_param.inh_task))
+	}
+	if (unlikely(second->rt_param.inh_task)
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+		&& (second_mode == EFFECTIVE)
+#endif
+		) {
 		second_task = second->rt_param.inh_task;
+	}
 
 	/* Check for priority boosting. Tie-break by start of boosting.
 	 */
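
The hash tie-break added in the hunk above feeds the job number through hash_32() before XOR-ing in the pid and hashing again, so that tasks with adjacent pids and identical job numbers (a synchronous task set) still receive well-spread keys. The sketch below reproduces the same combining scheme in user space for illustration only: local_hash32() is a stand-in mixer (a Murmur3-style finalizer), not the kernel's hash_32(), and the pid/job values are made up.

#include <stdint.h>
#include <stdio.h>

/* Stand-in 32-bit mixer; NOT linux/hash.h's hash_32(). */
static uint32_t local_hash32(uint32_t x)
{
	x ^= x >> 16;
	x *= 0x85ebca6bu;
	x ^= x >> 13;
	x *= 0xc2b2ae35u;
	x ^= x >> 16;
	return x;
}

/* Same shape as edf_hash(): hash(hash(job_no) ^ pid). */
static uint32_t tie_break_hash(uint32_t job_no, uint32_t pid)
{
	return local_hash32(local_hash32(job_no) ^ pid);
}

int main(void)
{
	/* Synchronous task set: equal job numbers, adjacent pids. */
	for (uint32_t pid = 1000; pid < 1005; pid++)
		printf("pid %u, job 1 -> 0x%08x\n", pid, tie_break_hash(1, pid));
	return 0;
}
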
@@ -53,37 +98,167 @@ int edf_higher_prio(struct task_struct* first,
 		/* first_task is boosted, how about second_task? */
 		if (!is_priority_boosted(second_task) ||
 		    lt_before(get_boost_start(first_task),
-			      get_boost_start(second_task)))
+			      get_boost_start(second_task))) {
 			return 1;
-		else
+		}
+		else {
 			return 0;
-	} else if (unlikely(is_priority_boosted(second_task)))
+		}
+	}
+	else if (unlikely(is_priority_boosted(second_task))) {
 		/* second_task is boosted, first is not*/
 		return 0;
+	}
 
 #endif
 
-
-	return !is_realtime(second_task) ||
-
-		/* is the deadline of the first task earlier?
-		 * Then it has higher priority.
-		 */
-		earlier_deadline(first_task, second_task) ||
-
-		/* Do we have a deadline tie?
-		 * Then break by PID.
-		 */
-		(get_deadline(first_task) == get_deadline(second_task) &&
-		(first_task->pid < second_task->pid ||
-
-		/* If the PIDs are the same then the task with the inherited
-		 * priority wins.
-		 */
-		(first_task->pid == second_task->pid &&
-		!second->rt_param.inh_task)));
+	if (!is_realtime(second_task)) {
+		return 1;
+	}
+	else if (earlier_deadline(first_task, second_task)) {
+		return 1;
+	}
+	else if (get_deadline(first_task) == get_deadline(second_task)) {
+		/* Need to tie break. All methods must set pid_break to 0/1 if
+		 * first_task does not have priority over second_task.
+		 */
+		int pid_break;
+
+#if defined(CONFIG_EDF_TIE_BREAK_LATENESS)
+		/* Tie break by lateness. Jobs with greater lateness get
+		 * priority. This should spread tardiness across all tasks,
+		 * especially in task sets where all tasks have the same
+		 * period and relative deadlines.
+		 */
+		if (get_lateness(first_task) > get_lateness(second_task)) {
+			return 1;
+		}
+		pid_break = (get_lateness(first_task) == get_lateness(second_task));
+
+
+#elif defined(CONFIG_EDF_TIE_BREAK_LATENESS_NORM)
+		/* Tie break by lateness, normalized by relative deadline. Jobs with
+		 * greater normalized lateness get priority.
+		 *
+		 * Note: Considered using the algebraically equivalent
+		 *	lateness(first)*relative_deadline(second) >
+		 *		lateness(second)*relative_deadline(first)
+		 * to avoid fixed-point math, but values are prone to overflow if inputs
+		 * are on the order of several seconds, even in 64-bit.
+		 */
+		fp_t fnorm = _frac(get_lateness(first_task),
+				   get_rt_relative_deadline(first_task));
+		fp_t snorm = _frac(get_lateness(second_task),
+				   get_rt_relative_deadline(second_task));
+		if (_gt(fnorm, snorm)) {
+			return 1;
+		}
+		pid_break = _eq(fnorm, snorm);
+
+
+#elif defined(CONFIG_EDF_TIE_BREAK_HASH)
+		/* Tie break by comparing hashes of the (pid, job#) tuple. There should be
+		 * a 50% chance that first_task has a higher priority than second_task.
+		 */
+		long fhash = edf_hash(first_task);
+		long shash = edf_hash(second_task);
+		if (fhash < shash) {
+			return 1;
+		}
+		pid_break = (fhash == shash);
+#else
+
+
+		/* CONFIG_EDF_PID_TIE_BREAK */
+		pid_break = 1; // fall through to tie-break by pid;
+#endif
+
+		/* Tie break by pid */
+		if(pid_break) {
+			if (first_task->pid < second_task->pid) {
+				return 1;
+			}
+			else if (first_task->pid == second_task->pid) {
+#ifdef CONFIG_LITMUS_SOFTIRQD
+				if (first_task->rt_param.is_proxy_thread <
+					second_task->rt_param.is_proxy_thread) {
+					return 1;
+				}
+#endif
+				/* Something could be wrong if you get this far. */
+				if (unlikely(first->rt_param.inh_task ==
+					second->rt_param.inh_task)) {
+					/* Both tasks have the same inherited priority.
+					 * Likely in a bug-condition.
+					 */
+					if (likely(first->pid < second->pid)) {
+						return 1;
+					}
+					else if (first->pid == second->pid) {
+						WARN_ON(1);
+					}
+				}
+				else {
+					/* At least one task must inherit */
+					BUG_ON(!first->rt_param.inh_task &&
+						!second->rt_param.inh_task);
+
+					/* The task with the inherited priority wins. */
+					if (!second->rt_param.inh_task) {
+						TRACE_CUR("unusual comparison: "
+							"first = %s/%d first_task = %s/%d "
+							"second = %s/%d second_task = %s/%d\n",
+							first->comm, first->pid,
+							(first->rt_param.inh_task) ? first->rt_param.inh_task->comm : "(nil)",
+							(first->rt_param.inh_task) ? first->rt_param.inh_task->pid : 0,
+							second->comm, second->pid,
+							(second->rt_param.inh_task) ? second->rt_param.inh_task->comm : "(nil)",
+							(second->rt_param.inh_task) ? second->rt_param.inh_task->pid : 0);
+						return 1;
+					}
+				}
+			}
+		}
+	}
+
+	return 0; /* fall-through. prio(second_task) > prio(first_task) */
+}
+
+
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+int edf_higher_prio(struct task_struct* first, struct task_struct* second)
+{
+	return __edf_higher_prio(first, EFFECTIVE, second, EFFECTIVE);
+}
+
+int edf_max_heap_order(struct binheap_node *a, struct binheap_node *b)
+{
+	struct nested_info *l_a = (struct nested_info *)binheap_entry(a, struct nested_info, hp_binheap_node);
+	struct nested_info *l_b = (struct nested_info *)binheap_entry(b, struct nested_info, hp_binheap_node);
+
+	return __edf_higher_prio(l_a->hp_waiter_eff_prio, EFFECTIVE, l_b->hp_waiter_eff_prio, EFFECTIVE);
+}
+
+int edf_min_heap_order(struct binheap_node *a, struct binheap_node *b)
+{
+	return edf_max_heap_order(b, a);  // swap comparison
+}
+
+int edf_max_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b)
+{
+	struct nested_info *l_a = (struct nested_info *)binheap_entry(a, struct nested_info, hp_binheap_node);
+	struct nested_info *l_b = (struct nested_info *)binheap_entry(b, struct nested_info, hp_binheap_node);
+
+	return __edf_higher_prio(l_a->hp_waiter_eff_prio, BASE, l_b->hp_waiter_eff_prio, BASE);
 }
 
+int edf_min_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b)
+{
+	return edf_max_heap_base_priority_order(b, a);  // swap comparison
+}
+#endif
+
+
 int edf_ready_order(struct bheap_node* a, struct bheap_node* b)
 {
 	return edf_higher_prio(bheap2task(a), bheap2task(b));
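
The LATENESS_NORM branch in the second hunk explains why it normalizes lateness with the fixed-point _frac() helper instead of cross-multiplying lateness by the other task's relative deadline: with times kept in nanoseconds, operands of only a few seconds already overflow a signed 64-bit product. A minimal user-space check of that claim (the 5-second values are illustrative, not taken from the patch; __builtin_mul_overflow is a GCC/Clang builtin):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t lateness     = 5000000000LL; /* ~5 s in ns, illustrative */
	int64_t rel_deadline = 5000000000LL; /* ~5 s in ns, illustrative */
	int64_t prod;

	/* Returns nonzero if the product does not fit in int64_t. */
	if (__builtin_mul_overflow(lateness, rel_deadline, &prod))
		printf("5s * 5s in ns overflows int64_t (limit ~9.2e18)\n");
	else
		printf("product fits: %lld\n", (long long)prod);
	return 0;
}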