aboutsummaryrefslogtreecommitdiffstats
path: root/litmus/edf_split_common.c
diff options
context:
space:
mode:
Diffstat (limited to 'litmus/edf_split_common.c')
-rw-r--r--litmus/edf_split_common.c171
1 files changed, 171 insertions, 0 deletions
diff --git a/litmus/edf_split_common.c b/litmus/edf_split_common.c
new file mode 100644
index 000000000000..76b100c9c5b9
--- /dev/null
+++ b/litmus/edf_split_common.c
@@ -0,0 +1,171 @@
/*
 * litmus/edf_split_common.c
 *
 * Common functions for EDF based scheduler with split jobs.
 */
6
7#include <linux/percpu.h>
8#include <linux/sched.h>
9#include <linux/list.h>
10
11#include <litmus/litmus.h>
12#include <litmus/sched_plugin.h>
13#include <litmus/sched_trace.h>
14
15#include <litmus/edf_split_common.h>
16
17#ifdef CONFIG_EDF_TIE_BREAK_LATENESS_NORM
18#include <litmus/fpmath.h>
19#endif
20
#ifdef CONFIG_EDF_TIE_BREAK_HASH
#include <linux/hash.h>
/* Map a task's (job number, pid) pair to a pseudo-random value used as a
 * deadline tie-break.  Deterministic for a given job, but uncorrelated
 * across tasks, so ties are broken "fairly" (~50/50).
 */
static inline long edf_split_hash(struct task_struct *t)
{
	/* pid is 32 bits, so normally we would shove that into the
	 * upper 32-bits and put the job number in the bottom
	 * and hash the 64-bit number with hash_64(). Sadly,
	 * in testing, hash_64() doesn't distribute keys where the
	 * upper bits are close together (as would be the case with
	 * pids) and job numbers are equal (as would be the case with
	 * synchronous task sets with all relative deadlines equal).
	 *
	 * A 2006 Linux patch proposed the following solution
	 * (but for some reason it wasn't accepted...).
	 *
	 * At least this workaround works for 32-bit systems as well.
	 */
	return hash_32(hash_32((u32)tsk_rt(t)->job_params.job_no, 32) ^ t->pid, 32);
}
#endif
41
42/* edf_higher_prio - returns true if first has a higher EDF priority
43 * than second. Deadline ties are broken by PID.
44 *
45 * both first and second may be NULL
46 */
47int edf_split_higher_prio(struct task_struct* first,
48 struct task_struct* second)
49{
50 struct task_struct *first_task = first;
51 struct task_struct *second_task = second;
52
53 /* There is no point in comparing a task to itself. */
54 if (first && first == second) {
55 TRACE_TASK(first,
56 "WARNING: pointless edf priority comparison.\n");
57 return 0;
58 }
59
60
61 /* check for NULL tasks */
62 if (!first || !second)
63 return first && !second;
64
65 if (earlier_deadline(first_task, second_task)) {
66 return 1;
67 }
68 else if (get_deadline(first_task) == get_deadline(second_task)) {
69 /* Need to tie break. All methods must set pid_break to 0/1 if
70 * first_task does not have priority over second_task.
71 */
72 int pid_break;
73
74#if defined(CONFIG_EDF_TIE_BREAK_LATENESS)
75 /* Tie break by lateness. Jobs with greater lateness get
76 * priority. This should spread tardiness across all tasks,
77 * especially in task sets where all tasks have the same
78 * period and relative deadlines.
79 */
80 if (get_lateness(first_task) > get_lateness(second_task)) {
81 return 1;
82 }
83 pid_break = (get_lateness(first_task) == get_lateness(second_task));
84
85
86#elif defined(CONFIG_EDF_TIE_BREAK_LATENESS_NORM)
87 /* Tie break by lateness, normalized by relative deadline. Jobs with
88 * greater normalized lateness get priority.
89 *
90 * Note: Considered using the algebraically equivalent
91 * lateness(first)*relative_deadline(second) >
92 lateness(second)*relative_deadline(first)
93 * to avoid fixed-point math, but values are prone to overflow if inputs
94 * are on the order of several seconds, even in 64-bit.
95 */
96 fp_t fnorm = _frac(get_lateness(first_task),
97 get_rt_relative_deadline(first_task));
98 fp_t snorm = _frac(get_lateness(second_task),
99 get_rt_relative_deadline(second_task));
100 if (_gt(fnorm, snorm)) {
101 return 1;
102 }
103 pid_break = _eq(fnorm, snorm);
104
105#elif defined(CONFIG_EDF_TIE_BREAK_HASH)
106 /* Tie break by comparing hashs of (pid, job#) tuple. There should be
107 * a 50% chance that first_task has a higher priority than second_task.
108 */
109 long fhash = edf_hash(first_task);
110 long shash = edf_hash(second_task);
111 if (fhash < shash) {
112 return 1;
113 }
114 pid_break = (fhash == shash);
115#else
116
117
118 /* CONFIG_EDF_PID_TIE_BREAK */
119 pid_break = 1; // fall through to tie-break by pid;
120#endif
121
122 /* Tie break by pid */
123 if(pid_break) {
124 if (first_task->pid < second_task->pid) {
125 return 1;
126 }
127 else if (first_task->pid == second_task->pid) {
128 /* If the PIDs are the same then the task with the
129 * inherited priority wins.
130 */
131 if (!second->rt_param.inh_task) {
132 return 1;
133 }
134 }
135 }
136 }
137 return 0; /* fall-through. prio(second_task) > prio(first_task) */
138}
139
/* Heap-order callback for the ready queue: node a precedes node b iff the
 * task stored in a has higher split-EDF priority than the task in b.
 */
int edf_split_ready_order(struct bheap_node* a, struct bheap_node* b)
{
	struct task_struct *ta = bheap2task(a);
	struct task_struct *tb = bheap2task(b);

	return edf_split_higher_prio(ta, tb);
}
144
145void edf_split_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
146 release_jobs_t release)
147{
148 rt_domain_init(rt, edf_split_ready_order, resched, release);
149}
150
151/* need_to_preempt - check whether the task t needs to be preempted
152 * call only with irqs disabled and with ready_lock acquired
153 * THIS DOES NOT TAKE NON-PREEMPTIVE SECTIONS INTO ACCOUNT!
154 */
155int edf_split_preemption_needed(rt_domain_t* rt, struct task_struct *t)
156{
157 /* we need the read lock for edf_ready_queue */
158 /* no need to preempt if there is nothing pending */
159 if (!__jobs_pending(rt))
160 return 0;
161 /* we need to reschedule if t doesn't exist */
162 if (!t)
163 return 1;
164
165 /* NOTE: We cannot check for non-preemptibility since we
166 * don't know what address space we're currently in.
167 */
168
169 /* make sure to get non-rt stuff out of the way */
170 return !is_realtime(t) || edf_split_higher_prio(__next_ready(rt), t);
171}