aboutsummaryrefslogtreecommitdiffstats
path: root/litmus/rm_common.c
diff options
context:
space:
mode:
Diffstat (limited to 'litmus/rm_common.c')
-rw-r--r--litmus/rm_common.c160
1 files changed, 160 insertions, 0 deletions
diff --git a/litmus/rm_common.c b/litmus/rm_common.c
new file mode 100644
index 000000000000..88f83bcbd9d8
--- /dev/null
+++ b/litmus/rm_common.c
@@ -0,0 +1,160 @@
/*
 * litmus/rm_common.c
 *
 * Common functions for the rate-monotonic (RM) scheduler.
 */
6
7#include <linux/percpu.h>
8#include <linux/sched.h>
9#include <linux/list.h>
10
11#include <litmus/litmus.h>
12#include <litmus/sched_plugin.h>
13#include <litmus/sched_trace.h>
14
15#include <litmus/rm_common.h>
16
17/* rm_higher_prio - returns true if first has a higher EDF priority
18 * than second. Deadline ties are broken by PID.
19 *
20 * both first and second may be NULL
21 */
22int rm_higher_prio(struct task_struct* first,
23 struct task_struct* second)
24{
25 struct task_struct *first_task = first;
26 struct task_struct *second_task = second;
27
28 /* There is no point in comparing a task to itself. */
29 if (first && first == second) {
30 TRACE_TASK(first,
31 "WARNING: pointless edf priority comparison.\n");
32 return 0;
33 }
34
35
36 /* check for NULL tasks */
37 if (!first || !second)
38 return first && !second;
39
40#ifdef CONFIG_LITMUS_LOCKING
41
42 /* Check for inherited priorities. Change task
43 * used for comparison in such a case.
44 */
45 if (unlikely(first->rt_param.inh_task))
46 first_task = first->rt_param.inh_task;
47 if (unlikely(second->rt_param.inh_task))
48 second_task = second->rt_param.inh_task;
49
50 /* Check for priority boosting. Tie-break by start of boosting.
51 */
52 if (unlikely(is_priority_boosted(first_task))) {
53 /* first_task is boosted, how about second_task? */
54 if (!is_priority_boosted(second_task) ||
55 lt_before(get_boost_start(first_task),
56 get_boost_start(second_task)))
57 return 1;
58 else
59 return 0;
60 } else if (unlikely(is_priority_boosted(second_task)))
61 /* second_task is boosted, first is not*/
62 return 0;
63
64#endif
65
66 if (!is_realtime(second_task))
67 return true;
68
69 if (shorter_period(first_task, second_task))
70 return true;
71
72 if (get_rt_period(first_task) == get_rt_period(second_task))
73 {
74#ifdef CONFIG_LITMUS_SOFTIRQD
75 if (first_task->rt_param.is_proxy_thread < second_task->rt_param.is_proxy_thread)
76 {
77 return true;
78 }
79 if (first_task->rt_param.is_proxy_thread == second_task->rt_param.is_proxy_thread)
80 {
81#endif
82 if (first_task->pid < second_task->pid)
83 {
84 return true;
85 }
86 if (first_task->pid == second_task->pid)
87 {
88 return !second->rt_param.inh_task;
89 }
90#ifdef CONFIG_LITMUS_SOFTIRQD
91 }
92#endif
93 }
94
95 return false;
96
97#if 0
98 return !is_realtime(second_task) ||
99 shorter_period(first_task, second_task) ||
100 ((get_rt_period(first_task) == get_rt_period(second_task)) && earlier_deadline(first_task, second_task))
101
102#ifdef CONFIG_LITMUS_SOFTIRQD
103 /* proxy threads always lose w/o inheritance. */
104 (first_task->rt_param.is_proxy_thread <
105 second_task->rt_param.is_proxy_thread) ||
106#endif
107
108 /* is the period of the first task shorter?
109 * Then it has higher priority.
110 */
111 shorter_period(first_task, second_task) ||
112
113 (earlier_deadline(first_task, second_task) ||
114
115 /* Do we have a deadline tie?
116 * Then break by PID.
117 */
118 (get_rt_period(first_task) == get_rt_period(second_task) &&
119 (first_task->pid < second_task->pid ||
120
121 /* If the PIDs are the same then the task with the inherited
122 * priority wins.
123 */
124 (first_task->pid == second_task->pid &&
125 !second->rt_param.inh_task)));
126#endif
127}
128
/* rm_ready_order - binary heap comparison callback: orders ready-queue
 * nodes by rate-monotonic priority via rm_higher_prio().
 */
int rm_ready_order(struct bheap_node* a, struct bheap_node* b)
{
	struct task_struct *ta = bheap2task(a);
	struct task_struct *tb = bheap2task(b);

	return rm_higher_prio(ta, tb);
}
133
/* rm_domain_init - initialize an rt_domain whose ready queue is ordered
 * by rate-monotonic priority (rm_ready_order).
 *
 * @rt:       the domain to initialize
 * @resched:  callback invoked when a reschedule may be needed
 * @release:  callback invoked to release pending jobs
 */
void rm_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
		    release_jobs_t release)
{
	rt_domain_init(rt, rm_ready_order, resched, release);
}
139
140/* need_to_preempt - check whether the task t needs to be preempted
141 * call only with irqs disabled and with ready_lock acquired
142 * THIS DOES NOT TAKE NON-PREEMPTIVE SECTIONS INTO ACCOUNT!
143 */
144int rm_preemption_needed(rt_domain_t* rt, struct task_struct *t)
145{
146 /* we need the read lock for rm_ready_queue */
147 /* no need to preempt if there is nothing pending */
148 if (!__jobs_pending(rt))
149 return 0;
150 /* we need to reschedule if t doesn't exist */
151 if (!t)
152 return 1;
153
154 /* NOTE: We cannot check for non-preemptibility since we
155 * don't know what address space we're currently in.
156 */
157
158 /* make sure to get non-rt stuff out of the way */
159 return !is_realtime(t) || rm_higher_prio(__next_ready(rt), t);
160}