author		peter <ztong@cs.unc.edu>	2019-04-16 14:33:51 -0400
committer	peter <ztong@cs.unc.edu>	2019-04-16 14:33:51 -0400
commit		a8eb76c477bdb397ca6204b89a52fbd088c1baa7 (patch)
tree		0e87b276a2d2cbcfca686bf4b43d0eb3b6c0cdbf /litmus
parent		ae762a4dbb7020692f53358e0cb6aa9a923edf48 (diff)
Added sched_edfsc plugin, which is still under construction
Diffstat (limited to 'litmus')
-rw-r--r--	litmus/sched_edfsc.c	252
1 file changed, 252 insertions, 0 deletions
diff --git a/litmus/sched_edfsc.c b/litmus/sched_edfsc.c
new file mode 100644
index 000000000000..a2224aaf883a
--- /dev/null
+++ b/litmus/sched_edfsc.c
@@ -0,0 +1,252 @@
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <litmus/debug_trace.h>
#include <litmus/litmus.h>
#include <litmus/jobs.h>
#include <litmus/sched_plugin.h>
#include <litmus/edf_common.h>
#include <litmus/sched_trace.h>
#include <litmus/trace.h>

#include <litmus/preempt.h>
#include <litmus/budget.h>
#include <litmus/np.h>

#include <litmus/bheap.h>

typedef struct {
	int cpu;
	struct task_struct* linked;	// task the global scheduler has assigned to this CPU
	struct task_struct* scheduled;	// container or migrating task currently running here
	atomic_t will_schedule;
	struct bheap_node* hn;		// this CPU's node in edfsc_cpu_heap
} cpu_entry_t;

typedef struct {
	rt_domain_t domain;
	struct task_struct* container;
	struct task_struct* scheduled;	// fixed task currently running in this container
	u64 last_exec_t;		// start time of scheduled's current execution

	// alias for this container's ready-queue lock
	#define c_lock domain.ready_lock
} cont_domain_t;

DEFINE_PER_CPU(cpu_entry_t, edfsc_cpu_entries);

cpu_entry_t* edfsc_cpus[NR_CPUS];

static struct bheap_node edfsc_cpu_heap_node[NR_CPUS];
static struct bheap edfsc_cpu_heap;
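
/* Editor's sketch, not part of this patch: edfsc_cpu_heap presumably
 * orders CPUs by the priority of their linked tasks, as in GSN-EDF, so
 * the heap nodes above would be initialized with a comparator along
 * these lines (cpu_lower_prio is a hypothetical name taken from the
 * GSN-EDF plugin). */
static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b)
{
	cpu_entry_t *a = _a->value;
	cpu_entry_t *b = _b->value;
	/* Arguments are deliberately swapped: the CPU running the
	 * lowest-priority task surfaces first, so it is preempted first. */
	return edf_higher_prio(b->linked, a->linked);
}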

struct task_struct* container_tasks[NR_CPUS];

static cont_domain_t container_domains[NR_CPUS];

static rt_domain_t gsched_domain;
#define g_lock (gsched_domain.ready_lock)

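/* Editor's sketch, not part of this patch: edfsc_cschedule() below calls
 * requeue() and job_completion(), which this patch does not yet define.
 * The PSN-EDF plugin's versions, paraphrased here, show the behavior the
 * call sites appear to assume. */
static void requeue(struct task_struct* t, rt_domain_t* edf)
{
	BUG_ON(!is_realtime(t));
	tsk_rt(t)->completed = 0;
	/* a released job goes on the ready queue; an unreleased one
	 * must wait in the release queue */
	if (is_early_releasing(t) || is_released(t, litmus_clock()))
		__add_ready(edf, t);
	else
		add_release(edf, t);
}

static void job_completion(struct task_struct* t, int forced)
{
	sched_trace_task_completion(t, forced);
	TRACE_TASK(t, "job_completion().\n");

	tsk_rt(t)->completed = 0;
	/* set up release time and deadline of the next job */
	prepare_for_next_period(t);
}
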
// Schedules fixed tasks inside a container; edf is the container's domain,
// obtained from the container_domain pointer in the container's task_struct.
// Assumes prev is the task that was running on this processor when schedule() was called.
static struct task_struct* edfsc_cschedule(rt_domain_t* edf, struct task_struct* prev)
{
	cont_domain_t* cedf = container_of(edf, cont_domain_t, domain);

	struct task_struct* next;

	int out_of_time, sleep, preempt,
	    np, exists, blocks, resched;

	raw_spin_lock(&cedf->c_lock);

	/* Sanity checking:
	 * unlike under G-EDF, when a task exits (dies),
	 * cedf->scheduled may be NULL while prev _is_ real-time.
	 */
	BUG_ON(cedf->scheduled && cedf->scheduled != prev);
	BUG_ON(cedf->scheduled && !is_realtime(prev));

	/* (0) Determine state */
	exists = cedf->scheduled != NULL;
	blocks = exists && !is_current_running();
	out_of_time = exists && budget_enforced(cedf->scheduled)
	                     && budget_exhausted(cedf->scheduled);
	np = exists && is_np(cedf->scheduled);
	sleep = exists && is_completed(cedf->scheduled);
	preempt = edf_preemption_needed(edf, prev);

	/* If we need to preempt, do so.
	 * The following checks set resched to 1 in case of special
	 * circumstances.
	 */
	resched = preempt;

	/* If a task blocks we have no choice but to reschedule.
	 */
	if (blocks)
		resched = 1;

	/* Request a sys_exit_np() call if we would like to preempt but cannot.
	 * Multiple calls to request_exit_np() don't hurt.
	 */
	if (np && (out_of_time || preempt || sleep))
		request_exit_np(cedf->scheduled);

	/* Any task that is preemptable and either exhausts its execution
	 * budget or wants to sleep completes. We may have to reschedule after
	 * this.
	 */
	if (!np && (out_of_time || sleep)) {
		job_completion(cedf->scheduled, !sleep);
		resched = 1;
	}

	/* The final scheduling decision. Do we need to switch for some reason?
	 * Switch if we are in RT mode and have no task or if we need to
	 * resched.
	 */
	next = NULL;
	if ((!np || blocks) && (resched || !exists)) {
		/* When preempting a task that does not block, then
		 * re-insert it into either the ready queue or the
		 * release queue (if it completed). requeue() picks
		 * the appropriate queue.
		 */
		if (cedf->scheduled && !blocks)
			requeue(cedf->scheduled, edf);
		next = __take_ready(edf);
	} else
		/* Only override Linux scheduler if we have a real-time task
		 * scheduled that needs to continue.
		 */
		if (exists)
			next = prev;

	if (next) {
		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
	} else {
		TRACE("becoming idle at %llu\n", litmus_clock());
	}

	cedf->scheduled = next;
	sched_state_task_picked();
	raw_spin_unlock(&cedf->c_lock);

	return next;
}

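/* Editor's note, not part of this patch: edfsc_gschedule() below also
 * relies on unlink(), link_task_to_cpu(), and curr_job_completion(),
 * which this file does not define. The names and call sites match the
 * GSN-EDF plugin, whose implementations are presumably the model here. */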
// Assumes prev is the task that was running on this processor when schedule() was called.
static struct task_struct* edfsc_gschedule(struct task_struct* prev)
{
	cpu_entry_t* entry = this_cpu_ptr(&edfsc_cpu_entries);
	int out_of_time, sleep, preempt, np, exists, blocks, is_cont;
	struct task_struct* next = NULL;

	raw_spin_lock(&g_lock);

	/* sanity checking */
	BUG_ON(entry->scheduled && entry->scheduled != prev && entry->scheduled->container_domain != NULL);
	BUG_ON(entry->scheduled && !is_realtime(prev));
	BUG_ON(is_realtime(prev) && !entry->scheduled);

	/* (0) Determine state */
	exists = entry->scheduled != NULL;
	blocks = exists && !is_current_running();
	out_of_time = exists && budget_enforced(entry->scheduled)
	                     && budget_exhausted(entry->scheduled);
	np = exists && is_np(entry->scheduled);
	sleep = exists && is_completed(entry->scheduled);
	preempt = entry->scheduled != entry->linked;
	// a container task carries a container_domain pointer (dereferenced below)
	is_cont = exists && entry->scheduled->container_domain != NULL;

#ifdef WANT_ALL_SCHED_EVENTS
	TRACE_TASK(prev, "invoked edfsc_gschedule.\n");
#endif

	if (exists)
		TRACE_TASK(prev,
		           "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d "
		           "state:%d sig:%d is_cont:%d\n",
		           blocks, out_of_time, np, sleep, preempt,
		           prev->state, signal_pending(prev), is_cont);

	// a running container that needs no global-level action delegates to
	// its internal EDF scheduler; g_lock must be released before returning
	if (is_cont && !sleep && !preempt && !out_of_time) {
		next = edfsc_cschedule(entry->scheduled->container_domain, prev);
		raw_spin_unlock(&g_lock);
		return next;
	}

	if (entry->linked && preempt)
		TRACE_TASK(prev, "will be preempted by %s/%d\n",
		           entry->linked->comm, entry->linked->pid);

	/* If a task blocks we have no choice but to reschedule.
	 * Note: containers never block, since their task_structs always have
	 * state TASK_RUNNING.
	 */
	if (blocks)
		unlink(entry->scheduled);

	/* Request a sys_exit_np() call if we would like to preempt but cannot.
	 * We need to make sure to update the link structure anyway in case
	 * that we are still linked. Multiple calls to request_exit_np() don't
	 * hurt.
	 */
	if (np && (out_of_time || preempt || sleep)) {
		unlink(entry->scheduled);
		request_exit_np(entry->scheduled);
	}

	/* Any task that is preemptable and either exhausts its execution
	 * budget or wants to sleep completes. We may have to reschedule after
	 * this. Don't do a job completion if we block (can't have timers running
	 * for blocked jobs).
	 */
	if (!np && (out_of_time || sleep))
		curr_job_completion(!sleep);

	/* Link pending task if we became unlinked.
	 */
	if (!entry->linked)
		link_task_to_cpu(__take_ready(&gsched_domain), entry);

	/* The final scheduling decision. Do we need to switch for some reason?
	 * If linked is different from scheduled, then select linked as next.
	 */
	if ((!np || blocks) &&
	    entry->linked != entry->scheduled) {
		/* Schedule a linked job? */
		if (entry->linked) {
			entry->linked->rt_param.scheduled_on = entry->cpu;
			next = entry->linked;
			TRACE_TASK(next, "scheduled_on = P%d\n", smp_processor_id());
		}
		if (entry->scheduled) {
			/* not gonna be scheduled soon */
			entry->scheduled->rt_param.scheduled_on = NO_CPU;
			TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n");
		}
	} else
		/* Only override Linux scheduler if we have a real-time task
		 * scheduled that needs to continue.
		 */
		if (exists)
			next = prev;

	sched_state_task_picked();

	raw_spin_unlock(&g_lock);

#ifdef WANT_ALL_SCHED_EVENTS
	TRACE("g_lock released, next=0x%p\n", next);

	if (next)
		TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
	else if (exists && !next)
		TRACE("becomes idle at %llu.\n", litmus_clock());
#endif

	return next;
}
\ No newline at end of file
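
Editor's sketch, not part of this patch: before edfsc_gschedule() can be
selected at runtime, the plugin will also need LITMUS^RT registration
boilerplate along these lines, patterned after the other in-tree plugins
(the plugin name "EDF-sc" and the init-function name are assumptions, and
<linux/module.h> would need to be included for module_init):

static struct sched_plugin edfsc_plugin __cacheline_aligned_in_smp = {
	.plugin_name		= "EDF-sc",
	.schedule		= edfsc_gschedule,
	.complete_job		= complete_job,
};

static int __init init_edfsc(void)
{
	return register_sched_plugin(&edfsc_plugin);
}

module_init(init_edfsc);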