Diffstat (limited to 'litmus/sched_litmus.c')
-rw-r--r--  litmus/sched_litmus.c | 325
1 file changed, 325 insertions(+), 0 deletions(-)
diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c
new file mode 100644
index 00000000000..6553948407d
--- /dev/null
+++ b/litmus/sched_litmus.c
@@ -0,0 +1,325 @@
/* This file is included from kernel/sched.c */

#include <litmus/litmus.h>
#include <litmus/budget.h>
#include <litmus/sched_plugin.h>
#include <litmus/preempt.h>

static void update_time_litmus(struct rq *rq, struct task_struct *p)
{
        u64 delta = rq->clock - p->se.exec_start;
        if (unlikely((s64)delta < 0))
                delta = 0;
        /* per job counter */
        p->rt_param.job_params.exec_time += delta;
        /* task counter */
        p->se.sum_exec_runtime += delta;
        /* sched_clock() */
        p->se.exec_start = rq->clock;
        cpuacct_charge(p, delta);
}
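
/* Note: rq->clock follows sched_clock(), so delta above is in nanoseconds.
 * The per-job exec_time accumulated here is presumably what the enforcement
 * timer armed via update_enforcement_timer() (see <litmus/budget.h>) checks
 * against the job's allotted budget.
 */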

static void double_rq_lock(struct rq *rq1, struct rq *rq2);
static void double_rq_unlock(struct rq *rq1, struct rq *rq2);

/*
 * litmus_tick() is called by scheduler_tick() at HZ frequency.
 * Interrupts are disabled.
 */
static void litmus_tick(struct rq *rq, struct task_struct *p)
{
        TS_PLUGIN_TICK_START;

        if (is_realtime(p))
                update_time_litmus(rq, p);

        /* plugin tick */
        litmus->tick(p);

        TS_PLUGIN_TICK_END;

        return;
}

static struct task_struct *
litmus_schedule(struct rq *rq, struct task_struct *prev)
{
        struct rq *other_rq;
        struct task_struct *next;

        long was_running;
        lt_t _maybe_deadlock = 0;

        /* let the plugin schedule */
        next = litmus->schedule(prev);

        sched_state_plugin_check();

        /* check if a global plugin pulled a task from a different RQ */
        if (next && task_rq(next) != rq) {
                /* we need to migrate the task */
                other_rq = task_rq(next);
                TRACE_TASK(next, "migrate from %d\n", other_rq->cpu);

                /* while we drop the lock, the prev task could change its
                 * state
                 */
                was_running = is_running(prev);
                mb();
                raw_spin_unlock(&rq->lock);

                /* Don't race with a concurrent switch. This could deadlock in
                 * the case of cross or circular migrations. It's the job of
                 * the plugin to make sure that doesn't happen.
                 */
                TRACE_TASK(next, "stack_in_use=%d\n",
                           next->rt_param.stack_in_use);
                if (next->rt_param.stack_in_use != NO_CPU) {
                        TRACE_TASK(next, "waiting to deschedule\n");
                        _maybe_deadlock = litmus_clock();
                }
                while (next->rt_param.stack_in_use != NO_CPU) {
                        cpu_relax();
                        mb();
                        if (next->rt_param.stack_in_use == NO_CPU)
                                TRACE_TASK(next, "descheduled. Proceeding.\n");

                        if (lt_before(_maybe_deadlock + 10000000,
                                      litmus_clock())) {
                                /* We've been spinning for 10ms.
                                 * Something must be wrong!
                                 * Let's abandon the task and bail out; at
                                 * least we will have debug info instead of a
                                 * hard deadlock.
                                 */
                                TRACE_TASK(next, "stack too long in use. "
                                           "Deadlock?\n");
                                next = NULL;

                                /* bail out */
                                raw_spin_lock(&rq->lock);
                                return next;
                        }
                }
#ifdef __ARCH_WANT_UNLOCKED_CTXSW
                if (next->on_cpu)
                        TRACE_TASK(next, "waiting for !oncpu\n");
                while (next->on_cpu) {
                        cpu_relax();
                        mb();
                }
#endif
                double_rq_lock(rq, other_rq);
                mb();
                if (is_realtime(prev) && is_running(prev) != was_running) {
                        TRACE_TASK(prev,
                                   "state changed while we dropped"
                                   " the lock: is_running=%d, was_running=%d\n",
                                   is_running(prev), was_running);
                        if (is_running(prev) && !was_running) {
                                /* The prev task became unblocked; we need to
                                 * simulate the normal sequence of events for
                                 * the scheduler plugin.
                                 */
                                litmus->task_block(prev);
                                litmus->task_wake_up(prev);
                        }
                }

                set_task_cpu(next, smp_processor_id());

                /* DEBUG: now that we have the lock we need to make sure a
                 * couple of things still hold:
                 *  - it is still a real-time task
                 *  - it is still runnable (could have been stopped)
                 * If either is violated, then the active plugin is
                 * doing something wrong.
                 */
                if (!is_realtime(next) || !is_running(next)) {
                        /* BAD BAD BAD */
                        TRACE_TASK(next, "BAD: migration invariant FAILED: "
                                   "rt=%d running=%d\n",
                                   is_realtime(next),
                                   is_running(next));
                        /* drop the task */
                        next = NULL;
                }
                /* release the other CPU's runqueue, but keep ours */
                raw_spin_unlock(&other_rq->lock);
        }
        if (next) {
                next->rt_param.stack_in_use = rq->cpu;
                next->se.exec_start = rq->clock;
        }

        update_enforcement_timer(next);
        return next;
}
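
/* For context, a minimal sketch of the other half of the stack_in_use
 * handshake that litmus_schedule() spins on above. The helper name and its
 * exact placement are assumptions (the real counterpart lives elsewhere in
 * the LITMUS^RT patch, presumably on the context-switch completion path);
 * the invariant it must provide is that once prev's stack can no longer be
 * in use, stack_in_use is reset to NO_CPU.
 */
static inline void litmus_release_stack_sketch(struct task_struct *prev)
{
        if (is_realtime(prev)) {
                /* make prior stores visible before publishing NO_CPU
                 * to CPUs spinning in litmus_schedule() */
                smp_wmb();
                prev->rt_param.stack_in_use = NO_CPU;
        }
}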

static void enqueue_task_litmus(struct rq *rq, struct task_struct *p,
                                int flags)
{
        if (flags & ENQUEUE_WAKEUP) {
                sched_trace_task_resume(p);
                tsk_rt(p)->present = 1;
                /* LITMUS^RT plugins need to update the state
                 * _before_ making it available in global structures.
                 * Linux gets away with being lazy about the task state
                 * update. We can't do that, hence we already update the
                 * task state here.
                 *
                 * WARNING: this needs to be re-evaluated when porting
                 * to newer kernel versions.
                 */
                p->state = TASK_RUNNING;
                litmus->task_wake_up(p);

                rq->litmus.nr_running++;
        } else
                TRACE_TASK(p, "ignoring an enqueue, not a wake up.\n");
}

static void dequeue_task_litmus(struct rq *rq, struct task_struct *p,
                                int flags)
{
        if (flags & DEQUEUE_SLEEP) {
                litmus->task_block(p);
                tsk_rt(p)->present = 0;
                sched_trace_task_block(p);

                rq->litmus.nr_running--;
        } else
                TRACE_TASK(p, "ignoring a dequeue, not going to sleep.\n");
}

static void yield_task_litmus(struct rq *rq)
{
        BUG_ON(rq->curr != current);
        /* sched_yield() is called to trigger delayed preemptions.
         * Thus, mark the current task as needing to be rescheduled.
         * This will cause the scheduler plugin to be invoked, which can
         * then determine if a preemption is still required.
         */
        clear_exit_np(current);
        litmus_reschedule_local();
}

/* Plugins are responsible for this.
 */
static void check_preempt_curr_litmus(struct rq *rq, struct task_struct *p, int flags)
{
}

static void put_prev_task_litmus(struct rq *rq, struct task_struct *p)
{
}

static void pre_schedule_litmus(struct rq *rq, struct task_struct *prev)
{
        update_time_litmus(rq, prev);
        if (!is_running(prev))
                tsk_rt(prev)->present = 0;
}

/* pick_next_task_litmus() - wrapper around litmus_schedule()
 *
 * Returns the next task to be scheduled.
 */
static struct task_struct *pick_next_task_litmus(struct rq *rq)
{
        /* get the to-be-switched-out task (prev) */
        struct task_struct *prev = rq->litmus.prev;
        struct task_struct *next;

        /* if not called from schedule() but from somewhere
         * else (e.g., migration), return now!
         */
        if (!rq->litmus.prev)
                return NULL;

        rq->litmus.prev = NULL;

        TS_PLUGIN_SCHED_START;
        next = litmus_schedule(rq, prev);
        TS_PLUGIN_SCHED_END;

        return next;
}
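
/* A rough illustration of the handshake assumed above; the actual hook in the
 * patched schedule() may differ. The core scheduler is expected to stash the
 * outgoing task before walking the scheduling classes, roughly:
 *
 *      rq->litmus.prev = prev;
 *      next = pick_next_task(rq);
 *
 * so that callers reaching pick_next_task() outside of schedule() (e.g., the
 * migration path) see rq->litmus.prev == NULL and get NULL from this class.
 */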

static void task_tick_litmus(struct rq *rq, struct task_struct *p, int queued)
{
        /* nothing to do; tick-related work is done by litmus_tick() */
        return;
}

static void switched_to_litmus(struct rq *rq, struct task_struct *p)
{
}

static void prio_changed_litmus(struct rq *rq, struct task_struct *p,
                                int oldprio)
{
}

unsigned int get_rr_interval_litmus(struct rq *rq, struct task_struct *p)
{
        /* return infinity */
        return 0;
}

/* This is called when a task becomes a real-time task, either due to a SCHED_*
 * class transition or due to PI mutex inheritance. We don't handle Linux PI
 * mutex inheritance yet (and probably never will). Use LITMUS-provided
 * synchronization primitives instead.
 */
static void set_curr_task_litmus(struct rq *rq)
{
        rq->curr->se.exec_start = rq->clock;
}


#ifdef CONFIG_SMP
/* execve tries to rebalance the task in this scheduling domain.
 * We don't care about the scheduling domain; this can get called from
 * exec, fork, and wakeup.
 */
static int
select_task_rq_litmus(struct task_struct *p, int sd_flag, int flags)
{
        /* preemption is already disabled.
         * We don't want to change the cpu here
         */
        return task_cpu(p);
}
#endif

static const struct sched_class litmus_sched_class = {
        /* Since commit 34f971f6 the stop/migrate worker threads have a class
         * of their own, which is the highest-priority class. We don't support
         * cpu-hotplug or cpu throttling. This allows LITMUS to use up to 1.0
         * of the CPU capacity.
         */
        .next = &stop_sched_class,
        .enqueue_task = enqueue_task_litmus,
        .dequeue_task = dequeue_task_litmus,
        .yield_task = yield_task_litmus,

        .check_preempt_curr = check_preempt_curr_litmus,

        .pick_next_task = pick_next_task_litmus,
        .put_prev_task = put_prev_task_litmus,

#ifdef CONFIG_SMP
        .select_task_rq = select_task_rq_litmus,

        .pre_schedule = pre_schedule_litmus,
#endif

        .set_curr_task = set_curr_task_litmus,
        .task_tick = task_tick_litmus,

        .get_rr_interval = get_rr_interval_litmus,

        .prio_changed = prio_changed_litmus,
        .switched_to = switched_to_litmus,
};
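
/* A hedged sketch of the per-runqueue state this class relies on. The real
 * definition is part of struct rq in kernel/sched.c; the field names below
 * are inferred from their uses above (rq->litmus.nr_running, rq->litmus.prev),
 * and everything else about the layout is an assumption.
 */
struct litmus_rq_sketch {
        unsigned long nr_running;       /* number of runnable LITMUS^RT tasks */
        struct task_struct *prev;       /* set by schedule() before the class pick */
};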