author	Bjoern Brandenburg <bbb@mpi-sws.org>	2013-06-25 01:27:07 -0400
committer	Bjoern Brandenburg <bbb@mpi-sws.org>	2013-08-07 03:46:49 -0400
commit	543810eb67bea9c3046ecb58388493bca39fe796 (patch)
tree	cf65010367e53dfbd3e39a9eb6e89dacf92348f3 /kernel/sched/litmus.c
parent	1412c8b72e192a14b8dd620f58a75f55a5490783 (diff)
Add LITMUS^RT core implementation
This patch adds the core of LITMUS^RT:
- library functionality (heaps, rt_domain, prioritization, etc.)
- budget enforcement logic
- job management
- system call backends
- virtual devices (control page, etc.)
- scheduler plugin API (and dummy plugin)

This code compiles, but is not yet integrated with the rest of Linux.
Diffstat (limited to 'kernel/sched/litmus.c')
-rw-r--r--	kernel/sched/litmus.c	350
1 file changed, 350 insertions(+), 0 deletions(-)
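
The code below drives the active scheduler plugin through a global `litmus` pointer. As a reading aid (not part of the patch), here is a minimal sketch of the callback interface implied by the call sites in this file; the actual definition lives in <litmus/sched_plugin.h>, and anything beyond the four callbacks actually invoked below is an assumption:

struct task_struct;

/* Hypothetical sketch, inferred from call sites in kernel/sched/litmus.c. */
struct sched_plugin {
        /* invoked at HZ frequency from litmus_tick() */
        void (*tick)(struct task_struct *p);
        /* invoked from litmus_schedule(); returns the next task, or NULL */
        struct task_struct *(*schedule)(struct task_struct *prev);
        /* wake-up/block notifications from enqueue_task/dequeue_task */
        void (*task_wake_up)(struct task_struct *p);
        void (*task_block)(struct task_struct *p);
};

extern struct sched_plugin *litmus;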
diff --git a/kernel/sched/litmus.c b/kernel/sched/litmus.c
new file mode 100644
index 000000000000..59428036e2c8
--- /dev/null
+++ b/kernel/sched/litmus.c
@@ -0,0 +1,350 @@
/* This file is included from kernel/sched.c */

#include "sched.h"

#include <litmus/trace.h>
#include <litmus/sched_trace.h>

#include <litmus/litmus.h>
#include <litmus/budget.h>
#include <litmus/sched_plugin.h>
#include <litmus/preempt.h>

static void update_time_litmus(struct rq *rq, struct task_struct *p)
{
        u64 delta = rq->clock - p->se.exec_start;
        if (unlikely((s64)delta < 0))
                delta = 0;
        /* per job counter */
        p->rt_param.job_params.exec_time += delta;
        /* task counter */
        p->se.sum_exec_runtime += delta;
        /* sched_clock() */
        p->se.exec_start = rq->clock;
        cpuacct_charge(p, delta);
}
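
/* Illustrative sketch, not part of this patch: the per-job counter
 * updated above is what budget enforcement compares against a task's
 * provisioned execution budget. Assuming the usual LITMUS^RT accessors
 * (get_exec_time() and get_exec_cost() from <litmus/litmus.h>), the
 * check would be along the lines of:
 *
 *      static inline int budget_exhausted(struct task_struct *t)
 *      {
 *              return get_exec_time(t) >= get_exec_cost(t);
 *      }
 */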

static void double_rq_lock(struct rq *rq1, struct rq *rq2);
static void double_rq_unlock(struct rq *rq1, struct rq *rq2);

/*
 * litmus_tick() gets called by scheduler_tick() at HZ frequency;
 * interrupts are disabled.
 */
void litmus_tick(struct rq *rq, struct task_struct *p)
{
        TS_PLUGIN_TICK_START;

        if (is_realtime(p))
                update_time_litmus(rq, p);

        /* plugin tick */
        litmus->tick(p);

        TS_PLUGIN_TICK_END;

        return;
}
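
/* Integration sketch (assumption; the hook-up is deliberately not part
 * of this patch): scheduler_tick() in kernel/sched/core.c would need a
 * per-tick call along the lines of
 *
 *      litmus_tick(rq, curr);
 *
 * so that real-time execution time is charged and the plugin gets a
 * chance to request a preemption.
 */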

static struct task_struct *
litmus_schedule(struct rq *rq, struct task_struct *prev)
{
        struct task_struct *next;

#ifdef CONFIG_SMP
        struct rq *other_rq;
        long was_running;
        lt_t _maybe_deadlock = 0;
#endif

        /* let the plugin schedule */
        next = litmus->schedule(prev);

        sched_state_plugin_check();

#ifdef CONFIG_SMP
        /* check if a global plugin pulled a task from a different RQ */
        if (next && task_rq(next) != rq) {
                /* we need to migrate the task */
                other_rq = task_rq(next);
                TRACE_TASK(next, "migrate from %d\n", other_rq->cpu);

                /* while we drop the lock, the prev task could change its
                 * state
                 */
                was_running = is_running(prev);
                mb();
                raw_spin_unlock(&rq->lock);

                /* Don't race with a concurrent switch. This could deadlock in
                 * the case of cross or circular migrations. It's the job of
                 * the plugin to make sure that doesn't happen.
                 */
                TRACE_TASK(next, "stack_in_use=%d\n",
                           next->rt_param.stack_in_use);
                if (next->rt_param.stack_in_use != NO_CPU) {
                        TRACE_TASK(next, "waiting to deschedule\n");
                        _maybe_deadlock = litmus_clock();
                }
                while (next->rt_param.stack_in_use != NO_CPU) {
                        cpu_relax();
                        mb();
                        if (next->rt_param.stack_in_use == NO_CPU)
                                TRACE_TASK(next, "descheduled. Proceeding.\n");

                        if (lt_before(_maybe_deadlock + 1000000000L,
                                      litmus_clock())) {
                                /* We've been spinning for 1s.
                                 * Something can't be right!
                                 * Let's abandon the task and bail out; at
                                 * least we will have debug info instead of
                                 * a hard deadlock.
                                 */
#ifdef CONFIG_BUG_ON_MIGRATION_DEADLOCK
                                BUG();
#else
                                TRACE_TASK(next, "stack too long in use. "
                                           "Deadlock?\n");
                                next = NULL;

                                /* bail out */
                                raw_spin_lock(&rq->lock);
                                return next;
#endif
                        }
                }
#ifdef __ARCH_WANT_UNLOCKED_CTXSW
                if (next->on_cpu)
                        TRACE_TASK(next, "waiting for !oncpu");
                while (next->on_cpu) {
                        cpu_relax();
                        mb();
                }
#endif
                double_rq_lock(rq, other_rq);
                mb();
                if (is_realtime(prev) && is_running(prev) != was_running) {
                        TRACE_TASK(prev,
                                   "state changed while we dropped"
                                   " the lock: is_running=%d, was_running=%d\n",
                                   is_running(prev), was_running);
                        if (is_running(prev) && !was_running) {
                                /* The prev task became unblocked; we need to
                                 * simulate the normal sequence of events for
                                 * the scheduler plugin.
                                 */
                                litmus->task_block(prev);
                                litmus->task_wake_up(prev);
                        }
                }

                set_task_cpu(next, smp_processor_id());

                /* DEBUG: now that we have the lock we need to make sure a
                 * couple of things still hold:
                 * - it is still a real-time task
                 * - it is still runnable (could have been stopped)
                 * If either is violated, then the active plugin is
                 * doing something wrong.
                 */
                if (!is_realtime(next) || !is_running(next)) {
                        /* BAD BAD BAD */
                        TRACE_TASK(next, "BAD: migration invariant FAILED: "
                                   "rt=%d running=%d\n",
                                   is_realtime(next), is_running(next));
                        /* drop the task */
                        next = NULL;
                }
                /* release the other CPU's runqueue, but keep ours */
                raw_spin_unlock(&other_rq->lock);
        }
#endif

        if (next) {
#ifdef CONFIG_SMP
                next->rt_param.stack_in_use = rq->cpu;
#else
                next->rt_param.stack_in_use = 0;
#endif
                next->se.exec_start = rq->clock;
        }

        update_enforcement_timer(next);
        return next;
}
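
/* Sketch of the other half of the stack_in_use handshake (assumed; it
 * is not in this file): once a context switch away from a real-time
 * task completes, the post-switch path must publish that its stack is
 * free again, roughly
 *
 *      prev->rt_param.stack_in_use = NO_CPU;
 *      mb();
 *
 * which is what terminates the "waiting to deschedule" spin-wait in
 * litmus_schedule() above.
 */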

static void enqueue_task_litmus(struct rq *rq, struct task_struct *p,
                                int flags)
{
        if (flags & ENQUEUE_WAKEUP) {
                sched_trace_task_resume(p);
                tsk_rt(p)->present = 1;
                /* LITMUS^RT plugins need to update the state
                 * _before_ making it available in global structures.
                 * Linux gets away with being lazy about the task state
                 * update. We can't do that, hence we update the task
                 * state already here.
                 *
                 * WARNING: this needs to be re-evaluated when porting
                 * to newer kernel versions.
                 */
                p->state = TASK_RUNNING;
                litmus->task_wake_up(p);

                rq->litmus.nr_running++;
        } else
                TRACE_TASK(p, "ignoring an enqueue, not a wake up.\n");
}

static void dequeue_task_litmus(struct rq *rq, struct task_struct *p,
                                int flags)
{
        if (flags & DEQUEUE_SLEEP) {
                litmus->task_block(p);
                tsk_rt(p)->present = 0;
                sched_trace_task_block(p);

                rq->litmus.nr_running--;
        } else
                TRACE_TASK(p, "ignoring a dequeue, not going to sleep.\n");
}

static void yield_task_litmus(struct rq *rq)
{
        TS_SYSCALL_IN_START;
        TS_SYSCALL_IN_END;

        BUG_ON(rq->curr != current);
        /* sched_yield() is called to trigger delayed preemptions.
         * Thus, mark the current task as needing to be rescheduled.
         * This will cause the scheduler plugin to be invoked, which can
         * then determine if a preemption is still required.
         */
        clear_exit_np(current);
        litmus_reschedule_local();

        TS_SYSCALL_OUT_START;
}
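
/* Usage sketch (assumption, not defined by this patch): a task that is
 * flagged for a delayed preemption while non-preemptive is expected to
 * call sched_yield() when it leaves its non-preemptive section, e.g.
 * from a userspace library:
 *
 *      exit_np();              (clears the np flag in the control page)
 *      if (preemption_requested)
 *              sched_yield();  (lands in yield_task_litmus() above)
 *
 * exit_np() and preemption_requested are hypothetical names here.
 */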

/* Plugins are responsible for preemption checks. */
static void check_preempt_curr_litmus(struct rq *rq, struct task_struct *p, int flags)
{
}

static void put_prev_task_litmus(struct rq *rq, struct task_struct *p)
{
}

#ifdef CONFIG_SMP
static void pre_schedule_litmus(struct rq *rq, struct task_struct *prev)
{
        update_time_litmus(rq, prev);
        if (!is_running(prev))
                tsk_rt(prev)->present = 0;
}
#endif

/* pick_next_task_litmus() - frontend to litmus_schedule()
 *
 * Returns the next task to be scheduled.
 */
static struct task_struct *pick_next_task_litmus(struct rq *rq)
{
        /* get the to-be-switched-out task (prev) */
        struct task_struct *prev = rq->litmus.prev;
        struct task_struct *next;

        /* if not called from schedule() but from somewhere
         * else (e.g., migration), return now!
         */
        if (!rq->litmus.prev)
                return NULL;

        rq->litmus.prev = NULL;

        TS_PLUGIN_SCHED_START;
        next = litmus_schedule(rq, prev);
        TS_PLUGIN_SCHED_END;

        return next;
}
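
/* Assumption (the wiring lives outside this patch): schedule() is
 * expected to stash the outgoing task before the class methods run,
 * roughly
 *
 *      rq->litmus.prev = prev;
 *      next = pick_next_task(rq);
 *
 * which is why a NULL rq->litmus.prev above identifies callers other
 * than schedule(), such as the migration path.
 */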

static void task_tick_litmus(struct rq *rq, struct task_struct *p, int queued)
{
        /* nothing to do; tick-related work is done by litmus_tick() */
        return;
}

static void switched_to_litmus(struct rq *rq, struct task_struct *p)
{
}

static void prio_changed_litmus(struct rq *rq, struct task_struct *p,
                                int oldprio)
{
}

unsigned int get_rr_interval_litmus(struct rq *rq, struct task_struct *p)
{
        /* return infinity */
        return 0;
}

/* This is called when a task becomes a real-time task, either due to a
 * SCHED_* class transition or due to PI mutex inheritance. We don't handle
 * Linux PI mutex inheritance yet (and probably never will). Use the
 * LITMUS^RT-provided synchronization primitives instead.
 */
static void set_curr_task_litmus(struct rq *rq)
{
        rq->curr->se.exec_start = rq->clock;
}


#ifdef CONFIG_SMP
/* execve() tries to rebalance the task within its scheduling domain.
 * We don't care about the scheduling domain; this can get called from
 * exec, fork, and wakeup.
 */
static int
select_task_rq_litmus(struct task_struct *p, int sd_flag, int flags)
{
        /* Preemption is already disabled.
         * We don't want to change the CPU here.
         */
        return task_cpu(p);
}
#endif

const struct sched_class litmus_sched_class = {
        /* Since commit 34f971f6 the stop/migrate worker threads have a
         * class of their own, which is the highest-priority class. We don't
         * support CPU hotplug or CPU throttling; this allows LITMUS^RT to
         * use up to 1.0 CPU capacity.
         */
        .next                   = &rt_sched_class,
        .enqueue_task           = enqueue_task_litmus,
        .dequeue_task           = dequeue_task_litmus,
        .yield_task             = yield_task_litmus,

        .check_preempt_curr     = check_preempt_curr_litmus,

        .pick_next_task         = pick_next_task_litmus,
        .put_prev_task          = put_prev_task_litmus,

#ifdef CONFIG_SMP
        .select_task_rq         = select_task_rq_litmus,

        .pre_schedule           = pre_schedule_litmus,
#endif

        .set_curr_task          = set_curr_task_litmus,
        .task_tick              = task_tick_litmus,

        .get_rr_interval        = get_rr_interval_litmus,

        .prio_changed           = prio_changed_litmus,
        .switched_to            = switched_to_litmus,
};
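
A note on the .next linkage above: in this kernel generation the scheduling classes form a singly linked list that pick_next_task() walks from the highest-priority class down. Assuming the integration (not part of this patch) points the stop class's .next at litmus_sched_class, the resulting dispatch order would be:

/* Assumed dispatch order once integrated; the stop-class re-pointing
 * happens elsewhere and is not shown in this patch:
 *
 *      stop_sched_class -> litmus_sched_class -> rt_sched_class
 *              -> fair_sched_class -> idle_sched_class
 *
 * i.e., LITMUS^RT tasks preempt SCHED_FIFO/SCHED_RR tasks, but never
 * the stop/migrate worker threads.
 */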