aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched/litmus.c
diff options
context:
space:
mode:
authorleochanj105 <43393724+leochanj105@users.noreply.github.com>2020-09-30 00:42:19 -0400
committerGitHub <noreply@github.com>2020-09-30 00:42:19 -0400
commite0ce4a455f9cdb311e27a08f8a59b4b613de5703 (patch)
treeb15f3264a097501e4db7f3da540399d03efbf808 /kernel/sched/litmus.c
parent6892b37a7a71e4ca2e933d43646277f74995c2fe (diff)
Fix task balancing and remove dependency on prev
1. Adds a dummy balance function to LITMUS^RT and re-enables Linux's task balancing code. 2. Removes usage of `prev` from `pick_next_task_litmus()` 3. Removes duplicate call to `put_prev_task()` from `pick_next_task_litmus()` (this has been moved into core.c for all schedulers) 4. Fixes an unguarded use of `prev` in the `schedule()` function for SCHED_LITMUS
Diffstat (limited to 'kernel/sched/litmus.c')
-rw-r--r--kernel/sched/litmus.c29
1 files changed, 12 insertions, 17 deletions
diff --git a/kernel/sched/litmus.c b/kernel/sched/litmus.c
index 38f638f7eb4f..a28aa5ab28f7 100644
--- a/kernel/sched/litmus.c
+++ b/kernel/sched/litmus.c
@@ -66,7 +66,7 @@ litmus_schedule(struct rq *rq, struct task_struct *prev)
66 /* while we drop the lock, the prev task could change its 66 /* while we drop the lock, the prev task could change its
67 * state 67 * state
68 */ 68 */
69 BUG_ON(prev != current); 69 BUG_ON(prev && prev != current);
70 was_running = is_current_running(); 70 was_running = is_current_running();
71 71
72 /* Don't race with a concurrent switch. This could deadlock in 72 /* Don't race with a concurrent switch. This could deadlock in
@@ -276,27 +276,14 @@ static struct task_struct *pick_next_task_litmus(struct rq *rq,
276 if (rf) { 276 if (rf) {
277 cookie = rf->cookie; 277 cookie = rf->cookie;
278 } 278 }
279 if (prev && is_realtime(prev)) 279 if (rq->curr && is_realtime(rq->curr))
280 update_time_litmus(rq, prev); 280 update_time_litmus(rq, rq->curr);
281 281
282 lockdep_unpin_lock(&rq->lock, cookie); 282 lockdep_unpin_lock(&rq->lock, cookie);
283 TS_PLUGIN_SCHED_START; 283 TS_PLUGIN_SCHED_START;
284 next = litmus_schedule(rq, prev); 284 next = litmus_schedule(rq, rq->curr);
285 TS_PLUGIN_SCHED_END; 285 TS_PLUGIN_SCHED_END;
286 lockdep_repin_lock(&rq->lock, cookie); 286 lockdep_repin_lock(&rq->lock, cookie);
287
288 /* This is a bit backwards: the other classes call put_prev_task()
289 * _after_ they've determined that the class has some queued tasks.
290 * We can't determine this easily because each plugin manages its own
291 * ready queues, and because in the case of globally shared queues,
292 * we really don't know whether we'll have something ready even if
293 * we test here. So we do it in reverse: first ask the plugin to
294 * provide a task, and if we find one, call put_prev_task() on the
295 * previously scheduled task.
296 */
297 if (next && prev)
298 put_prev_task(rq, prev);
299
300 return next; 287 return next;
301} 288}
302 289
@@ -338,6 +325,13 @@ static void set_next_task_litmus(struct rq *rq, struct task_struct *p)
338 325
339 326
340#ifdef CONFIG_SMP 327#ifdef CONFIG_SMP
328/* Basic no-op balance function
329 */
330static int
331balance_litmus(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
332{
333 return 1;
334}
341/* execve tries to rebalance task in this scheduling domain. 335/* execve tries to rebalance task in this scheduling domain.
342 * We don't care about the scheduling domain; can gets called from 336 * We don't care about the scheduling domain; can gets called from
343 * exec, fork, wakeup. 337 * exec, fork, wakeup.
@@ -379,6 +373,7 @@ const struct sched_class litmus_sched_class = {
379 .put_prev_task = put_prev_task_litmus, 373 .put_prev_task = put_prev_task_litmus,
380 374
381#ifdef CONFIG_SMP 375#ifdef CONFIG_SMP
376 .balance = balance_litmus,
382 .select_task_rq = select_task_rq_litmus, 377 .select_task_rq = select_task_rq_litmus,
383#endif 378#endif
384 379