author | Bjoern Brandenburg <bbb@mpi-sws.org> | 2013-07-01 17:56:31 -0400
---|---|---
committer | Bjoern Brandenburg <bbb@mpi-sws.org> | 2014-06-07 05:31:04 -0400
commit | ca1812816b4a3c02c18bf5dfb6a58fb323d4526c (patch) |
tree | 0552301bbf1e90b3f50d8be0597345fe1c146284 /kernel |
parent | d66d0f05f57e052590d0b2020240f24f377aa9d9 (diff) |
Hook into rt scheduling class to protect LITMUS^RT tasks
The rt scheduling class thinks it's the highest-priority scheduling
class around. Under LITMUS^RT, it is not. Don't go preempting remote cores
that run SCHED_LITMUS tasks.
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched/rt.c | 9 |
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 15334e6de832..dbe21ae888f8 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -7,6 +7,8 @@
 
 #include <linux/slab.h>
 
+#include <litmus/litmus.h>
+
 int sched_rr_timeslice = RR_TIMESLICE;
 
 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
@@ -436,7 +438,9 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 	if (rt_rq->rt_nr_running) {
 		if (rt_se && !on_rt_rq(rt_se))
 			enqueue_rt_entity(rt_se, false);
-		if (rt_rq->highest_prio.curr < curr->prio)
+		if (rt_rq->highest_prio.curr < curr->prio
+		    /* Don't subject LITMUS^RT tasks to remote reschedules. */
+		    && !is_realtime(curr))
 			resched_task(curr);
 	}
 }
@@ -530,7 +534,8 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 
 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
-	if (rt_rq->rt_nr_running)
+	if (rt_rq->rt_nr_running
+	    && !is_realtime(rq_of_rt_rq(rt_rq)->curr))
 		resched_task(rq_of_rt_rq(rt_rq)->curr);
 }
 
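Both hunks gate resched_task() on is_realtime(), the LITMUS^RT helper made available by the new <litmus/litmus.h> include. The standalone sketch below is an illustration only, not kernel code: struct task, should_resched(), and the SCHED_LITMUS value are stand-ins, and is_realtime() is assumed to reduce to a policy check against SCHED_LITMUS. It models the patched condition: a remote CPU is only asked to reschedule when its current task is not a LITMUS^RT task.

/* Standalone model of the guard added by this patch (assumed names, not kernel code). */
#include <stdio.h>

#define SCHED_LITMUS 6          /* assumed policy id reserved for LITMUS^RT tasks */

struct task {                   /* stand-in for struct task_struct */
	int policy;
	int prio;               /* lower value = higher priority, as in the kernel */
};

/* Assumed shape of LITMUS^RT's is_realtime(): true iff scheduled by SCHED_LITMUS. */
static int is_realtime(const struct task *t)
{
	return t->policy == SCHED_LITMUS;
}

/* Mirrors the patched condition in sched_rt_rq_enqueue(): only reschedule the
 * remote CPU's current task if it is NOT a LITMUS^RT task. */
static int should_resched(int highest_rt_prio, const struct task *curr)
{
	return highest_rt_prio < curr->prio && !is_realtime(curr);
}

int main(void)
{
	struct task litmus_task = { .policy = SCHED_LITMUS, .prio = 120 };
	struct task fair_task   = { .policy = 0 /* SCHED_NORMAL */, .prio = 120 };

	printf("preempt LITMUS task: %d\n", should_resched(50, &litmus_task)); /* prints 0 */
	printf("preempt fair task:   %d\n", should_resched(50, &fair_task));   /* prints 1 */
	return 0;
}

Run standalone, the sketch prints 0 for the SCHED_LITMUS case and 1 for the SCHED_NORMAL case, mirroring the intent of the patch: rt-class activity on one core no longer preempts a remote core that is running a LITMUS^RT task.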